repo_name (stringclasses, 10 values) | file_path (stringlengths, 29-222) | content (stringlengths, 24-926k) | extension (stringclasses, 5 values)
---|---|---|---|
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Common/helper_timer.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Helper Timing Functions
#ifndef COMMON_HELPER_TIMER_H_
#define COMMON_HELPER_TIMER_H_
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// includes, system
#include <vector>
// includes, project
#include <exception.h>
// Definition of the StopWatch interface. This is used when we don't want to
// rely on the CUT functions, but rather on a self-contained class interface.
class StopWatchInterface {
public:
StopWatchInterface() {}
virtual ~StopWatchInterface() {}
public:
//! Start time measurement
virtual void start() = 0;
//! Stop time measurement
virtual void stop() = 0;
//! Reset time counters to zero
virtual void reset() = 0;
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
virtual float getTime() = 0;
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
virtual float getAverageTime() = 0;
};
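// Illustrative usage sketch: every concrete implementation of
// StopWatchInterface (the platform-specific classes defined below) is driven
// the same way, and all reported values are in milliseconds. The workload
// shown is a placeholder.
//
//   StopWatchInterface *sw = new StopWatchLinux();  // StopWatchWin on Windows
//   sw->start();
//   // ... timed workload ...
//   sw->stop();
//   float total_ms = sw->getTime();        // sum over all start/stop sessions
//   float mean_ms  = sw->getAverageTime(); // total time / completed sessions
//   delete sw;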
//////////////////////////////////////////////////////////////////
// Begin Stopwatch timer class definitions for all OS platforms //
//////////////////////////////////////////////////////////////////
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// includes, system
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef min
#undef max
//! Windows specific implementation of StopWatch
class StopWatchWin : public StopWatchInterface {
public:
//! Constructor, default
StopWatchWin()
: start_time(),
end_time(),
diff_time(0.0f),
total_time(0.0f),
running(false),
clock_sessions(0),
freq(0),
freq_set(false) {
if (!freq_set) {
// helper variable
LARGE_INTEGER temp;
// get the tick frequency from the OS
QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER *>(&temp));
// convert to type in which it is needed
freq = (static_cast<double>(temp.QuadPart)) / 1000.0;
// remember that the frequency has been queried
freq_set = true;
}
}
// Destructor
~StopWatchWin() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// member variables
//! Start of measurement
LARGE_INTEGER start_time;
//! End of measurement
LARGE_INTEGER end_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
//! tick frequency
double freq;
//! flag if the frequency has been set
bool freq_set;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::start() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and add the elapsed time to the total_time
//! accumulator. Also increment the number of completed clock sessions.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::stop() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&end_time));
diff_time = static_cast<float>(((static_cast<double>(end_time.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
total_time += diff_time;
clock_sessions++;
running = false;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
LARGE_INTEGER temp;
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&temp));
retval += static_cast<float>(((static_cast<double>(temp.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
#else
// Declarations for Stopwatch on Linux and Mac OSX
// includes, system
#include <sys/time.h>
#include <ctime>
//! Linux/Mac OSX specific implementation of StopWatch
class StopWatchLinux : public StopWatchInterface {
public:
//! Constructor, default
StopWatchLinux()
: start_time(),
diff_time(0.0),
total_time(0.0),
running(false),
clock_sessions(0) {}
// Destructor
virtual ~StopWatchLinux() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// helper functions
//! Get difference between start time and current time
inline float getDiffTime();
private:
// member variables
//! Start of measurement
struct timeval start_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::start() {
gettimeofday(&start_time, 0);
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and add the elapsed time to the total_time
//! accumulator. Also increment the number of completed clock sessions.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::stop() {
diff_time = getDiffTime();
total_time += diff_time;
running = false;
clock_sessions++;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
gettimeofday(&start_time, 0);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
retval += getDiffTime();
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getDiffTime() {
struct timeval t_time;
gettimeofday(&t_time, 0);
// time difference in milli-seconds
return static_cast<float>(1000.0 * (t_time.tv_sec - start_time.tv_sec) +
(0.001 * (t_time.tv_usec - start_time.tv_usec)));
}
#endif // WIN32
////////////////////////////////////////////////////////////////////////////////
//! Timer functionality exported
////////////////////////////////////////////////////////////////////////////////
//! Create a new timer
//! @return true if a timer has been created, otherwise false
//! @param timer_interface receives a pointer to the new timer; NULL if the
//! creation failed
////////////////////////////////////////////////////////////////////////////////
inline bool sdkCreateTimer(StopWatchInterface **timer_interface) {
// printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
*timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchWin());
#else
*timer_interface =
reinterpret_cast<StopWatchInterface *>(new StopWatchLinux());
#endif
return (*timer_interface != NULL) ? true : false;
}
////////////////////////////////////////////////////////////////////////////////
//! Delete a timer
//! @return true if a timer has been deleted, otherwise false
//! @param timer_interface pointer to the timer to delete; set to NULL afterwards
////////////////////////////////////////////////////////////////////////////////
inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) {
// printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
delete *timer_interface;
*timer_interface = NULL;
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Start the given timer
//! @param timer_interface pointer to the timer to start
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStartTimer(StopWatchInterface **timer_interface) {
// printf("sdkStartTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->start();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop the given timer. Does not reset.
//! @param timer_interface pointer to the timer to stop
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStopTimer(StopWatchInterface **timer_interface) {
// printf("sdkStopTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->stop();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Resets the timer's counter.
//! @param timer_interface pointer to the timer to reset.
////////////////////////////////////////////////////////////////////////////////
inline bool sdkResetTimer(StopWatchInterface **timer_interface) {
// printf("sdkResetTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->reset();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Return the average time for timer execution as the total time
//! for the timer divided by the number of completed (stopped) runs the timer
//! has made.
//! Excludes the current running time if the timer is currently running.
//! @param timer_interface pointer to the timer to return the average time of
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetAverageTimerValue called object %08x\n", (void
// *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getAverageTime();
} else {
return 0.0f;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Total execution time for the timer over all runs since the last reset
//! or timer creation.
//! @param timer_interface pointer to the timer to obtain the value of.
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getTime();
} else {
return 0.0f;
}
}
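// Illustrative sketch of the exported sdk*Timer helpers above being used
// together; the wrappers pick the platform-specific implementation, so caller
// code stays portable. The timed workload is a placeholder.
//
//   StopWatchInterface *timer = NULL;
//   sdkCreateTimer(&timer);
//   sdkStartTimer(&timer);
//   // ... timed workload ...
//   sdkStopTimer(&timer);
//   float elapsed_ms = sdkGetTimerValue(&timer);         // total msec so far
//   float average_ms = sdkGetAverageTimerValue(&timer);  // msec per session
//   sdkDeleteTimer(&timer);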
#endif // COMMON_HELPER_TIMER_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Common/helper_string.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing, timers, etc)
#ifndef COMMON_HELPER_STRING_H_
#define COMMON_HELPER_STRING_H_
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef STRCASECMP
#define STRCASECMP _stricmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP _strnicmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#ifndef SPRINTF
#define SPRINTF sprintf_s
#endif
#else // Linux Includes
#include <string.h>
#include <strings.h>
#ifndef STRCASECMP
#define STRCASECMP strcasecmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#ifndef SPRINTF
#define SPRINTF sprintf
#endif
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// CUDA Utility Helper Functions
inline int stringRemoveDelimiter(char delimiter, const char *string) {
int string_start = 0;
while (string[string_start] == delimiter) {
string_start++;
}
if (string_start >= static_cast<int>(strlen(string) - 1)) {
return 0;
}
return string_start;
}
inline int getFileExtension(char *filename, char **extension) {
int string_length = static_cast<int>(strlen(filename));
while (filename[string_length--] != '.') {
if (string_length == 0) break;
}
if (string_length > 0) string_length += 2;
if (string_length == 0)
*extension = NULL;
else
*extension = &filename[string_length];
return string_length;
}
inline bool checkCmdLineFlag(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
const char *equal_pos = strchr(string_argv, '=');
int argv_length = static_cast<int>(
equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
int length = static_cast<int>(strlen(string_ref));
if (length == argv_length &&
!STRNCASECMP(string_argv, string_ref, length)) {
bFound = true;
continue;
}
}
}
return bFound;
}
// Template wrapper that parses a numeric command-line argument value into a
// caller-supplied type (values are read with atoi)
template <class T>
inline bool getCmdLineArgumentValue(const int argc, const char **argv,
const char *string_ref, T *value) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
*value = (T)atoi(&string_argv[length + auto_inc]);
}
bFound = true;
i = argc;
}
}
}
return bFound;
}
inline int getCmdLineArgumentInt(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
int value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = atoi(&string_argv[length + auto_inc]);
} else {
value = 0;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline float getCmdLineArgumentFloat(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
float value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = static_cast<float>(atof(&string_argv[length + auto_inc]));
} else {
value = 0.f;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline bool getCmdLineArgumentString(const int argc, const char **argv,
const char *string_ref,
char **string_retval) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
char *string_argv = const_cast<char *>(&argv[i][string_start]);
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
*string_retval = &string_argv[length + 1];
bFound = true;
continue;
}
}
}
if (!bFound) {
*string_retval = NULL;
}
return bFound;
}
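// Illustrative sketch of how the command-line helpers above are typically
// combined; the argument names ("benchmark", "width", "scale", "file") are
// placeholders. Flags are matched case-insensitively after leading '-'
// characters are stripped, and values follow an '=' sign (e.g. "--width=512").
//
//   if (checkCmdLineFlag(argc, (const char **)argv, "benchmark")) { /* ... */ }
//   int   width = getCmdLineArgumentInt(argc, (const char **)argv, "width");
//   float scale = getCmdLineArgumentFloat(argc, (const char **)argv, "scale");
//   char *fname = NULL;
//   getCmdLineArgumentString(argc, (const char **)argv, "file", &fname);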
//////////////////////////////////////////////////////////////////////////////
//! Find the path for a file assuming that
//! files are found in the searchPath.
//!
//! @return the path if succeeded, otherwise 0
//! @param filename name of the file
//! @param executable_path optional absolute path of the executable
//////////////////////////////////////////////////////////////////////////////
inline char *sdkFindFilePath(const char *filename,
const char *executable_path) {
// <executable_name> defines a variable that is replaced with the name of the
// executable
// Typical relative search paths to locate needed companion files (e.g. sample
// input data, or JIT source files) The origin for the relative search may be
// the .exe file, a .bat file launching an .exe, a browser .exe launching the
// .exe or .bat, etc
const char *searchPath[] = {
"./", // same dir
"./data/", // same dir
"../../../../Samples/<executable_name>/", // up 4 in tree
"../../../Samples/<executable_name>/", // up 3 in tree
"../../Samples/<executable_name>/", // up 2 in tree
"../../../../Samples/<executable_name>/data/", // up 4 in tree
"../../../Samples/<executable_name>/data/", // up 3 in tree
"../../Samples/<executable_name>/data/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/data/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/data/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/data/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/data/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/data/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/data/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/data/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/data/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/data/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/data/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/data/", // up 2 in tree
"../../../../Common/data/", // up 4 in tree
"../../../Common/data/", // up 3 in tree
"../../Common/data/" // up 2 in tree
};
// Extract the executable name
std::string executable_name;
if (executable_path != 0) {
executable_name = std::string(executable_path);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// Windows path delimiter
size_t delimiter_pos = executable_name.find_last_of('\\');
executable_name.erase(0, delimiter_pos + 1);
if (executable_name.rfind(".exe") != std::string::npos) {
// we strip .exe, only if the .exe is found
executable_name.resize(executable_name.size() - 4);
}
#else
// Linux & OSX path delimiter
size_t delimiter_pos = executable_name.find_last_of('/');
executable_name.erase(0, delimiter_pos + 1);
#endif
}
// Loop over all search paths and return the first hit
for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) {
std::string path(searchPath[i]);
size_t executable_name_pos = path.find("<executable_name>");
// If there is executable_name variable in the searchPath
// replace it with the value
if (executable_name_pos != std::string::npos) {
if (executable_path != 0) {
path.replace(executable_name_pos, strlen("<executable_name>"),
executable_name);
} else {
// Skip this path entry if no executable argument is given
continue;
}
}
#ifdef _DEBUG
printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str());
#endif
// Test if the file exists
path.append(filename);
FILE *fp;
FOPEN(fp, path.c_str(), "rb");
if (fp != NULL) {
fclose(fp);
// File found
// returning an allocated array here for backwards compatibility reasons
char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1));
STRCPY(file_path, path.length() + 1, path.c_str());
return file_path;
}
if (fp) {
fclose(fp);
}
}
// File not found
printf("\nerror: sdkFindFilePath: file <%s> not found!\n", filename);
return 0;
}
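// Illustrative sketch: sdkFindFilePath() is normally handed argv[0] so that the
// <executable_name> placeholders in the search table can be substituted. The
// returned buffer is malloc'd and owned by the caller; the file name below is
// a placeholder.
//
//   char *image_path = sdkFindFilePath("frame10.ppm", argv[0]);
//   if (image_path == NULL) { exit(EXIT_FAILURE); }
//   // ... use image_path ...
//   free(image_path);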
#endif // COMMON_HELPER_STRING_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Common/helper_image.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (image,bitmap)
#ifndef COMMON_HELPER_IMAGE_H_
#define COMMON_HELPER_IMAGE_H_
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdint.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#ifndef MIN
#define MIN(a, b) ((a < b) ? a : b)
#endif
#ifndef MAX
#define MAX(a, b) ((a > b) ? a : b)
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#include <helper_string.h>
// namespace unnamed (internal)
namespace helper_image_internal {
//! size of PGM file header
const unsigned int PGMHeaderSize = 0x40;
// types
//! Data converter from unsigned char / unsigned byte to type T
template <class T>
struct ConverterFromUByte;
//! Data converter from unsigned char / unsigned byte
template <>
struct ConverterFromUByte<unsigned char> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
float operator()(const unsigned char &val) {
return static_cast<unsigned char>(val);
}
};
//! Data converter from unsigned char / unsigned byte to float
template <>
struct ConverterFromUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
float operator()(const unsigned char &val) {
return static_cast<float>(val) / 255.0f;
}
};
//! Data converter from type T to unsigned char / unsigned byte
template <class T>
struct ConverterToUByte;
//! Identity converter for unsigned char / unsigned byte data
template <>
struct ConverterToUByte<unsigned char> {
//! Conversion operator (essentially a pass-through)
//! @return converted value
//! @param val value to convert
unsigned char operator()(const unsigned char &val) { return val; }
};
//! Data converter from float to unsigned char / unsigned byte
template <>
struct ConverterToUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
unsigned char operator()(const float &val) {
return static_cast<unsigned char>(val * 255.0f);
}
};
} // namespace helper_image_internal
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#else
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#endif
inline bool __loadPPM(const char *file, unsigned char **data, unsigned int *w,
unsigned int *h, unsigned int *channels) {
FILE *fp = NULL;
if (FOPEN_FAIL(FOPEN(fp, file, "rb"))) {
std::cerr << "__LoadPPM() : Failed to open file: " << file << std::endl;
return false;
}
// check header
char header[helper_image_internal::PGMHeaderSize];
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL" << std::endl;
return false;
}
if (strncmp(header, "P5", 2) == 0) {
*channels = 1;
} else if (strncmp(header, "P6", 2) == 0) {
*channels = 3;
} else {
std::cerr << "__LoadPPM() : File is not a PPM or PGM image" << std::endl;
*channels = 0;
return false;
}
// parse header, read maxval, width and height
unsigned int width = 0;
unsigned int height = 0;
unsigned int maxval = 0;
unsigned int i = 0;
while (i < 3) {
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL"
<< std::endl;
return false;
}
if (header[0] == '#') {
continue;
}
if (i == 0) {
i += SSCANF(header, "%u %u %u", &width, &height, &maxval);
} else if (i == 1) {
i += SSCANF(header, "%u %u", &height, &maxval);
} else if (i == 2) {
i += SSCANF(header, "%u", &maxval);
}
}
// check if given handle for the data is initialized
if (NULL != *data) {
if (*w != width || *h != height) {
std::cerr << "__LoadPPM() : Invalid image dimensions." << std::endl;
}
} else {
*data = (unsigned char *)malloc(sizeof(unsigned char) * width * height *
*channels);
*w = width;
*h = height;
}
// read and close file
if (fread(*data, sizeof(unsigned char), width * height * *channels, fp) ==
0) {
std::cerr << "__LoadPPM() read data returned error." << std::endl;
}
fclose(fp);
return true;
}
template <class T>
inline bool sdkLoadPGM(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = NULL;
unsigned int channels;
if (true != __loadPPM(file, &idata, w, h, &channels)) {
return false;
}
unsigned int size = *w * *h * channels;
// initialize mem if necessary
// the correct size is checked / set in loadPGMc()
if (NULL == *data) {
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size));
}
// copy and cast data
std::transform(idata, idata + size, *data,
helper_image_internal::ConverterFromUByte<T>());
free(idata);
return true;
}
template <class T>
inline bool sdkLoadPPM4(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size * 4));
unsigned char *ptr = *data;
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool __savePPM(const char *file, unsigned char *data, unsigned int w,
unsigned int h, unsigned int channels) {
assert(NULL != data);
assert(w > 0);
assert(h > 0);
std::fstream fh(file, std::fstream::out | std::fstream::binary);
if (fh.bad()) {
std::cerr << "__savePPM() : Opening file failed." << std::endl;
return false;
}
if (channels == 1) {
fh << "P5\n";
} else if (channels == 3) {
fh << "P6\n";
} else {
std::cerr << "__savePPM() : Invalid number of channels." << std::endl;
return false;
}
fh << w << "\n" << h << "\n" << 0xff << std::endl;
for (unsigned int i = 0; (i < (w * h * channels)) && fh.good(); ++i) {
fh << data[i];
}
fh.flush();
if (fh.bad()) {
std::cerr << "__savePPM() : Writing data failed." << std::endl;
return false;
}
fh.close();
return true;
}
template <class T>
inline bool sdkSavePGM(const char *file, T *data, unsigned int w,
unsigned int h) {
unsigned int size = w * h;
unsigned char *idata = (unsigned char *)malloc(sizeof(unsigned char) * size);
std::transform(data, data + size, idata,
helper_image_internal::ConverterToUByte<T>());
// write file
bool result = __savePPM(file, idata, w, h, 1);
// cleanup
free(idata);
return result;
}
inline bool sdkSavePPM4ub(const char *file, unsigned char *data, unsigned int w,
unsigned int h) {
// strip 4th component
int size = w * h;
unsigned char *ndata =
(unsigned char *)malloc(sizeof(unsigned char) * size * 3);
unsigned char *ptr = ndata;
for (int i = 0; i < size; i++) {
*ptr++ = *data++;
*ptr++ = *data++;
*ptr++ = *data++;
data++;
}
bool result = __savePPM(file, ndata, w, h, 3);
free(ndata);
return result;
}
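// Illustrative sketch of a grayscale load/save round trip with the helpers
// above; the file names are placeholders. With T = float, pixel values are
// scaled to [0,1] on load and back to [0,255] on save by the converters in
// helper_image_internal.
//
//   float *img = NULL;
//   unsigned int w = 0, h = 0;
//   if (sdkLoadPGM<float>("input.pgm", &img, &w, &h)) {
//     // ... process img ...
//     sdkSavePGM<float>("output.pgm", img, w, h);
//     free(img);
//   }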
//////////////////////////////////////////////////////////////////////////////
//! Read file \filename and return the data
//! @return bool if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFile(const char *filename, T **data, unsigned int *len,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// intermediate storage for the data read
std::vector<T> data_read;
// open file for reading
FILE *fh = NULL;
// check if filestream is valid
if (FOPEN_FAIL(FOPEN(fh, filename, "r"))) {
printf("Unable to open input file: %s\n", filename);
return false;
}
// read all data elements
T token;
while (!feof(fh)) {
fscanf(fh, "%f", &token);
data_read.push_back(token);
}
// the last element is read twice
data_read.pop_back();
fclose(fh);
// check if the given handle is already initialized
if (NULL != *data) {
if (*len != data_read.size()) {
std::cerr << "sdkReadFile() : Initialized memory given but "
<< "size mismatch with signal read "
<< "(data read / data init = " << (unsigned int)data_read.size()
<< " / " << *len << ")" << std::endl;
return false;
}
} else {
// allocate storage for the data read
*data = reinterpret_cast<T *>(malloc(sizeof(T) * data_read.size()));
// store signal size
*len = static_cast<unsigned int>(data_read.size());
}
// copy data
memcpy(*data, &data_read.front(), sizeof(T) * data_read.size());
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Read file \filename and return the data
//! @return bool if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFileBlocks(const char *filename, T **data, unsigned int *len,
unsigned int block_num, unsigned int block_size,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// open file for reading
FILE *fh = fopen(filename, "rb");
  if (fh == NULL) {
    if (verbose) {
      std::cerr << "sdkReadFileBlocks() : Opening file failed." << std::endl;
    }
    return false;
  }
// check if the given handle is already initialized
// allocate storage for the data read
data[block_num] = reinterpret_cast<T *>(malloc(block_size));
// read all data elements
fseek(fh, block_num * block_size, SEEK_SET);
*len = fread(data[block_num], sizeof(T), block_size / sizeof(T), fh);
fclose(fh);
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Write a data file \filename
//! @return true if writing the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data data to write
//! @param len number of data elements in data, -1 on error
//! @param epsilon epsilon for comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool sdkWriteFile(const char *filename, const T *data, unsigned int len,
const S epsilon, bool verbose, bool append = false) {
assert(NULL != filename);
assert(NULL != data);
// open file for writing
// if (append) {
std::fstream fh(filename, std::fstream::out | std::fstream::ate);
if (verbose) {
std::cerr << "sdkWriteFile() : Open file " << filename
<< " for write/append." << std::endl;
}
/* } else {
std::fstream fh(filename, std::fstream::out);
if (verbose) {
std::cerr << "sdkWriteFile() : Open file " << filename << " for
write." << std::endl;
}
}
*/
// check if filestream is valid
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Opening file failed." << std::endl;
}
return false;
}
// first write epsilon
fh << "# " << epsilon << "\n";
// write data
for (unsigned int i = 0; (i < len) && (fh.good()); ++i) {
fh << data[i] << ' ';
}
// Check if writing succeeded
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Writing file failed." << std::endl;
}
return false;
}
// file ends with nl
fh << std::endl;
return true;
}
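// Illustrative sketch: sdkReadFile()/sdkWriteFile() handle whitespace-separated
// ASCII data (the read path is written for float tokens). The file names and
// epsilon value are placeholders.
//
//   float *signal = NULL;
//   unsigned int len = 0;
//   if (sdkReadFile("signal.dat", &signal, &len, false)) {
//     sdkWriteFile("signal_out.dat", signal, len, 1e-5f, false);
//     free(signal);
//   }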
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareData(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
bool result = true;
unsigned int error_count = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = static_cast<float>(reference[i]) - static_cast<float>(data[i]);
bool comp = (diff <= epsilon) && (diff >= -epsilon);
result &= comp;
error_count += !comp;
#if 0
if (!comp) {
std::cerr << "ERROR, i = " << i << ",\t "
<< reference[i] << " / "
<< data[i]
<< " (reference / data)\n";
}
#endif
}
if (threshold == 0.0f) {
return (result) ? true : false;
} else {
if (error_count) {
printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
static_cast<float>(error_count) * 100 / static_cast<float>(len),
error_count);
}
return (len * threshold > error_count) ? true : false;
}
}
#ifndef __MIN_EPSILON_ERROR
#define __MIN_EPSILON_ERROR 1e-3f
#endif
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//! @param threshold fraction of elements allowed to mismatch for pass/fail
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareDataAsFloatThreshold(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
// If we set epsilon to be 0, let's set a minimum threshold
float max_error = MAX((float)epsilon, __MIN_EPSILON_ERROR);
int error_count = 0;
bool result = true;
for (unsigned int i = 0; i < len; ++i) {
float diff =
fabs(static_cast<float>(reference[i]) - static_cast<float>(data[i]));
bool comp = (diff < max_error);
result &= comp;
if (!comp) {
error_count++;
}
}
if (threshold == 0.0f) {
if (error_count) {
printf("total # of errors = %d\n", error_count);
}
return (error_count == 0) ? true : false;
} else {
if (error_count) {
printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
static_cast<float>(error_count) * 100 / static_cast<float>(len),
error_count);
}
return ((len * threshold > error_count) ? true : false);
}
}
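// Illustrative sketch of how the two comparison routines above are typically
// called; ref, res and n are placeholders for the reference buffer, the
// computed buffer and the element count. The threshold argument is the
// fraction of elements allowed to differ by more than epsilon before the
// comparison is reported as a failure; 0.0f demands that every element match
// within epsilon.
//
//   bool exact_ok  = compareData(ref, res, n, 0.01f, 0.0f);
//   bool mostly_ok = compareDataAsFloatThreshold(ref, res, n, 0.01f, 0.05f);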
inline void sdkDumpBin(void *data, unsigned int bytes, const char *filename) {
printf("sdkDumpBin: <%s>\n", filename);
FILE *fp;
FOPEN(fp, filename, "wb");
fwrite(data, bytes, 1, fp);
fflush(fp);
fclose(fp);
}
inline bool sdkCompareBin2BinUint(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
unsigned int *src_buffer, *ref_buffer;
FILE *src_fp = NULL, *ref_fp = NULL;
uint64_t error_count = 0;
size_t fsize = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <unsigned int> unable to open src_file: %s\n",
src_file);
error_count++;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <unsigned int> unable to find <%s> in <%s>\n",
ref_file, exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
ref_file);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf(
"compareBin2Bin <unsigned int>"
" unable to open ref_file: %s\n",
ref_file_path);
error_count++;
}
if (src_fp && ref_fp) {
src_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
ref_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
fsize = fread(src_buffer, sizeof(unsigned int), nelements, src_fp);
fsize = fread(ref_buffer, sizeof(unsigned int), nelements, ref_fp);
printf(
"> compareBin2Bin <unsigned int> nelements=%d,"
" epsilon=%4.2f, threshold=%4.2f\n",
nelements, epsilon, threshold);
printf(" src_file <%s>, size=%d bytes\n", src_file,
static_cast<int>(fsize * sizeof(unsigned int)));
printf(" ref_file <%s>, size=%d bytes\n", ref_file_path,
static_cast<int>(fsize * sizeof(unsigned int)));
if (!compareData<unsigned int, float>(ref_buffer, src_buffer, nelements,
epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
inline bool sdkCompareBin2BinFloat(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
float *src_buffer = NULL, *ref_buffer = NULL;
FILE *src_fp = NULL, *ref_fp = NULL;
size_t fsize = 0;
uint64_t error_count = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <float> unable to open src_file: %s\n", src_file);
error_count = 1;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <float> unable to find <%s> in <%s>\n", ref_file,
exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
exec_path);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf("compareBin2Bin <float> unable to open ref_file: %s\n",
ref_file_path);
error_count = 1;
}
if (src_fp && ref_fp) {
src_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
ref_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
printf(
"> compareBin2Bin <float> nelements=%d, epsilon=%4.2f,"
" threshold=%4.2f\n",
nelements, epsilon, threshold);
fsize = fread(src_buffer, sizeof(float), nelements, src_fp);
printf(" src_file <%s>, size=%d bytes\n", src_file,
static_cast<int>(fsize * sizeof(float)));
fsize = fread(ref_buffer, sizeof(float), nelements, ref_fp);
printf(" ref_file <%s>, size=%d bytes\n", ref_file_path,
static_cast<int>(fsize * sizeof(float)));
if (!compareDataAsFloatThreshold<float, float>(
ref_buffer, src_buffer, nelements, epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
inline bool sdkCompareL2fe(const float *reference, const float *data,
const unsigned int len, const float epsilon) {
assert(epsilon >= 0);
float error = 0;
float ref = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = reference[i] - data[i];
error += diff * diff;
ref += reference[i] * reference[i];
}
float normRef = sqrtf(ref);
if (fabs(ref) < 1e-7) {
#ifdef _DEBUG
std::cerr << "ERROR, reference l2-norm is 0\n";
#endif
return false;
}
float normError = sqrtf(error);
error = normError / normRef;
bool result = error < epsilon;
#ifdef _DEBUG
if (!result) {
std::cerr << "ERROR, l2-norm error " << error << " is greater than epsilon "
<< epsilon << "\n";
}
#endif
return result;
}
inline bool sdkLoadPPMub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned int channels;
return __loadPPM(file, data, w, h, &channels);
}
inline bool sdkLoadPPM4ub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = (unsigned char *)malloc(sizeof(unsigned char) * size * 4);
unsigned char *ptr = *data;
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool sdkComparePPM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data, *ref_data;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPM4ub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
if (sdkLoadPPM4ub(src_file, &src_data, &src_width, &src_height) != true) {
std::cerr << "PPMvsPPM: unable to load src image file: " << src_file
<< "\n";
return false;
}
if (src_height != ref_height || src_width != ref_width) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: source and ref size mismatch (" << src_width
<< "," << src_height << ")vs(" << ref_width << "," << ref_height
<< ")\n";
}
}
if (verboseErrors) {
std::cerr << "PPMvsPPM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
}
if (compareData(ref_data, src_data, src_width * src_height * 4, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
return (error_count == 0) ? true : false;
}
inline bool sdkComparePGM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data = 0, *ref_data = 0;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPMub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
if (sdkLoadPPMub(src_file, &src_data, &src_width, &src_height) != true) {
std::cerr << "PGMvsPGM: unable to load src image file: " << src_file
<< "\n";
return false;
}
if (src_height != ref_height || src_width != ref_width) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: source and ref size mismatch (" << src_width
<< "," << src_height << ")vs(" << ref_width << "," << ref_height
<< ")\n";
}
}
if (verboseErrors)
std::cerr << "PGMvsPGM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
if (compareData(ref_data, src_data, src_width * src_height, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
return (error_count == 0) ? true : false;
}
#endif // COMMON_HELPER_IMAGE_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Common/exception.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* CUda UTility Library */
#ifndef COMMON_EXCEPTION_H_
#define COMMON_EXCEPTION_H_
// includes, system
#include <stdlib.h>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>
//! Exception wrapper.
//! @param Std_Exception Exception out of namespace std for easy typing.
template <class Std_Exception>
class Exception : public Std_Exception {
public:
//! @brief Static construction interface
//! @return Always throws ( Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const char *detailed = "-");
//! Static construction interface
//! @return Always throws ( Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const std::string &detailed);
//! Destructor
virtual ~Exception() throw();
private:
//! Constructor, default (private)
Exception();
//! Constructor, standard
//! @param str string returned by what()
explicit Exception(const std::string &str);
};
////////////////////////////////////////////////////////////////////////////////
//! Exception handler function for arbitrary exceptions
//! @param ex exception to handle
////////////////////////////////////////////////////////////////////////////////
template <class Exception_Typ>
inline void handleException(const Exception_Typ &ex) {
std::cerr << ex.what() << std::endl;
exit(EXIT_FAILURE);
}
//! Convenience macros
//! Exception caused by dynamic program behavior, e.g. file does not exist
#define RUNTIME_EXCEPTION(msg) \
Exception<std::runtime_error>::throw_it(__FILE__, __LINE__, msg)
//! Logic exception in program, e.g. an assert failed
#define LOGIC_EXCEPTION(msg) \
Exception<std::logic_error>::throw_it(__FILE__, __LINE__, msg)
//! Out of range exception
#define RANGE_EXCEPTION(msg) \
Exception<std::range_error>::throw_it(__FILE__, __LINE__, msg)
////////////////////////////////////////////////////////////////////////////////
//! Implementation
// includes, system
#include <sstream>
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed information.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const char *detailed) {
std::stringstream s;
  // Quite heavy-weight, but exceptions are not for
  // performance / release versions
s << "Exception in file '" << file << "' in line " << line << "\n"
<< "Detailed description: " << detailed << "\n";
throw Exception(s.str());
}
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed information.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const std::string &msg) {
throw_it(file, line, msg.c_str());
}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, default (private).
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception() : Std_Exception("Unknown Exception.\n") {}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, standard (private).
//! String returned by what().
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception(const std::string &s) : Std_Exception(s) {}
////////////////////////////////////////////////////////////////////////////////
//! Destructor
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::~Exception() throw() {}
// functions, exported
#endif // COMMON_EXCEPTION_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Common/helper_cuda.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions for initialization and error checking
#ifndef COMMON_HELPER_CUDA_H_
#define COMMON_HELPER_CUDA_H_
#pragma once
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_string.h>
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// Note: your SDK sample is required to include the proper header files.
// Please refer to the CUDA examples for the CUDA headers needed, which may
// change depending on which CUDA functions are used.
// CUDA Runtime error messages
#ifdef __DPCT_HPP__
static const char *_cudaGetErrorEnum(dpct::err0 error) {
/*
DPCT1009:16: SYCL uses exceptions to report errors and does not use the error
codes. The original code was commented out and a warning string was inserted.
You need to rewrite this code.
*/
return "cudaGetErrorName is not supported" /*cudaGetErrorName(error)*/;
}
#endif
#ifdef CUDA_DRIVER_API
// CUDA Driver API errors
static const char *_cudaGetErrorEnum(CUresult error) {
static char unknown[] = "<unknown>";
const char *ret = NULL;
cuGetErrorName(error, &ret);
return ret ? ret : unknown;
}
#endif
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(cublasStatus_t error) {
switch (error) {
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
#endif
#ifdef _CUFFT_H_
// cuFFT API errors
static const char *_cudaGetErrorEnum(cufftResult error) {
switch (error) {
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
case CUFFT_INCOMPLETE_PARAMETER_LIST:
return "CUFFT_INCOMPLETE_PARAMETER_LIST";
case CUFFT_INVALID_DEVICE:
return "CUFFT_INVALID_DEVICE";
case CUFFT_PARSE_ERROR:
return "CUFFT_PARSE_ERROR";
case CUFFT_NO_WORKSPACE:
return "CUFFT_NO_WORKSPACE";
case CUFFT_NOT_IMPLEMENTED:
return "CUFFT_NOT_IMPLEMENTED";
case CUFFT_LICENSE_ERROR:
return "CUFFT_LICENSE_ERROR";
case CUFFT_NOT_SUPPORTED:
return "CUFFT_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSPARSEAPI
// cuSPARSE API errors
static const char *_cudaGetErrorEnum(cusparseStatus_t error) {
switch (error) {
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSOLVER_COMMON_H_
// cuSOLVER API errors
static const char *_cudaGetErrorEnum(cusolverStatus_t error) {
switch (error) {
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_STATUS_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_MAPPING_ERROR:
return "CUSOLVER_STATUS_MAPPING_ERROR";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSOLVER_STATUS_NOT_SUPPORTED:
return "CUSOLVER_STATUS_NOT_SUPPORTED ";
case CUSOLVER_STATUS_ZERO_PIVOT:
return "CUSOLVER_STATUS_ZERO_PIVOT";
case CUSOLVER_STATUS_INVALID_LICENSE:
return "CUSOLVER_STATUS_INVALID_LICENSE";
}
return "<unknown>";
}
#endif
#ifdef CURAND_H_
// cuRAND API errors
static const char *_cudaGetErrorEnum(int error) {
switch (error) {
case 0:
return "CURAND_STATUS_SUCCESS";
case 100:
return "CURAND_STATUS_VERSION_MISMATCH";
case 101:
return "CURAND_STATUS_NOT_INITIALIZED";
case 102:
return "CURAND_STATUS_ALLOCATION_FAILED";
case 103:
return "CURAND_STATUS_TYPE_ERROR";
case 104:
return "CURAND_STATUS_OUT_OF_RANGE";
case 105:
return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
case 106:
return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case 201:
return "CURAND_STATUS_LAUNCH_FAILURE";
case 202:
return "CURAND_STATUS_PREEXISTING_FAILURE";
case 203:
return "CURAND_STATUS_INITIALIZATION_FAILED";
case 204:
return "CURAND_STATUS_ARCH_MISMATCH";
case 999:
return "CURAND_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NVJPEGAPI
// nvJPEG API errors
static const char *_cudaGetErrorEnum(nvjpegStatus_t error) {
switch (error) {
case NVJPEG_STATUS_SUCCESS:
return "NVJPEG_STATUS_SUCCESS";
case NVJPEG_STATUS_NOT_INITIALIZED:
return "NVJPEG_STATUS_NOT_INITIALIZED";
case NVJPEG_STATUS_INVALID_PARAMETER:
return "NVJPEG_STATUS_INVALID_PARAMETER";
case NVJPEG_STATUS_BAD_JPEG:
return "NVJPEG_STATUS_BAD_JPEG";
case NVJPEG_STATUS_JPEG_NOT_SUPPORTED:
return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED";
case NVJPEG_STATUS_ALLOCATOR_FAILURE:
return "NVJPEG_STATUS_ALLOCATOR_FAILURE";
case NVJPEG_STATUS_EXECUTION_FAILED:
return "NVJPEG_STATUS_EXECUTION_FAILED";
case NVJPEG_STATUS_ARCH_MISMATCH:
return "NVJPEG_STATUS_ARCH_MISMATCH";
case NVJPEG_STATUS_INTERNAL_ERROR:
return "NVJPEG_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NV_NPPIDEFS_H
// NPP API errors
static const char *_cudaGetErrorEnum(NppStatus error) {
switch (error) {
case NPP_NOT_SUPPORTED_MODE_ERROR:
return "NPP_NOT_SUPPORTED_MODE_ERROR";
case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR";
case NPP_RESIZE_NO_OPERATION_ERROR:
return "NPP_RESIZE_NO_OPERATION_ERROR";
case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY:
return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_BAD_ARG_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFF_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECT_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUAD_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEM_ALLOC_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTO_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_INPUT:
return "NPP_INVALID_INPUT";
case NPP_POINTER_ERROR:
return "NPP_POINTER_ERROR";
case NPP_WARNING:
return "NPP_WARNING";
case NPP_ODD_ROI_WARNING:
return "NPP_ODD_ROI_WARNING";
#else
// These are for CUDA 5.5 or higher
case NPP_BAD_ARGUMENT_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFFICIENT_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECTANGLE_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUADRANGLE_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEMORY_ALLOCATION_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_HOST_POINTER_ERROR:
return "NPP_INVALID_HOST_POINTER_ERROR";
case NPP_INVALID_DEVICE_POINTER_ERROR:
return "NPP_INVALID_DEVICE_POINTER_ERROR";
#endif
case NPP_LUT_NUMBER_OF_LEVELS_ERROR:
return "NPP_LUT_NUMBER_OF_LEVELS_ERROR";
case NPP_TEXTURE_BIND_ERROR:
return "NPP_TEXTURE_BIND_ERROR";
case NPP_WRONG_INTERSECTION_ROI_ERROR:
return "NPP_WRONG_INTERSECTION_ROI_ERROR";
case NPP_NOT_EVEN_STEP_ERROR:
return "NPP_NOT_EVEN_STEP_ERROR";
case NPP_INTERPOLATION_ERROR:
return "NPP_INTERPOLATION_ERROR";
case NPP_RESIZE_FACTOR_ERROR:
return "NPP_RESIZE_FACTOR_ERROR";
case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR:
return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_MEMFREE_ERR:
return "NPP_MEMFREE_ERR";
case NPP_MEMSET_ERR:
return "NPP_MEMSET_ERR";
case NPP_MEMCPY_ERR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERR:
return "NPP_MIRROR_FLIP_ERR";
#else
case NPP_MEMFREE_ERROR:
return "NPP_MEMFREE_ERROR";
case NPP_MEMSET_ERROR:
return "NPP_MEMSET_ERROR";
case NPP_MEMCPY_ERROR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERROR:
return "NPP_MIRROR_FLIP_ERROR";
#endif
case NPP_ALIGNMENT_ERROR:
return "NPP_ALIGNMENT_ERROR";
case NPP_STEP_ERROR:
return "NPP_STEP_ERROR";
case NPP_SIZE_ERROR:
return "NPP_SIZE_ERROR";
case NPP_NULL_POINTER_ERROR:
return "NPP_NULL_POINTER_ERROR";
case NPP_CUDA_KERNEL_EXECUTION_ERROR:
return "NPP_CUDA_KERNEL_EXECUTION_ERROR";
case NPP_NOT_IMPLEMENTED_ERROR:
return "NPP_NOT_IMPLEMENTED_ERROR";
case NPP_ERROR:
return "NPP_ERROR";
case NPP_SUCCESS:
return "NPP_SUCCESS";
case NPP_WRONG_INTERSECTION_QUAD_WARNING:
return "NPP_WRONG_INTERSECTION_QUAD_WARNING";
case NPP_MISALIGNED_DST_ROI_WARNING:
return "NPP_MISALIGNED_DST_ROI_WARNING";
case NPP_AFFINE_QUAD_INCORRECT_WARNING:
return "NPP_AFFINE_QUAD_INCORRECT_WARNING";
case NPP_DOUBLE_SIZE_WARNING:
return "NPP_DOUBLE_SIZE_WARNING";
case NPP_WRONG_INTERSECTION_ROI_WARNING:
return "NPP_WRONG_INTERSECTION_ROI_WARNING";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000
/* These are 6.0 or higher */
case NPP_LUT_PALETTE_BITSIZE_ERROR:
return "NPP_LUT_PALETTE_BITSIZE_ERROR";
case NPP_ZC_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR";
case NPP_QUALITY_INDEX_ERROR:
return "NPP_QUALITY_INDEX_ERROR";
case NPP_CHANNEL_ORDER_ERROR:
return "NPP_CHANNEL_ORDER_ERROR";
case NPP_ZERO_MASK_VALUE_ERROR:
return "NPP_ZERO_MASK_VALUE_ERROR";
case NPP_NUMBER_OF_CHANNELS_ERROR:
return "NPP_NUMBER_OF_CHANNELS_ERROR";
case NPP_COI_ERROR:
return "NPP_COI_ERROR";
case NPP_DIVISOR_ERROR:
return "NPP_DIVISOR_ERROR";
case NPP_CHANNEL_ERROR:
return "NPP_CHANNEL_ERROR";
case NPP_STRIDE_ERROR:
return "NPP_STRIDE_ERROR";
case NPP_ANCHOR_ERROR:
return "NPP_ANCHOR_ERROR";
case NPP_MASK_SIZE_ERROR:
return "NPP_MASK_SIZE_ERROR";
case NPP_MOMENT_00_ZERO_ERROR:
return "NPP_MOMENT_00_ZERO_ERROR";
case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR:
return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR";
case NPP_THRESHOLD_ERROR:
return "NPP_THRESHOLD_ERROR";
case NPP_CONTEXT_MATCH_ERROR:
return "NPP_CONTEXT_MATCH_ERROR";
case NPP_FFT_FLAG_ERROR:
return "NPP_FFT_FLAG_ERROR";
case NPP_FFT_ORDER_ERROR:
return "NPP_FFT_ORDER_ERROR";
case NPP_SCALE_RANGE_ERROR:
return "NPP_SCALE_RANGE_ERROR";
case NPP_DATA_TYPE_ERROR:
return "NPP_DATA_TYPE_ERROR";
case NPP_OUT_OFF_RANGE_ERROR:
return "NPP_OUT_OFF_RANGE_ERROR";
case NPP_DIVIDE_BY_ZERO_ERROR:
return "NPP_DIVIDE_BY_ZERO_ERROR";
case NPP_RANGE_ERROR:
return "NPP_RANGE_ERROR";
case NPP_NO_MEMORY_ERROR:
return "NPP_NO_MEMORY_ERROR";
case NPP_ERROR_RESERVED:
return "NPP_ERROR_RESERVED";
case NPP_NO_OPERATION_WARNING:
return "NPP_NO_OPERATION_WARNING";
case NPP_DIVIDE_BY_ZERO_WARNING:
return "NPP_DIVIDE_BY_ZERO_WARNING";
#endif
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000
/* These are 7.0 or higher */
case NPP_OVERFLOW_ERROR:
return "NPP_OVERFLOW_ERROR";
case NPP_CORRUPTED_DATA_ERROR:
return "NPP_CORRUPTED_DATA_ERROR";
#endif
}
return "<unknown>";
}
#endif
template <typename T>
void check(T result, char const *const func, const char *const file,
int const line) {
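  // Left as a no-op by the SYCL migration: errors surface through SYCL
  // exceptions (see the DPCT notes above) rather than CUDA error codes.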
}
#ifdef __DPCT_HPP__
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:17: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
// This will only print the proper error string when calling cudaGetLastError
// but does not exit the program in case an error is detected.
#define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__)
inline void __printLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:19: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
#endif
#ifndef MAX
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
// Float To Int conversion
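// Rounds to the nearest integer, halves away from zero:
// e.g. ftoi(2.5f) == 3 and ftoi(-2.5f) == -3.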
inline int ftoi(float value) {
return (value >= 0 ? static_cast<int>(value + 0.5)
: static_cast<int>(value - 0.5));
}
// Beginning of GPU Architecture definitions
inline int _ConvertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
  // the # of cores per SM)
typedef struct dpct_type_113531 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{0x80, 64},
{0x86, 128},
{0x87, 128},
{0x89, 128},
{0x90, 128},
{-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
  // If we don't find the values, we default to the previous one
  // so the sample still runs properly
printf(
"MapSMtoCores for SM %d.%d is undefined."
" Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
inline const char* _ConvertSMVer2ArchName(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the GPU Arch name)
typedef struct dpct_type_281558 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
const char* name;
} sSMtoArchName;
sSMtoArchName nGpuArchNameSM[] = {
{0x30, "Kepler"},
{0x32, "Kepler"},
{0x35, "Kepler"},
{0x37, "Kepler"},
{0x50, "Maxwell"},
{0x52, "Maxwell"},
{0x53, "Maxwell"},
{0x60, "Pascal"},
{0x61, "Pascal"},
{0x62, "Pascal"},
{0x70, "Volta"},
{0x72, "Xavier"},
{0x75, "Turing"},
{0x80, "Ampere"},
{0x86, "Ampere"},
{0x87, "Ampere"},
{0x89, "Ada"},
{0x90, "Hopper"},
{-1, "Graphics Device"}};
int index = 0;
while (nGpuArchNameSM[index].SM != -1) {
if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
return nGpuArchNameSM[index].name;
}
index++;
}
  // If we don't find the values, we default to the previous one
  // so the sample still runs properly
printf(
"MapSMtoArchName for SM %d.%d is undefined."
" Default to use %s\n",
major, minor, nGpuArchNameSM[index - 1].name);
return nGpuArchNameSM[index - 1].name;
}
// end of GPU Architecture definitions
#ifdef __DPCT_HPP__
// General GPU Device CUDA Initialization
inline int gpuDeviceInit(int devID) {
int device_count;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuDeviceInit() CUDA error: "
"no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
if (devID < 0) {
devID = 0;
}
if (devID > device_count - 1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n",
device_count);
fprintf(stderr,
">> gpuDeviceInit (-device=%d) is not a valid"
" GPU device. <<\n",
devID);
fprintf(stderr, "\n");
return -devID;
}
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:21: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
/*
DPCT1035:22: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
if (computeMode == 0) {
fprintf(stderr,
"Error: device is running in <Compute Mode "
"Prohibited>, no threads can use cudaSetDevice().\n");
return -1;
}
if (major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(EXIT_FAILURE);
}
/*
DPCT1093:23: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, _ConvertSMVer2ArchName(major, minor));
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
inline int gpuGetMaxGflopsDeviceId() try {
int current_device = 0, sm_per_multiproc = 0;
int max_perf_device = 0;
int device_count = 0;
int devices_prohibited = 0;
uint64_t max_compute_perf = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the best CUDA capable GPU device
current_device = 0;
while (current_device < device_count) {
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:24: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
    // If this GPU is not running in Compute Mode prohibited,
// then we can add it to the list
/*
DPCT1035:25: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (computeMode != 0) {
if (major == 9999 && minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc =
_ConvertSMVer2Cores(major, minor);
}
int multiProcessorCount = 0, clockRate = 0;
checkCudaErrors(
DPCT_CHECK_ERROR(multiProcessorCount = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_compute_units()));
dpct::err0 result =
DPCT_CHECK_ERROR(clockRate = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_clock_frequency());
uint64_t compute_perf = (uint64_t)multiProcessorCount * sm_per_multiproc * clockRate;
if (compute_perf > max_compute_perf) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
devices_prohibited++;
}
++current_device;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" all devices have compute mode prohibited.\n");
exit(EXIT_FAILURE);
}
return max_perf_device;
}
catch (sycl::exception const &exc) {
std::cerr << exc.what() << "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
std::exit(1);
}
// Initialization code to find the best CUDA Device
inline int findCudaDevice(int argc, const char **argv) {
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameter\n ");
exit(EXIT_FAILURE);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_FAILURE);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
/*
DPCT1093:26: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
major =
dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor =
dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
devID, _ConvertSMVer2ArchName(major, minor), major, minor);
}
return devID;
}
inline int findIntegratedGPU() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the integrated GPU which is compute capable
while (current_device < device_count) {
int computeMode = -1, integrated = -1;
/*
DPCT1035:27: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(
DPCT_CHECK_ERROR(integrated = dpct::dev_mgr::instance()
.get_device(current_device)
.get_integrated()));
    // If the GPU is integrated and is not running in Compute Mode prohibited,
    // then CUDA can map to GLES resources
/*
DPCT1035:28: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (integrated && (computeMode != 0)) {
/*
DPCT1093:29: The "current_device" device may be not the one intended for
use. Adjust the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(current_device)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, _ConvertSMVer2ArchName(major, minor), major, minor);
return current_device;
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No GLES-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
// General check for CUDA GPU SM Capabilities
inline bool checkCudaCapabilities(int major_version, int minor_version) {
int dev;
int major = 0, minor = 0;
checkCudaErrors(dev = dpct::dev_mgr::instance().current_device_id());
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(dev).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(dev).get_minor_version()));
if ((major > major_version) ||
(major == major_version &&
minor >= minor_version)) {
printf(" Device %d: <%16s >, Compute SM %d.%d detected\n", dev,
_ConvertSMVer2ArchName(major, minor), major, minor);
return true;
} else {
printf(
" No GPU device was found that can support "
"CUDA compute capability %d.%d.\n",
major_version, minor_version);
return false;
}
}
#endif
// end of CUDA Helper Functions
#endif // COMMON_HELPER_CUDA_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Common/helper_functions.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing,
// timers, image helpers, etc)
#ifndef COMMON_HELPER_FUNCTIONS_H_
#define COMMON_HELPER_FUNCTIONS_H_
#ifdef WIN32
#pragma warning(disable : 4996)
#endif
// includes, project
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
// includes, timer, string parsing, image helpers
#include <helper_image.h> // helper functions for image compare, dump, data comparisons
#include <helper_string.h> // helper functions for string parsing
#include <helper_timer.h> // helper functions for timers
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#endif // COMMON_HELPER_FUNCTIONS_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Samples/5_Domain_Specific/HSOpticalFlow/addKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief add two vectors of size _count_
///
/// CUDA kernel
/// \param[in] op1 term one
/// \param[in] op2 term two
/// \param[in] count vector size
/// \param[out] sum result
///////////////////////////////////////////////////////////////////////////////
void AddKernel(const float *op1, const float *op2, int count,
float *sum, const sycl::nd_item<3> &item_ct1) {
const int pos = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
if (pos >= count) return;
sum[pos] = op1[pos] + op2[pos];
}
///////////////////////////////////////////////////////////////////////////////
/// \brief add two vectors of size _count_
/// \param[in] op1 term one
/// \param[in] op2 term two
/// \param[in] count vector size
/// \param[out] sum result
///////////////////////////////////////////////////////////////////////////////
static void Add(const float *op1, const float *op2, int count, float *sum, sycl::queue q) {
sycl::range<3> threads(1, 1, 256);
sycl::range<3> blocks(1, 1, iDivUp(count, threads[2]));
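  // e.g. count == 1000 launches iDivUp(1000, 256) == 4 work-groups of 256
  // work-items; the pos >= count guard in AddKernel skips the excess items.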
q.parallel_for(
sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
AddKernel(op1, op2, count, sum, item_ct1);
});
}
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Samples/5_Domain_Specific/HSOpticalFlow/flowSYCL.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef FLOW_CUDA_H
#define FLOW_CUDA_H
void ComputeFlowCUDA(
const float *I0, // source frame
const float *I1, // tracked frame
int width, // frame width
int height, // frame height
int stride, // row access stride
float alpha, // smoothness coefficient
int nLevels, // number of levels in pyramid
int nWarpIters, // number of warping iterations per pyramid level
int nSolverIters, // number of solver iterations (for linear system)
float *u, // output horizontal flow
float *v); // output vertical flow
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Samples/5_Domain_Specific/HSOpticalFlow/flowGold.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef FLOW_GOLD_H
#define FLOW_GOLD_H
void ComputeFlowGold(
const float *I0, // source frame
const float *I1, // tracked frame
int width, // frame width
int height, // frame height
int stride, // row access stride
float alpha, // smoothness coefficient
int nLevels, // number of levels in pyramid
int nWarpIters, // number of warping iterations per pyramid level
int nIters, // number of solver iterations (for linear system)
float *u, // output horizontal flow
float *v); // output vertical flow
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Samples/5_Domain_Specific/HSOpticalFlow/flowSYCL.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
// include kernels
#include "downscaleKernel.dp.hpp"
#include "upscaleKernel.dp.hpp"
#include "warpingKernel.dp.hpp"
#include "derivativesKernel.dp.hpp"
#include "solverKernel.dp.hpp"
#include "addKernel.dp.hpp"
///////////////////////////////////////////////////////////////////////////////
/// \brief method logic
///
/// handles memory allocations, control flow
/// \param[in] I0 source image
/// \param[in] I1 tracked image
/// \param[in] width images width
/// \param[in] height images height
/// \param[in] stride images stride
/// \param[in] alpha degree of displacement field smoothness
/// \param[in] nLevels number of levels in a pyramid
/// \param[in] nWarpIters number of warping iterations per pyramid level
/// \param[in] nSolverIters number of solver iterations (Jacobi iterations)
/// \param[out] u horizontal displacement
/// \param[out] v vertical displacement
///////////////////////////////////////////////////////////////////////////////
void ComputeFlowCUDA(const float *I0, const float *I1, int width, int height,
int stride, float alpha, int nLevels, int nWarpIters,
int nSolverIters, float *u, float *v) {
printf("Computing optical flow on Device...\n");
sycl::queue q{aspect_selector(sycl::aspect::ext_intel_legacy_image), sycl::property::queue::in_order()};
std::cout << "\nRunning on "
<< q.get_device().get_info<sycl::info::device::name>() << "\n";
// pI0 and pI1 will hold device pointers
const float **pI0 = new const float *[nLevels];
const float **pI1 = new const float *[nLevels];
int *pW = new int[nLevels];
int *pH = new int[nLevels];
int *pS = new int[nLevels];
// device memory pointers
float *d_tmp;
float *d_du0;
float *d_dv0;
float *d_du1;
float *d_dv1;
float *d_Ix;
float *d_Iy;
float *d_Iz;
float *d_u;
float *d_v;
float *d_nu;
float *d_nv;
const int dataSize = stride * height * sizeof(float);
checkCudaErrors(DPCT_CHECK_ERROR(d_tmp = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_du0 = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_dv0 = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_du1 = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_dv1 = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_Ix = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_Iy = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_Iz = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(
d_u = (float *)sycl::malloc_device(dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(
d_v = (float *)sycl::malloc_device(dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_nu = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_nv = (float *)sycl::malloc_device(
dataSize, q)));
// prepare pyramid
int currentLevel = nLevels - 1;
// allocate GPU memory for input images
checkCudaErrors(DPCT_CHECK_ERROR(
*(pI0 + currentLevel) = (const float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(
*(pI1 + currentLevel) = (const float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(q.memcpy((void *)pI0[currentLevel], I0, dataSize).wait()));
checkCudaErrors(
DPCT_CHECK_ERROR(q.memcpy((void *)pI1[currentLevel], I1, dataSize).wait()));
pW[currentLevel] = width;
pH[currentLevel] = height;
pS[currentLevel] = stride;
for (; currentLevel > 0; --currentLevel) {
int nw = pW[currentLevel] / 2;
int nh = pH[currentLevel] / 2;
int ns = iAlignUp(nw);
checkCudaErrors(DPCT_CHECK_ERROR(
*(pI0 + currentLevel - 1) = (const float *)sycl::malloc_device(
ns * nh * sizeof(float), q)));
checkCudaErrors(DPCT_CHECK_ERROR(
*(pI1 + currentLevel - 1) = (const float *)sycl::malloc_device(
ns * nh * sizeof(float), q)));
Downscale(pI0[currentLevel], pW[currentLevel],
pH[currentLevel], pS[currentLevel], nw, nh, ns,
(float *)pI0[currentLevel - 1], q);
Downscale(pI1[currentLevel], pW[currentLevel],
pH[currentLevel], pS[currentLevel], nw, nh, ns,
(float *)pI1[currentLevel - 1], q);
pW[currentLevel - 1] = nw;
pH[currentLevel - 1] = nh;
pS[currentLevel - 1] = ns;
}
checkCudaErrors(
DPCT_CHECK_ERROR(q.memset(d_u, 0, stride * height * sizeof(float))));
checkCudaErrors(
DPCT_CHECK_ERROR(q.memset(d_v, 0, stride * height * sizeof(float))));
checkCudaErrors(
DPCT_CHECK_ERROR(q.wait()));
// compute flow
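  // coarse-to-fine: after building the pyramid, currentLevel is 0 (coarsest);
  // the flow estimate is refined up to level nLevels - 1 (full resolution)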
for (; currentLevel < nLevels; ++currentLevel) {
for (int warpIter = 0; warpIter < nWarpIters; ++warpIter) {
checkCudaErrors(DPCT_CHECK_ERROR(
q.memset(d_du0, 0, dataSize)));
checkCudaErrors(DPCT_CHECK_ERROR(
q.memset(d_dv0, 0, dataSize)));
checkCudaErrors(DPCT_CHECK_ERROR(
q.memset(d_du1, 0, dataSize)));
checkCudaErrors(DPCT_CHECK_ERROR(
q.memset(d_dv1, 0, dataSize)));
// on current level we compute optical flow
// between frame 0 and warped frame 1
WarpImage(pI1[currentLevel], pW[currentLevel], pH[currentLevel],
pS[currentLevel], d_u, d_v, d_tmp, q);
ComputeDerivatives(pI0[currentLevel], d_tmp, pW[currentLevel],
pH[currentLevel], pS[currentLevel], d_Ix, d_Iy, d_Iz, q);
for (int iter = 0; iter < nSolverIters; ++iter) {
SolveForUpdate(d_du0, d_dv0, d_Ix, d_Iy, d_Iz, pW[currentLevel],
pH[currentLevel], pS[currentLevel], alpha, d_du1, d_dv1, q);
Swap(d_du0, d_du1);
Swap(d_dv0, d_dv1);
}
// update u, v
Add(d_u, d_du0, pH[currentLevel] * pS[currentLevel], d_u, q);
Add(d_v, d_dv0, pH[currentLevel] * pS[currentLevel], d_v, q);
}
if (currentLevel != nLevels - 1) {
// prolongate solution
float scaleX = (float)pW[currentLevel + 1] / (float)pW[currentLevel];
Upscale(d_u, pW[currentLevel], pH[currentLevel], pS[currentLevel],
pW[currentLevel + 1], pH[currentLevel + 1], pS[currentLevel + 1],
scaleX, d_nu, q);
float scaleY = (float)pH[currentLevel + 1] / (float)pH[currentLevel];
Upscale(d_v, pW[currentLevel], pH[currentLevel], pS[currentLevel],
pW[currentLevel + 1], pH[currentLevel + 1], pS[currentLevel + 1],
scaleY, d_nv, q);
Swap(d_u, d_nu);
Swap(d_v, d_nv);
}
}
checkCudaErrors(DPCT_CHECK_ERROR(
q.memcpy(u, d_u, dataSize)));
checkCudaErrors(DPCT_CHECK_ERROR(
q.memcpy(v, d_v, dataSize)));
checkCudaErrors(
DPCT_CHECK_ERROR(q.wait()));
// cleanup
for (int i = 0; i < nLevels; ++i) {
checkCudaErrors(DPCT_CHECK_ERROR(
sycl::free((void *)pI0[i], q)));
checkCudaErrors(DPCT_CHECK_ERROR(
sycl::free((void *)pI1[i], q)));
}
delete[] pI0;
delete[] pI1;
delete[] pW;
delete[] pH;
delete[] pS;
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_tmp, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_du0, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_dv0, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_du1, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_dv1, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_Ix, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_Iy, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_Iz, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_nu, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_nv, q)));
checkCudaErrors(DPCT_CHECK_ERROR(sycl::free(d_u, q)));
checkCudaErrors(DPCT_CHECK_ERROR(sycl::free(d_v, q)));
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Samples/5_Domain_Specific/HSOpticalFlow/common.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
///////////////////////////////////////////////////////////////////////////////
// Header for common includes and utility functions
///////////////////////////////////////////////////////////////////////////////
#ifndef COMMON_H
#define COMMON_H
///////////////////////////////////////////////////////////////////////////////
// Common includes
///////////////////////////////////////////////////////////////////////////////
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <memory.h>
#include <math.h>
#include <helper_cuda.h>
///////////////////////////////////////////////////////////////////////////////
// Common constants
///////////////////////////////////////////////////////////////////////////////
const int StrideAlignment = 8;
///////////////////////////////////////////////////////////////////////////////
// Common functions
///////////////////////////////////////////////////////////////////////////////
// Align up n to the nearest multiple of m
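// e.g. iAlignUp(13, 8) == 16; values already aligned are returned unchanged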
inline int iAlignUp(int n, int m = StrideAlignment) {
int mod = n % m;
if (mod)
return n + m - mod;
else
return n;
}
// round up n/m
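// e.g. iDivUp(10, 4) == 3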
inline int iDivUp(int n, int m) { return (n + m - 1) / m; }
// swap two values
template <typename T>
inline void Swap(T &a, T &b) {
T t = a;
a = b;
b = t;
}
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Samples/5_Domain_Specific/HSOpticalFlow/downscaleKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief downscale image
///
/// CUDA kernel, relies heavily on texture unit
/// \param[in] width image width
/// \param[in] height image height
/// \param[in] stride image stride
/// \param[out] out result
///////////////////////////////////////////////////////////////////////////////
void DownscaleKernel(int width, int height, int stride, float *out,
sycl::accessor<sycl::float4, 2, sycl::access::mode::read,
sycl::access::target::image>
tex_acc,
sycl::sampler texDesc,
const sycl::nd_item<3> &item_ct1) {
const int ix = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int iy = item_ct1.get_local_id(1) +
item_ct1.get_group(1) * item_ct1.get_local_range(1);
if (ix >= width || iy >= height) {
return;
}
int srcx = ix * 2;
int srcy = iy * 2;
auto inputCoords1 = sycl::float2(srcx + 0, srcy + 0);
auto inputCoords2 = sycl::float2(srcx + 0, srcy + 1);
auto inputCoords3 = sycl::float2(srcx + 1, srcy + 0);
auto inputCoords4 = sycl::float2(srcx + 1, srcy + 1);
out[ix + iy * stride] = 0.25f * (tex_acc.read(inputCoords1, texDesc)[0] +
tex_acc.read(inputCoords2, texDesc)[0] +
tex_acc.read(inputCoords3, texDesc)[0] +
tex_acc.read(inputCoords4, texDesc)[0]);
}
///////////////////////////////////////////////////////////////////////////////
/// \brief downscale image
///
/// \param[in] src image to downscale
/// \param[in] width image width
/// \param[in] height image height
/// \param[in] stride image stride
/// \param[out] out result
///////////////////////////////////////////////////////////////////////////////
static void Downscale(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float *out, sycl::queue q) {
sycl::range<3> threads(1, 8, 32);
sycl::range<3> blocks(1, iDivUp(newHeight, threads[1]),
iDivUp(newWidth, threads[2]));
int dataSize = height * stride * sizeof(float);
float *src_h = (float *)malloc(dataSize);
q.memcpy(src_h, src, dataSize).wait();
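  // Stage the single-channel source into an RGBA (float4) shared buffer so it
  // can back the rgba/fp32 sycl::image below; only channel 0 carries data and
  // the kernel reads component [0].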
float *src_p =
(float *)sycl::malloc_shared(height * stride * sizeof(sycl::float4), q);
for (int i = 0; i < 4 * height * stride; i++) src_p[i] = 0.f;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
int index = i * stride + j;
src_p[index * 4 + 0] = src_h[index];
src_p[index * 4 + 1] = src_p[index * 4 + 2] = src_p[index * 4 + 3] = 0.f;
}
}
auto texDescr = sycl::sampler(
sycl::coordinate_normalization_mode::unnormalized,
sycl::addressing_mode::clamp_to_edge, sycl::filtering_mode::nearest);
auto texFine = sycl::image<2>(src_p, sycl::image_channel_order::rgba,
sycl::image_channel_type::fp32,
sycl::range<2>(width, height),
sycl::range<1>(stride * sizeof(sycl::float4)));
q.submit([&](sycl::handler &cgh) {
auto tex_acc =
texFine.template get_access<sycl::float4,
sycl::access::mode::read>(cgh);
cgh.parallel_for(sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
DownscaleKernel(newWidth, newHeight, newStride, out,
tex_acc, texDescr, item_ct1);
});
});
} | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Samples/5_Domain_Specific/HSOpticalFlow/main.cpp.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
const static char *const sSDKsample = "HSOpticalFlow";
// CPU-GPU discrepancy threshold for self-test
const float THRESHOLD = 0.05f;
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
#include "flowGold.h"
#include "flowSYCL.h"
#include <helper_functions.h>
#include <cmath>
#include <chrono>
using Time = std::chrono::steady_clock;
using ms = std::chrono::milliseconds;
using float_ms = std::chrono::duration<float, ms::period>;
///////////////////////////////////////////////////////////////////////////////
/// \brief save optical flow in format described on vision.middlebury.edu/flow
/// \param[in] name output file name
/// \param[in] w optical flow field width
/// \param[in] h optical flow field height
/// \param[in] s optical flow field row stride
/// \param[in] u horizontal displacement
/// \param[in] v vertical displacement
///////////////////////////////////////////////////////////////////////////////
void WriteFloFile(const char *name, int w, int h, int s, const float *u,
const float *v) {
FILE *stream;
stream = fopen(name, "wb");
if (stream == 0) {
printf("Could not save flow to \"%s\"\n", name);
return;
}
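  // Middlebury .flo header: the magic value 202021.25f (stored bytes read as
  // "PIEH") followed by width and height as 32-bit integers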
float data = 202021.25f;
fwrite(&data, sizeof(float), 1, stream);
fwrite(&w, sizeof(w), 1, stream);
fwrite(&h, sizeof(h), 1, stream);
for (int i = 0; i < h; ++i) {
for (int j = 0; j < w; ++j) {
const int pos = j + i * s;
fwrite(u + pos, sizeof(float), 1, stream);
fwrite(v + pos, sizeof(float), 1, stream);
}
}
fclose(stream);
}
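///////////////////////////////////////////////////////////////////////////////
/// \brief read optical flow back from a Middlebury .flo file
///
/// Illustrative sketch only, not part of the original sample: the hypothetical
/// helper assumes the layout written by WriteFloFile above - the 202021.25f
/// magic value, width and height as ints, then interleaved (u, v) float pairs
/// in row-major order with no padding.
/// \param[in]  name input file name
/// \param[out] w    flow field width
/// \param[out] h    flow field height
/// \param[out] u    horizontal displacement (w * h floats, caller frees)
/// \param[out] v    vertical displacement (w * h floats, caller frees)
/// \return true if the file was read successfully
///////////////////////////////////////////////////////////////////////////////
bool ReadFloFile(const char *name, int &w, int &h, float *&u, float *&v) {
  FILE *stream = fopen(name, "rb");
  if (stream == 0) return false;
  // validate the magic value written by WriteFloFile
  float magic = 0.0f;
  if (fread(&magic, sizeof(float), 1, stream) != 1 || magic != 202021.25f) {
    fclose(stream);
    return false;
  }
  if (fread(&w, sizeof(w), 1, stream) != 1 ||
      fread(&h, sizeof(h), 1, stream) != 1) {
    fclose(stream);
    return false;
  }
  u = new float[w * h];
  v = new float[w * h];
  for (int i = 0; i < w * h; ++i) {
    // values are stored as interleaved (u, v) pairs
    fread(u + i, sizeof(float), 1, stream);
    fread(v + i, sizeof(float), 1, stream);
  }
  fclose(stream);
  return true;
}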
///////////////////////////////////////////////////////////////////////////////
/// \brief
/// load 4-channel unsigned byte image
/// and convert it to single channel FP32 image
/// \param[out] img_data pointer to raw image data
/// \param[out] img_w image width
/// \param[out] img_h image height
/// \param[out] img_s image row stride
/// \param[in] name image file name
/// \param[in] exePath executable file path
/// \return true if image is successfully loaded or false otherwise
///////////////////////////////////////////////////////////////////////////////
bool LoadImageAsFP32(float *&img_data, int &img_w, int &img_h, int &img_s,
const char *name, const char *exePath) {
printf("Loading \"%s\" ...\n", name);
char *name_ = sdkFindFilePath(name, exePath);
if (!name_) {
printf("File not found\n");
return false;
}
unsigned char *data = 0;
unsigned int w = 0, h = 0;
bool result = sdkLoadPPM4ub(name_, &data, &w, &h);
if (result == false) {
printf("Invalid file format\n");
return false;
}
img_w = w;
img_h = h;
img_s = iAlignUp(img_w);
img_data = new float[img_s * h];
// source is 4 channel image
const int widthStep = 4 * img_w;
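  // keep only the first (red) channel and normalize 8-bit values to [0, 1]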
for (int i = 0; i < img_h; ++i) {
for (int j = 0; j < img_w; ++j) {
img_data[j + i * img_s] = ((float)data[j * 4 + i * widthStep]) / 255.0f;
}
}
return true;
}
///////////////////////////////////////////////////////////////////////////////
/// \brief compare given flow field with gold (L1 norm)
/// \param[in] width optical flow field width
/// \param[in] height optical flow field height
/// \param[in] stride optical flow field row stride
/// \param[in] h_uGold horizontal displacement, gold
/// \param[in] h_vGold vertical displacement, gold
/// \param[in] h_u horizontal displacement
/// \param[in] h_v vertical displacement
/// \return true if discrepancy is lower than a given threshold
///////////////////////////////////////////////////////////////////////////////
bool CompareWithGold(int width, int height, int stride, const float *h_uGold,
const float *h_vGold, const float *h_u, const float *h_v) {
float error = 0.0f;
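  // accumulate per-pixel absolute differences of both flow components (L1),
  // then average over all pixels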
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; ++j) {
const int pos = j + i * stride;
error += fabsf(h_u[pos] - h_uGold[pos]) + fabsf(h_v[pos] - h_vGold[pos]);
}
}
error /= (float)(width * height);
printf("L1 error : %.6f\n", error);
return (error < 1.0f);
}
///////////////////////////////////////////////////////////////////////////////
/// application entry point
///////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
// welcome message
printf("%s Starting...\n\n", sSDKsample);
// find images
const char *const sourceFrameName = "frame10.ppm";
const char *const targetFrameName = "frame11.ppm";
// image dimensions
int width;
int height;
// row access stride
int stride;
// flow is computed from source image to target image
float *h_source; // source image, host memory
float *h_target; // target image, host memory
// load image from file
if (!LoadImageAsFP32(h_source, width, height, stride, sourceFrameName,
argv[0])) {
exit(EXIT_FAILURE);
}
if (!LoadImageAsFP32(h_target, width, height, stride, targetFrameName,
argv[0])) {
exit(EXIT_FAILURE);
}
// allocate host memory for CPU results
float *h_uGold = new float[stride * height];
float *h_vGold = new float[stride * height];
// allocate host memory for GPU results
float *h_u = new float[stride * height];
float *h_v = new float[stride * height];
// smoothness
// if image brightness is not within [0,1]
  // this parameter should be scaled appropriately
const float alpha = 0.2f;
// number of pyramid levels
const int nLevels = 5;
// number of solver iterations on each level
const int nSolverIters = 500;
// number of warping iterations
const int nWarpIters = 3;
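  // both the CPU reference and the SYCL path use the same coarse-to-fine
  // scheme: nLevels pyramid levels, nWarpIters warps per level and
  // nSolverIters Jacobi iterations per warp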
// start Host Timer
auto startGoldTime = Time::now();
ComputeFlowGold(h_source, h_target, width, height, stride, alpha, nLevels,
nWarpIters, nSolverIters, h_uGold, h_vGold);
// stop Host timer
auto stopGoldTime = Time::now();
// start Device Timer
auto startSYCLTime = Time::now();
ComputeFlowCUDA(h_source, h_target, width, height, stride, alpha, nLevels,
nWarpIters, nSolverIters, h_u, h_v);
// stop Device Timer
auto stopSYCLTime = Time::now();
auto Gold_duration =
std::chrono::duration_cast<float_ms>(stopGoldTime - startGoldTime)
.count();
printf("Processing time on CPU: %f (ms)\n", Gold_duration);
auto SYCL_duration =
std::chrono::duration_cast<float_ms>(stopSYCLTime - startSYCLTime)
.count();
printf("Processing time on Device: %f (ms)\n", SYCL_duration);
// compare results (L1 norm)
bool status =
CompareWithGold(width, height, stride, h_uGold, h_vGold, h_u, h_v);
WriteFloFile("FlowGPU.flo", width, height, stride, h_u, h_v);
WriteFloFile("FlowCPU.flo", width, height, stride, h_uGold, h_vGold);
// free resources
delete[] h_uGold;
delete[] h_vGold;
delete[] h_u;
delete[] h_v;
delete[] h_source;
delete[] h_target;
// report self-test status
exit(status ? EXIT_SUCCESS : EXIT_FAILURE);
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Samples/5_Domain_Specific/HSOpticalFlow/flowGold.cpp.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "common.h"
#include "flowGold.h"
#include <cmath>
///////////////////////////////////////////////////////////////////////////////
/// \brief host texture fetch
///
/// read from arbitrary position within image using bilinear interpolation
/// out of range coords are clamped to the nearest valid texel
/// \param[in] t texture raw data
/// \param[in] w texture width
/// \param[in] h texture height
/// \param[in] s texture stride
/// \param[in] x x coord of the point to fetch value at
/// \param[in] y y coord of the point to fetch value at
/// \return fetched value
///////////////////////////////////////////////////////////////////////////////
inline float Tex2D(const float *t, int w, int h, int s, float x, float y) {
// integer parts in floating point format
float intPartX, intPartY;
x -= 0.5f;
y -= 0.5f;
// get fractional parts of coordinates
float dx = fabsf(modff(x, &intPartX));
float dy = fabsf(modff(y, &intPartY));
// assume pixels are squares
// one of the corners
int ix0 = (int)intPartX;
int iy0 = (int)intPartY;
// mirror out-of-range position
if (ix0 < 0) ix0 = 0;
if (iy0 < 0) iy0 = 0;
if (ix0 >= w) ix0 = w - 1;
if (iy0 >= h) iy0 = h - 1;
// corner which is opposite to (ix0, iy0)
int ix1 = ix0 + 1;
int iy1 = iy0 + 1;
if (ix1 >= w) ix1 = w - 1;
if (iy1 >= h) iy1 = h - 1;
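  // blend the four neighbouring texels with bilinear weights
  // (1 - dx)(1 - dy), dx(1 - dy), (1 - dx)dy and dx * dy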
float res = t[ix0 + iy0 * s] * (1.0f - dx) * (1.0f - dy);
res += t[ix1 + iy0 * s] * dx * (1.0f - dy);
res += t[ix0 + iy1 * s] * (1.0f - dx) * dy;
res += t[ix1 + iy1 * s] * dx * dy;
return res;
}
///////////////////////////////////////////////////////////////////////////////
/// \brief host texture fetch
///
/// read specific texel value
/// out of range coords are clamped to the image border
/// \param[in] t texture raw data
/// \param[in] w texture width
/// \param[in] h texture height
/// \param[in] s texture stride
/// \param[in] x x coord of the point to fetch value at
/// \param[in] y y coord of the point to fetch value at
/// \return fetched value
///////////////////////////////////////////////////////////////////////////////
inline float Tex2Di(const float *src, int w, int h, int s, int x, int y) {
if (x < 0) x = 0;
if (y < 0) y = 0;
if (x >= w) x = w - 1;
if (y >= h) y = h - 1;
return src[x + y * s];
}
///////////////////////////////////////////////////////////////////////////////
/// \brief resize image
/// \param[in] src image to downscale
/// \param[in] width image width
/// \param[in] height image height
/// \param[in] stride image stride
/// \param[in] newWidth image new width
/// \param[in] newHeight image new height
/// \param[in] newStride image new stride
/// \param[out] out downscaled image data
///////////////////////////////////////////////////////////////////////////////
static void Downscale(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float *out) {
for (int i = 0; i < newHeight; ++i) {
for (int j = 0; j < newWidth; ++j) {
const int srcX = j * 2;
const int srcY = i * 2;
// average 4 neighbouring pixels
float sum;
sum = Tex2Di(src, width, height, stride, srcX + 0, srcY + 0);
sum += Tex2Di(src, width, height, stride, srcX + 0, srcY + 1);
sum += Tex2Di(src, width, height, stride, srcX + 1, srcY + 0);
sum += Tex2Di(src, width, height, stride, srcX + 1, srcY + 1);
// normalize
sum *= 0.25f;
out[j + i * newStride] = sum;
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief upscale one component of a displacement field
/// \param[in] src field component to upscale
/// \param[in] width field current width
/// \param[in] height field current height
/// \param[in] stride field current stride
/// \param[in] newWidth field new width
/// \param[in] newHeight field new height
/// \param[in] newStride field new stride
/// \param[in] scale value scale factor (multiplier)
/// \param[out] out upscaled field component
///////////////////////////////////////////////////////////////////////////////
static void Upscale(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float scale,
float *out) {
for (int i = 0; i < newHeight; ++i) {
for (int j = 0; j < newWidth; ++j) {
// position within smaller image
float x = ((float)j - 0.5f) * 0.5f;
float y = ((float)i - 0.5f) * 0.5f;
out[j + i * newStride] = Tex2D(src, width, height, stride, x, y) * scale;
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief warp image with provided vector field
///
/// For each output pixel there is a vector which tells which pixel
/// from a source image should be mapped to this particular output
/// pixel.
/// It is assumed that images and the vector field have the same stride and
/// resolution.
/// \param[in] src source image
/// \param[in] w width
/// \param[in] h height
/// \param[in] s stride
/// \param[in] u horizontal displacement
/// \param[in] v vertical displacement
/// \param[out] out warped image
///////////////////////////////////////////////////////////////////////////////
static void WarpImage(const float *src, int w, int h, int s, const float *u,
const float *v, float *out) {
for (int i = 0; i < h; ++i) {
for (int j = 0; j < w; ++j) {
const int pos = j + i * s;
// warped coords
float x = (float)j + u[pos];
float y = (float)i + v[pos];
out[pos] = Tex2D(src, w, h, s, x, y);
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief computes image derivatives for a pair of images
/// \param[in] I0 source image
/// \param[in] I1 tracked image
/// \param[in] w images width
/// \param[in] h images height
/// \param[in] s images stride
/// \param[out] Ix x derivative
/// \param[out] Iy y derivative
/// \param[out] Iz temporal derivative
///////////////////////////////////////////////////////////////////////////////
static void ComputeDerivatives(const float *I0, const float *I1, int w, int h,
int s, float *Ix, float *Iy, float *Iz) {
for (int i = 0; i < h; ++i) {
for (int j = 0; j < w; ++j) {
const int pos = j + i * s;
float t0, t1;
// derivative filter is (1, -8, 0, 8, -1)/12
// x derivative
t0 = Tex2Di(I0, w, h, s, j - 2, i);
t0 -= Tex2Di(I0, w, h, s, j - 1, i) * 8.0f;
t0 += Tex2Di(I0, w, h, s, j + 1, i) * 8.0f;
t0 -= Tex2Di(I0, w, h, s, j + 2, i);
t0 /= 12.0f;
t1 = Tex2Di(I1, w, h, s, j - 2, i);
t1 -= Tex2Di(I1, w, h, s, j - 1, i) * 8.0f;
t1 += Tex2Di(I1, w, h, s, j + 1, i) * 8.0f;
t1 -= Tex2Di(I1, w, h, s, j + 2, i);
t1 /= 12.0f;
// spatial derivatives are averaged
Ix[pos] = (t0 + t1) * 0.5f;
// t derivative
Iz[pos] = I1[pos] - I0[pos];
// y derivative
t0 = Tex2Di(I0, w, h, s, j, i - 2);
t0 -= Tex2Di(I0, w, h, s, j, i - 1) * 8.0f;
t0 += Tex2Di(I0, w, h, s, j, i + 1) * 8.0f;
t0 -= Tex2Di(I0, w, h, s, j, i + 2);
t0 /= 12.0f;
t1 = Tex2Di(I1, w, h, s, j, i - 2);
t1 -= Tex2Di(I1, w, h, s, j, i - 1) * 8.0f;
t1 += Tex2Di(I1, w, h, s, j, i + 1) * 8.0f;
t1 -= Tex2Di(I1, w, h, s, j, i + 2);
t1 /= 12.0f;
Iy[pos] = (t0 + t1) * 0.5f;
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief one iteration of classical Horn-Schunck method
///
/// It is one iteration of Jacobi method for a corresponding linear system
/// \param[in] du0 current horizontal displacement approximation
/// \param[in] dv0 current vertical displacement approximation
/// \param[in] Ix image x derivative
/// \param[in] Iy image y derivative
/// \param[in] Iz temporal derivative
/// \param[in] w width
/// \param[in] h height
/// \param[in] s stride
/// \param[in] alpha degree of smoothness
/// \param[out] du1 new horizontal displacement approximation
/// \param[out] dv1 new vertical displacement approximation
///////////////////////////////////////////////////////////////////////////////
static void SolveForUpdate(const float *du0, const float *dv0, const float *Ix,
const float *Iy, const float *Iz, int w, int h,
int s, float alpha, float *du1, float *dv1) {
for (int i = 0; i < h; ++i) {
for (int j = 0; j < w; ++j) {
const int pos = j + i * s;
int left, right, up, down;
// handle borders
if (j != 0)
left = pos - 1;
else
left = pos;
if (j != w - 1)
right = pos + 1;
else
right = pos;
if (i != 0)
down = pos - s;
else
down = pos;
if (i != h - 1)
up = pos + s;
else
up = pos;
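      // Horn-Schunck Jacobi update: start from the 4-neighbour average and
      // subtract the data-term correction scaled by the image gradient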
float sumU = (du0[left] + du0[right] + du0[up] + du0[down]) * 0.25f;
float sumV = (dv0[left] + dv0[right] + dv0[up] + dv0[down]) * 0.25f;
float frac = (Ix[pos] * sumU + Iy[pos] * sumV + Iz[pos]) /
(Ix[pos] * Ix[pos] + Iy[pos] * Iy[pos] + alpha);
du1[pos] = sumU - Ix[pos] * frac;
dv1[pos] = sumV - Iy[pos] * frac;
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief method logic
///
/// handles memory allocation and control flow
/// \param[in] I0 source image
/// \param[in] I1 tracked image
/// \param[in] width images width
/// \param[in] height images height
/// \param[in] stride images stride
/// \param[in] alpha degree of displacement field smoothness
/// \param[in] nLevels number of levels in a pyramid
/// \param[in] nWarpIters number of warping iterations per pyramid level
/// \param[in] nSolverIters number of solver iterations (Jacobi iterations)
/// \param[out] u horizontal displacement
/// \param[out] v vertical displacement
///////////////////////////////////////////////////////////////////////////////
void ComputeFlowGold(const float *I0, const float *I1, int width, int height,
int stride, float alpha, int nLevels, int nWarpIters,
int nSolverIters, float *u, float *v) {
printf("Computing optical flow on CPU...\n");
float *u0 = u;
float *v0 = v;
const float **pI0 = new const float *[nLevels];
const float **pI1 = new const float *[nLevels];
int *pW = new int[nLevels];
int *pH = new int[nLevels];
int *pS = new int[nLevels];
const int pixelCountAligned = height * stride;
float *tmp = new float[pixelCountAligned];
float *du0 = new float[pixelCountAligned];
float *dv0 = new float[pixelCountAligned];
float *du1 = new float[pixelCountAligned];
float *dv1 = new float[pixelCountAligned];
float *Ix = new float[pixelCountAligned];
float *Iy = new float[pixelCountAligned];
float *Iz = new float[pixelCountAligned];
float *nu = new float[pixelCountAligned];
float *nv = new float[pixelCountAligned];
// prepare pyramid
int currentLevel = nLevels - 1;
pI0[currentLevel] = I0;
pI1[currentLevel] = I1;
pW[currentLevel] = width;
pH[currentLevel] = height;
pS[currentLevel] = stride;
for (; currentLevel > 0; --currentLevel) {
int nw = pW[currentLevel] / 2;
int nh = pH[currentLevel] / 2;
int ns = iAlignUp(nw);
pI0[currentLevel - 1] = new float[ns * nh];
pI1[currentLevel - 1] = new float[ns * nh];
Downscale(pI0[currentLevel], pW[currentLevel], pH[currentLevel],
pS[currentLevel], nw, nh, ns, (float *)pI0[currentLevel - 1]);
Downscale(pI1[currentLevel], pW[currentLevel], pH[currentLevel],
pS[currentLevel], nw, nh, ns, (float *)pI1[currentLevel - 1]);
pW[currentLevel - 1] = nw;
pH[currentLevel - 1] = nh;
pS[currentLevel - 1] = ns;
}
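  // the loop leaves currentLevel == 0, so the flow below is refined from the
  // coarsest level up to the original resolution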
// initial approximation
memset(u, 0, stride * height * sizeof(float));
memset(v, 0, stride * height * sizeof(float));
// compute flow
for (; currentLevel < nLevels; ++currentLevel) {
for (int warpIter = 0; warpIter < nWarpIters; ++warpIter) {
memset(du0, 0, pixelCountAligned * sizeof(float));
memset(dv0, 0, pixelCountAligned * sizeof(float));
memset(du1, 0, pixelCountAligned * sizeof(float));
memset(dv1, 0, pixelCountAligned * sizeof(float));
WarpImage(pI1[currentLevel], pW[currentLevel], pH[currentLevel],
pS[currentLevel], u, v, tmp);
// on current level we compute optical flow
// between frame 0 and warped frame 1
ComputeDerivatives(pI0[currentLevel], tmp, pW[currentLevel],
pH[currentLevel], pS[currentLevel], Ix, Iy, Iz);
for (int iter = 0; iter < nSolverIters; ++iter) {
SolveForUpdate(du0, dv0, Ix, Iy, Iz, pW[currentLevel], pH[currentLevel],
pS[currentLevel], alpha, du1, dv1);
Swap(du0, du1);
Swap(dv0, dv1);
}
// update u, v
for (int i = 0; i < pH[currentLevel] * pS[currentLevel]; ++i) {
u[i] += du0[i];
v[i] += dv0[i];
}
} // end for (int warpIter = 0; warpIter < nWarpIters; ++warpIter)
if (currentLevel != nLevels - 1) {
// prolongate solution
float scaleX = (float)pW[currentLevel + 1] / (float)pW[currentLevel];
Upscale(u, pW[currentLevel], pH[currentLevel], pS[currentLevel],
pW[currentLevel + 1], pH[currentLevel + 1], pS[currentLevel + 1],
scaleX, nu);
float scaleY = (float)pH[currentLevel + 1] / (float)pH[currentLevel];
Upscale(v, pW[currentLevel], pH[currentLevel], pS[currentLevel],
pW[currentLevel + 1], pH[currentLevel + 1], pS[currentLevel + 1],
scaleY, nv);
Swap(u, nu);
Swap(v, nv);
}
} // end for (; currentLevel < nLevels; ++currentLevel)
if (u != u0) {
// solution is not in the specified array
// copy
memcpy(u0, u, pixelCountAligned * sizeof(float));
memcpy(v0, v, pixelCountAligned * sizeof(float));
Swap(u, nu);
Swap(v, nv);
}
// cleanup
// last level is not being freed here
// because it refers to input images
for (int i = 0; i < nLevels - 1; ++i) {
delete[] pI0[i];
delete[] pI1[i];
}
delete[] pI0;
delete[] pI1;
delete[] pW;
delete[] pH;
delete[] pS;
delete[] tmp;
delete[] du0;
delete[] dv0;
delete[] du1;
delete[] dv1;
delete[] Ix;
delete[] Iy;
delete[] Iz;
delete[] nu;
delete[] nv;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Samples/5_Domain_Specific/HSOpticalFlow/warpingKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief warp image with a given displacement field, CUDA kernel.
/// \param[in] width image width
/// \param[in] height image height
/// \param[in] stride image stride
/// \param[in] u horizontal displacement
/// \param[in] v vertical displacement
/// \param[out] out result
///////////////////////////////////////////////////////////////////////////////
void WarpingKernel(int width, int height, int stride, const float *u,
const float *v, float *out,
sycl::accessor<sycl::float4, 2, sycl::access::mode::read,
sycl::access::target::image>
texToWarp,
sycl::sampler texDesc,
const sycl::nd_item<3> &item_ct1) {
const int ix = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int iy = item_ct1.get_local_id(1) +
item_ct1.get_group(1) * item_ct1.get_local_range(1);
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float x = ((float)ix + u[pos]);
float y = ((float)iy + v[pos]);
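  // fetch the source pixel at the displaced position; the sampler is created
  // with linear filtering, so the read is bilinearly interpolated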
auto inputCoord = sycl::float2(x, y);
out[pos] = texToWarp.read(inputCoord, texDesc)[0];
}
///////////////////////////////////////////////////////////////////////////////
/// \brief warp image with provided vector field, CUDA kernel wrapper.
///
/// For each output pixel there is a vector which tells which pixel
/// from a source image should be mapped to this particular output
/// pixel.
/// It is assumed that images and the vector field have the same stride and
/// resolution.
/// \param[in] src source image
/// \param[in] w width
/// \param[in] h height
/// \param[in] s stride
/// \param[in] u horizontal displacement
/// \param[in] v vertical displacement
/// \param[out] out warped image
///////////////////////////////////////////////////////////////////////////////
static void WarpImage(const float *src, int w, int h, int s, const float *u,
const float *v, float *out, sycl::queue q) {
sycl::range<3> threads(1, 6, 32);
sycl::range<3> blocks(1, iDivUp(h, threads[1]), iDivUp(w, threads[2]));
int dataSize = s * h * sizeof(float);
float *src_h = (float *)malloc(dataSize);
q.memcpy(src_h, src, dataSize).wait();
float *src_p = (float *)sycl::malloc_shared(h * s * sizeof(sycl::float4), q);
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
int index = i * s + j;
src_p[index * 4 + 0] = src_h[index];
src_p[index * 4 + 1] = src_p[index * 4 + 2] = src_p[index * 4 + 3] = 0.f;
}
}
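  // the single-channel image is expanded to a float4 (RGBA) layout to match
  // the rgba channel order of the sycl::image below; only component 0 is read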
auto texDescr = sycl::sampler(
sycl::coordinate_normalization_mode::unnormalized,
sycl::addressing_mode::clamp_to_edge, sycl::filtering_mode::linear);
auto texToWarp =
sycl::image<2>(src_p, sycl::image_channel_order::rgba,
sycl::image_channel_type::fp32, sycl::range<2>(w, h),
sycl::range<1>(s * sizeof(sycl::float4)));
q.submit([&](sycl::handler &cgh) {
auto texToWarp_acc =
texToWarp.template get_access<sycl::float4,
sycl::access::mode::read>(cgh);
cgh.parallel_for(sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
WarpingKernel(w, h, s, u, v, out,
texToWarp_acc, texDescr, item_ct1);
});
});
} | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Samples/5_Domain_Specific/HSOpticalFlow/derivativesKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief compute image derivatives
///
/// CUDA kernel, relies heavily on texture unit
/// \param[in] width image width
/// \param[in] height image height
/// \param[in] stride image stride
/// \param[out] Ix x derivative
/// \param[out] Iy y derivative
/// \param[out] Iz temporal derivative
///////////////////////////////////////////////////////////////////////////////
void ComputeDerivativesKernel(int width, int height, int stride, float *Ix,
float *Iy, float *Iz,
sycl::accessor<sycl::float4, 2, sycl::access::mode::read,
sycl::access::target::image>
texSource,
sycl::accessor<sycl::float4, 2, sycl::access::mode::read,
sycl::access::target::image>
texTarget,
sycl::sampler texDesc,
const sycl::nd_item<3> &item_ct1) {
const int ix = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int iy = item_ct1.get_local_id(1) +
item_ct1.get_group(1) * item_ct1.get_local_range(1);
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float t0, t1;
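  // five-point derivative filter (1, -8, 0, 8, -1) / 12,
  // same as the CPU reference implementation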
auto x_inputCoords1 = sycl::float2(ix - 2.0f, iy);
auto x_inputCoords2 = sycl::float2(ix - 1.0f, iy);
auto x_inputCoords3 = sycl::float2(ix + 1.0f, iy);
auto x_inputCoords4 = sycl::float2(ix + 2.0f, iy);
t0 = texSource.read(x_inputCoords1, texDesc)[0];
t0 -= texSource.read(x_inputCoords2, texDesc)[0] * 8.0f;
t0 += texSource.read(x_inputCoords3, texDesc)[0] * 8.0f;
t0 -= texSource.read(x_inputCoords4, texDesc)[0];
t0 /= 12.0f;
t1 = texTarget.read(x_inputCoords1, texDesc)[0];
t1 -= texTarget.read(x_inputCoords2, texDesc)[0] * 8.0f;
t1 += texTarget.read(x_inputCoords3, texDesc)[0] * 8.0f;
t1 -= texTarget.read(x_inputCoords4, texDesc)[0];
t1 /= 12.0f;
Ix[pos] = (t0 + t1) * 0.5f;
// t derivative
auto inputCoord = sycl::float2(ix, iy);
Iz[pos] = texTarget.read(inputCoord, texDesc)[0] -
texSource.read(inputCoord, texDesc)[0];
// y derivative
auto y_inputCoords1 = sycl::float2(ix, iy - 2.0f);
auto y_inputCoords2 = sycl::float2(ix, iy - 1.0f);
auto y_inputCoords3 = sycl::float2(ix, iy + 1.0f);
auto y_inputCoords4 = sycl::float2(ix, iy + 2.0f);
t0 = texSource.read(y_inputCoords1, texDesc)[0];
t0 -= texSource.read(y_inputCoords2, texDesc)[0] * 8.0f;
t0 += texSource.read(y_inputCoords3, texDesc)[0] * 8.0f;
t0 -= texSource.read(y_inputCoords4, texDesc)[0];
t0 /= 12.0f;
t1 = texTarget.read(y_inputCoords1, texDesc)[0];
t1 -= texTarget.read(y_inputCoords2, texDesc)[0] * 8.0f;
t1 += texTarget.read(y_inputCoords3, texDesc)[0] * 8.0f;
t1 -= texTarget.read(y_inputCoords4, texDesc)[0];
t1 /= 12.0f;
Iy[pos] = (t0 + t1) * 0.5f;
}
///////////////////////////////////////////////////////////////////////////////
/// \brief compute image derivatives
///
/// \param[in] I0 source image
/// \param[in] I1 tracked image
/// \param[in] w image width
/// \param[in] h image height
/// \param[in] s image stride
/// \param[out] Ix x derivative
/// \param[out] Iy y derivative
/// \param[out] Iz temporal derivative
///////////////////////////////////////////////////////////////////////////////
static void ComputeDerivatives(const float *I0, const float *I1, int w, int h, int s, float *Ix, float *Iy, float *Iz, sycl::queue q) {
sycl::range<3> threads(1, 6, 32);
sycl::range<3> blocks(1, iDivUp(h, threads[1]), iDivUp(w, threads[2]));
int dataSize = s * h * sizeof(float);
float *I0_h = (float *)malloc(dataSize);
float *I1_h = (float *)malloc(dataSize);
q.memcpy(I0_h, I0, dataSize).wait();
q.memcpy(I1_h, I1, dataSize).wait();
float *I0_p = (float *)sycl::malloc_shared(h * s * sizeof(sycl::float4), q);
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
int index = i * s + j;
I0_p[index * 4 + 0] = I0_h[index];
I0_p[index * 4 + 1] = I0_p[index * 4 + 2] = I0_p[index * 4 + 3] = 0.f;
}
}
float *I1_p = (float *)sycl::malloc_shared(h * s * sizeof(sycl::float4), q);
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
int index = i * s + j;
I1_p[index * 4 + 0] = I1_h[index];
I1_p[index * 4 + 1] = I1_p[index * 4 + 2] = I1_p[index * 4 + 3] = 0.f;
}
}
auto texDescr = sycl::sampler(
sycl::coordinate_normalization_mode::unnormalized,
sycl::addressing_mode::clamp_to_edge, sycl::filtering_mode::nearest);
auto texSource =
sycl::image<2>(I0_p, sycl::image_channel_order::rgba,
sycl::image_channel_type::fp32, sycl::range<2>(w, h),
sycl::range<1>(s * sizeof(sycl::float4)));
auto texTarget =
sycl::image<2>(I1_p, sycl::image_channel_order::rgba,
sycl::image_channel_type::fp32, sycl::range<2>(w, h),
sycl::range<1>(s * sizeof(sycl::float4)));
q.submit([&](sycl::handler &cgh) {
auto texSource_acc =
texSource.template get_access<sycl::float4,
sycl::access::mode::read>(cgh);
auto texTarget_acc =
texTarget.template get_access<sycl::float4,
sycl::access::mode::read>(cgh);
cgh.parallel_for(
sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
ComputeDerivativesKernel(
w, h, s, Ix, Iy, Iz,
texSource_acc, texTarget_acc,
texDescr, item_ct1);
});
});
} | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Samples/5_Domain_Specific/HSOpticalFlow/solverKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief one iteration of classical Horn-Schunck method, CUDA kernel.
///
/// It is one iteration of Jacobi method for a corresponding linear system.
/// Template parameters describe the CTA size
/// \param[in] du0 current horizontal displacement approximation
/// \param[in] dv0 current vertical displacement approximation
/// \param[in] Ix image x derivative
/// \param[in] Iy image y derivative
/// \param[in] Iz temporal derivative
/// \param[in] w width
/// \param[in] h height
/// \param[in] s stride
/// \param[in] alpha degree of smoothness
/// \param[out] du1 new horizontal displacement approximation
/// \param[out] dv1 new vertical displacement approximation
///////////////////////////////////////////////////////////////////////////////
template <int bx, int by>
void JacobiIteration(const float *du0, const float *dv0,
const float *Ix, const float *Iy,
const float *Iz, int w, int h, int s,
float alpha, float *du1, float *dv1,
const sycl::nd_item<3> &item_ct1,
volatile float *du, volatile float *dv) {
// Handle to thread block group
auto cta = item_ct1.get_group();
const int ix = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int iy = item_ct1.get_local_id(1) +
item_ct1.get_group(1) * item_ct1.get_local_range(1);
// position within global memory array
const int pos = sycl::min(ix, w - 1) + sycl::min(iy, h - 1) * s;
// position within shared memory array
const int shMemPos =
item_ct1.get_local_id(2) + 1 + (item_ct1.get_local_id(1) + 1) * (bx + 2);
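  // the shared-memory tile is (bx + 2) x (by + 2): the bx x by work-group
  // block plus a one-element halo on each side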
// Load data to shared memory.
// load tile being processed
du[shMemPos] = du0[pos];
dv[shMemPos] = dv0[pos];
// load necessary neighbouring elements
// We clamp out-of-range coordinates.
// It is equivalent to mirroring
// because we access data only one step away from borders.
if (item_ct1.get_local_id(1) == 0) {
// beginning of the tile
const int bsx = item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int bsy = item_ct1.get_group(1) * item_ct1.get_local_range(1);
// element position within matrix
int x, y;
// element position within linear array
// gm - global memory
// sm - shared memory
int gmPos, smPos;
/*
DPCT1064:30: Migrated min call is used in a macro/template definition and
may not be valid for all macro/template uses. Adjust the code.
*/
x = dpct::min((unsigned int)(bsx + item_ct1.get_local_id(2)), w - 1);
// row just below the tile
y = sycl::max(bsy - 1, 0);
gmPos = y * s + x;
smPos = item_ct1.get_local_id(2) + 1;
du[smPos] = du0[gmPos];
dv[smPos] = dv0[gmPos];
// row above the tile
y = sycl::min(bsy + by, h - 1);
smPos += (by + 1) * (bx + 2);
gmPos = y * s + x;
du[smPos] = du0[gmPos];
dv[smPos] = dv0[gmPos];
} else if (item_ct1.get_local_id(1) == 1) {
// beginning of the tile
const int bsx = item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int bsy = item_ct1.get_group(1) * item_ct1.get_local_range(1);
// element position within matrix
int x, y;
// element position within linear array
// gm - global memory
// sm - shared memory
int gmPos, smPos;
/*
DPCT1064:31: Migrated min call is used in a macro/template definition and
may not be valid for all macro/template uses. Adjust the code.
*/
y = dpct::min((unsigned int)(bsy + item_ct1.get_local_id(2)), h - 1);
// column to the left
x = sycl::max(bsx - 1, 0);
smPos = bx + 2 + item_ct1.get_local_id(2) * (bx + 2);
gmPos = x + y * s;
// check if we are within tile
if (item_ct1.get_local_id(2) < by) {
du[smPos] = du0[gmPos];
dv[smPos] = dv0[gmPos];
// column to the right
x = sycl::min(bsx + bx, w - 1);
gmPos = y * s + x;
smPos += bx + 1;
du[smPos] = du0[gmPos];
dv[smPos] = dv0[gmPos];
}
}
/*
DPCT1065:13: Consider replacing sycl::nd_item::barrier() with
sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better
performance if there is no access to global memory.
*/
item_ct1.barrier();
if (ix >= w || iy >= h) return;
// now all necessary data are loaded to shared memory
int left, right, up, down;
left = shMemPos - 1;
right = shMemPos + 1;
up = shMemPos + bx + 2;
down = shMemPos - bx - 2;
float sumU = (du[left] + du[right] + du[up] + du[down]) * 0.25f;
float sumV = (dv[left] + dv[right] + dv[up] + dv[down]) * 0.25f;
float frac = (Ix[pos] * sumU + Iy[pos] * sumV + Iz[pos]) /
(Ix[pos] * Ix[pos] + Iy[pos] * Iy[pos] + alpha);
du1[pos] = sumU - Ix[pos] * frac;
dv1[pos] = sumV - Iy[pos] * frac;
}
///////////////////////////////////////////////////////////////////////////////
/// \brief one iteration of classical Horn-Schunck method, CUDA kernel wrapper.
///
/// It is one iteration of Jacobi method for a corresponding linear system.
/// \param[in] du0 current horizontal displacement approximation
/// \param[in] dv0 current vertical displacement approximation
/// \param[in] Ix image x derivative
/// \param[in] Iy image y derivative
/// \param[in] Iz temporal derivative
/// \param[in] w width
/// \param[in] h height
/// \param[in] s stride
/// \param[in] alpha degree of smoothness
/// \param[out] du1 new horizontal displacement approximation
/// \param[out] dv1 new vertical displacement approximation
///////////////////////////////////////////////////////////////////////////////
static void SolveForUpdate(const float *du0, const float *dv0, const float *Ix,
const float *Iy, const float *Iz, int w, int h,
int s, float alpha, float *du1, float *dv1, sycl::queue q) {
// CTA size
sycl::range<3> threads(1, 6, 32);
// grid size
sycl::range<3> blocks(1, iDivUp(h, threads[1]), iDivUp(w, threads[2]));
/*
DPCT1049:14: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
q.submit([&](sycl::handler &cgh) {
sycl::local_accessor<float, 1> du_acc_ct1(
sycl::range<1>((32 + 2) * (6 + 2)), cgh);
sycl::local_accessor<float, 1> dv_acc_ct1(
sycl::range<1>((32 + 2) * (6 + 2)), cgh);
cgh.parallel_for(sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
JacobiIteration<32, 6>(du0, dv0, Ix, Iy, Iz, w, h, s,
alpha, du1, dv1, item_ct1,
du_acc_ct1.get_pointer(),
dv_acc_ct1.get_pointer());
});
});
}
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/Samples/5_Domain_Specific/HSOpticalFlow/upscaleKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief upscale one component of a displacement field, CUDA kernel
/// \param[in] width field width
/// \param[in] height field height
/// \param[in] stride field stride
/// \param[in] scale scale factor (multiplier)
/// \param[out] out result
///////////////////////////////////////////////////////////////////////////////
void UpscaleKernel(int width, int height, int stride, float scale, float *out,
sycl::accessor<sycl::float4, 2, sycl::access::mode::read,
sycl::access::target::image>
texCoarse_acc,
sycl::sampler texDesc,
const sycl::nd_item<3> &item_ct1) {
const int ix = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int iy = item_ct1.get_local_id(1) +
item_ct1.get_group(1) * item_ct1.get_local_range(1);
if (ix >= width || iy >= height) return;
float x = ((float)ix - 0.5f) * 0.5f;
float y = ((float)iy - 0.5f) * 0.5f;
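  // map the fine-grid pixel to its position in the coarse (half-resolution)
  // field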
auto inputCoord = sycl::float2(x, y);
// exploit hardware interpolation
// and scale interpolated vector to match next pyramid level resolution
out[ix + iy * stride] = texCoarse_acc.read(inputCoord, texDesc)[0] * scale;
}
///////////////////////////////////////////////////////////////////////////////
/// \brief upscale one component of a displacement field, kernel wrapper
/// \param[in] src field component to upscale
/// \param[in] width field current width
/// \param[in] height field current height
/// \param[in] stride field current stride
/// \param[in] newWidth field new width
/// \param[in] newHeight field new height
/// \param[in] newStride field new stride
/// \param[in] scale value scale factor (multiplier)
/// \param[out] out upscaled field component
///////////////////////////////////////////////////////////////////////////////
static void Upscale(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float scale,
float *out, sycl::queue q) {
sycl::range<3> threads(1, 8, 32);
sycl::range<3> blocks(1, iDivUp(newHeight, threads[1]),
iDivUp(newWidth, threads[2]));
int dataSize = stride * height * sizeof(float);
float *src_h = (float *)malloc(dataSize);
q.memcpy(src_h, src, dataSize).wait();
float *src_p =
(float *)sycl::malloc_shared(height * stride * sizeof(sycl::float4), q);
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
int index = i * stride + j;
src_p[index * 4 + 0] = src_h[index];
src_p[index * 4 + 1] = src_p[index * 4 + 2] = src_p[index * 4 + 3] = 0.f;
}
}
auto texDescr = sycl::sampler(
sycl::coordinate_normalization_mode::unnormalized,
sycl::addressing_mode::clamp_to_edge, sycl::filtering_mode::linear);
auto texCoarse = sycl::image<2>(
src_p, sycl::image_channel_order::rgba,
sycl::image_channel_type::fp32, sycl::range<2>(width, height),
sycl::range<1>(stride * sizeof(sycl::float4)));
q.submit([&](sycl::handler &cgh) {
auto texCoarse_acc =
texCoarse.template get_access<sycl::float4,
sycl::access::mode::read>(cgh);
cgh.parallel_for(sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
UpscaleKernel(newWidth, newHeight, newStride, scale, out,
texCoarse_acc, texDescr, item_ct1);
});
});
} | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/ccl_utils.hpp | //==---- ccl_utils.hpp----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_CCL_UTILS_HPP__
#define __DPCT_CCL_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/ccl.hpp>
#include <unordered_map>
#include <memory>
#include "device.hpp"
namespace dpct {
namespace ccl {
namespace detail {
/// Get stored kvs with specified kvs address.
inline std::shared_ptr<oneapi::ccl::kvs> &
get_kvs(const oneapi::ccl::kvs::address_type &addr) {
struct hash {
std::size_t operator()(const oneapi::ccl::kvs::address_type &in) const {
return std::hash<std::string_view>()(std::string_view(in.data(), in.size()));
}
};
static std::unordered_map<oneapi::ccl::kvs::address_type,
std::shared_ptr<oneapi::ccl::kvs>, hash>
kvs_map;
return kvs_map[addr];
}
/// Help class to init ccl environment.
class ccl_init_helper {
public:
ccl_init_helper() { oneapi::ccl::init(); }
};
} // namespace detail
/// Get concatenated library version as an integer.
static inline int get_version() {
oneapi::ccl::init();
auto ver = oneapi::ccl::get_library_version();
return ver.major * 10000 + ver.minor * 100 + ver.update;
}
/// Create main kvs and return its address.
static inline oneapi::ccl::kvs::address_type create_kvs_address() {
oneapi::ccl::init();
auto ptr = oneapi::ccl::create_main_kvs();
auto addr = ptr->get_address();
detail::get_kvs(addr) = ptr;
return addr;
}
/// Get the stored kvs with \p addr if it exists. Otherwise, create a kvs with \p addr.
static inline std::shared_ptr<oneapi::ccl::kvs>
create_kvs(const oneapi::ccl::kvs::address_type &addr) {
oneapi::ccl::init();
auto &ptr = detail::get_kvs(addr);
if (!ptr)
ptr = oneapi::ccl::create_kvs(addr);
return ptr;
}
/// dpct communicator extension
class communicator_wrapper : public dpct::ccl::detail::ccl_init_helper {
public:
communicator_wrapper(
int size, int rank, oneapi::ccl::kvs::address_type id,
const oneapi::ccl::comm_attr &attr = oneapi::ccl::default_comm_attr)
: _device_comm(oneapi::ccl::create_device(
static_cast<sycl::device &>(dpct::get_current_device()))),
_context_comm(oneapi::ccl::create_context(dpct::get_default_context())),
_comm(oneapi::ccl::create_communicator(
size, rank, _device_comm, _context_comm, dpct::ccl::create_kvs(id),
attr)) {
_queue_init = false;
_ccl_stream_ptr = nullptr;
}
~communicator_wrapper() {
delete _ccl_stream_ptr;
};
/// Return the rank in a oneapi::ccl::communicator
/// \returns The rank corresponding to communicator object
int rank() const {
return _comm.rank();
}
  /// Retrieves the number of ranks in the oneapi::ccl::communicator
  /// \returns The number of ranks
int size() const {
return _comm.size();
}
/// Return underlying native device, which was used in oneapi::ccl::communicator
sycl::device get_device() const {
return _comm.get_device().get_native();
}
/// \brief allreduce is a collective communication operation that performs the global reduction operation
/// on values from all ranks of communicator and distributes the result back to all ranks.
/// \param send_buf the buffer with @c count elements of @c dtype that stores local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result, must have the same dimension as @c send_buf
/// \param count the number of elements of type @c dtype in @c send_buf and @c recv_buf
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void allreduce(const void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::allreduce(sendbuff, recvbuff, count, dtype, rtype,
_comm, stream);
},
queue_ptr);
}
/// \brief reduce is a collective communication operation that performs the
/// global reduction operation on values from all ranks of the communicator
/// and returns the result to the root rank.
/// \param send_buf the buffer with @c count elements of @c dtype that stores
/// local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result,
/// must have the same dimension as @c send_buf
/// \param count the number of elements of type @c dtype in @c send_buf and @c recv_buf
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param root the rank that gets the result of reduction
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void reduce(const void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
int root, sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::reduce(sendbuff, recvbuff, count, dtype, rtype,
root, _comm, stream);
},
queue_ptr);
}
/// \brief broadcast is a collective communication operation that broadcasts data
/// from one rank of communicator (denoted as root) to all other ranks.
  /// Only supports in-place operation
/// \param send_buf the buffer with @c count elements of @c dtype that stores
/// local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result
/// \param count the number of elements of type @c dtype in @c buf
  /// \param dtype the datatype of elements in @c buf
/// \param root the rank that broadcasts @c buf
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void broadcast(void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, int root,
sycl::queue *queue_ptr) {
if (sendbuff != recvbuff) {
throw std::runtime_error(
"oneCCL broadcast only support in-place operation. "
"send_buf and recv_buf must be same.");
return;
}
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::broadcast(recvbuff, count, dtype, root, _comm,
stream);
},
queue_ptr);
}
/// \brief reduce_scatter is a collective communication operation that performs the global reduction operation
/// on values from all ranks of the communicator and scatters the result in blocks back to all ranks.
/// \param send_buf the buffer with @c count elements of @c dtype that stores local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result, must have the same dimension as @c send_buf
/// \param recv_count the number of elements of type @c dtype in receive block
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void reduce_scatter(const void *sendbuff, void *recvbuff, size_t recv_count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::reduce_scatter(sendbuff, recvbuff, recv_count,
dtype, rtype, _comm, stream);
},
queue_ptr);
}
private:
oneapi::ccl::device _device_comm;
oneapi::ccl::context _context_comm;
oneapi::ccl::communicator _comm;
sycl::queue _queue;
bool _queue_init;
oneapi::ccl::stream *_ccl_stream_ptr;
template <class Fn>
void call_func_wrapper(Fn func, sycl::queue *qptr) {
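    // reuse the cached oneCCL stream while the caller keeps passing the same
    // queue; a different queue falls back to an asynchronous temporary stream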
if (_queue_init && *qptr != _queue) {
call_func_async(func, qptr);
} else {
if(!_queue_init) {
_queue = *qptr;
_queue_init = true;
_ccl_stream_ptr = new oneapi::ccl::stream(oneapi::ccl::create_stream(_queue));
}
std::invoke(func, *_ccl_stream_ptr);
}
}
class call_func_async {
sycl::queue *_q_ptr;
struct call_async_impl {
oneapi::ccl::stream _ccl_stream_impl;
oneapi::ccl::event _ccl_event_impl;
template <class Fn>
explicit call_async_impl(Fn func, sycl::queue *qptr)
: _ccl_stream_impl(oneapi::ccl::create_stream(*qptr)),
_ccl_event_impl(std::invoke(func, _ccl_stream_impl)) {}
};
call_async_impl *_imp;
public:
template <class Fn>
explicit call_func_async(Fn func, sycl::queue *qptr)
: _q_ptr(qptr),
_imp(new call_async_impl(func, qptr)) {}
~call_func_async() {
_q_ptr->submit([&](sycl::handler &cgh)
{ cgh.host_task([=]
{
_imp->_ccl_event_impl.wait();
delete _imp; }); });
}
};
};
typedef dpct::ccl::communicator_wrapper *comm_ptr;
} // namespace ccl
} // namespace dpct
#endif // __DPCT_CCL_UTILS_HPP__ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/util.hpp | //==---- util.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_UTIL_HPP__
#define __DPCT_UTIL_HPP__
#include <sycl/sycl.hpp>
#include <complex>
#include <type_traits>
#include <cassert>
#include <cstdint>
// TODO: Remove these function definitions once they exist in the DPC++ compiler
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate))
T __spirv_GroupNonUniformShuffle(__spv::Scope::Flag, T, unsigned) noexcept;
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate))
T __spirv_GroupNonUniformShuffleDown(__spv::Scope::Flag, T, unsigned) noexcept;
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate))
T __spirv_GroupNonUniformShuffleUp(__spv::Scope::Flag, T, unsigned) noexcept;
#endif
namespace dpct {
namespace detail {
template <typename tag, typename T> class generic_error_type {
public:
generic_error_type() = default;
generic_error_type(T value) : value{value} {}
operator T() const { return value; }
private:
T value;
};
} // namespace detail
using err0 = detail::generic_error_type<struct err0_tag, int>;
using err1 = detail::generic_error_type<struct err1_tag, int>;
template <int... Ints> struct integer_sequence {};
template <int Size, int... Ints>
struct make_index_sequence
: public make_index_sequence<Size - 1, Size - 1, Ints...> {};
template <int... Ints>
struct make_index_sequence<0, Ints...> : public integer_sequence<Ints...> {};
template <typename T> struct DataType { using T2 = T; };
template <typename T> struct DataType<sycl::vec<T, 2>> {
using T2 = std::complex<T>;
};
inline void matrix_mem_copy(void *to_ptr, const void *from_ptr, int to_ld,
int from_ld, int rows, int cols, int elem_size,
memcpy_direction direction = automatic,
sycl::queue &queue = dpct::get_default_queue(),
bool async = false) {
if (to_ptr == from_ptr && to_ld == from_ld) {
return;
}
if (to_ld == from_ld) {
size_t copy_size = elem_size * ((cols - 1) * (size_t)to_ld + rows);
if (async)
detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr,
copy_size, direction);
else
detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr,
copy_size, direction).wait();
} else {
if (async)
detail::dpct_memcpy(queue, to_ptr, from_ptr, elem_size * to_ld,
elem_size * from_ld, elem_size * rows, cols,
direction);
else
sycl::event::wait(detail::dpct_memcpy(
queue, to_ptr, from_ptr, elem_size * to_ld, elem_size * from_ld,
elem_size * rows, cols, direction));
}
}
/// Copy matrix data. The default leading dimension is column.
/// \param [out] to_ptr A pointer pointing to the destination location.
/// \param [in] from_ptr A pointer pointing to the source location.
/// \param [in] to_ld The leading dimension of the destination matrix.
/// \param [in] from_ld The leading dimension of the source matrix.
/// \param [in] rows The number of rows of the source matrix.
/// \param [in] cols The number of columns of the source matrix.
/// \param [in] direction The direction of the data copy.
/// \param [in] queue The queue where the routine should be executed.
/// \param [in] async If this argument is true, the return of the function
/// does NOT guarantee the copy is completed.
template <typename T>
inline void matrix_mem_copy(T *to_ptr, const T *from_ptr, int to_ld,
int from_ld, int rows, int cols,
memcpy_direction direction = automatic,
sycl::queue &queue = dpct::get_default_queue(),
bool async = false) {
using Ty = typename DataType<T>::T2;
matrix_mem_copy((void *)to_ptr, (void *)from_ptr, to_ld, from_ld, rows, cols,
sizeof(Ty), direction, queue, async);
}
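/// Example usage (illustrative sketch, not part of the original header):
/// copying a 3x2 column-major float sub-matrix between two host buffers whose
/// leading dimensions differ; the default queue and direction are used.
///   float src[4 * 2] = {/* column-major data, ld = 4 */};
///   float dst[3 * 2] = {};               // ld = 3
///   dpct::matrix_mem_copy(dst, src, /*to_ld=*/3, /*from_ld=*/4,
///                         /*rows=*/3, /*cols=*/2);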
/// Cast the high or low 32 bits of a double to an integer.
/// \param [in] d The double value.
/// \param [in] use_high32 Cast the high 32 bits of the double if true;
/// otherwise cast the low 32 bits.
inline int cast_double_to_int(double d, bool use_high32 = true) {
sycl::vec<double, 1> v0{d};
auto v1 = v0.as<sycl::int2>();
if (use_high32)
return v1[1];
return v1[0];
}
/// Combine two integers, the first as the high 32 bits and the second
/// as the low 32 bits, into a double.
/// \param [in] high32 The integer as the high 32 bits
/// \param [in] low32 The integer as the low 32 bits
inline double cast_ints_to_double(int high32, int low32) {
sycl::int2 v0{low32, high32};
auto v1 = v0.as<sycl::vec<double, 1>>();
return v1;
}
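/// Example (illustrative): splitting a double into its 32-bit halves and
/// recombining them; the reassembled value is bit-identical to the original.
///   double d = 3.14;
///   int hi = dpct::cast_double_to_int(d);          // high 32 bits
///   int lo = dpct::cast_double_to_int(d, false);   // low 32 bits
///   double d2 = dpct::cast_ints_to_double(hi, lo); // d2 == d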
/// Reverse the bit order of an unsigned integer
/// \param [in] a Input unsigned integer value
/// \returns Value of a with the bit order reversed
template <typename T> inline T reverse_bits(T a) {
static_assert(std::is_unsigned<T>::value && std::is_integral<T>::value,
"unsigned integer required");
if (!a)
return 0;
T mask = 0;
size_t count = 4 * sizeof(T);
mask = ~mask >> count;
while (count) {
a = ((a & mask) << count) | ((a & ~mask) >> count);
count = count >> 1;
mask = mask ^ (mask << count);
}
return a;
}
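/// Example (illustrative): reversing the bit order of unsigned values.
///   unsigned int u = dpct::reverse_bits(0x00000001u);        // 0x80000000u
///   std::uint8_t b = dpct::reverse_bits<std::uint8_t>(0x06); // 0x60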
/// \param [in] a The first value contains 4 bytes
/// \param [in] b The second value contains 4 bytes
/// \param [in] s The selector value; only the lower 16 bits are used
/// \returns the permutation result of 4 bytes selected in the way
/// specified by \p s from \p a and \p b
inline unsigned int byte_level_permute(unsigned int a, unsigned int b,
unsigned int s) {
unsigned int ret;
ret =
((((std::uint64_t)b << 32 | a) >> (s & 0x7) * 8) & 0xff) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 4) & 0x7) * 8) & 0xff) << 8) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 8) & 0x7) * 8) & 0xff) << 16) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 12) & 0x7) * 8) & 0xff) << 24);
return ret;
}
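/// Example (illustrative): each selector nibble picks one of the 8 bytes of
/// the 64-bit value formed by (b:a); byte 0 is the least significant byte of a.
///   unsigned int a = 0x33221100, b = 0x77665544;
///   unsigned int r0 = dpct::byte_level_permute(a, b, 0x3210); // 0x33221100
///   unsigned int r1 = dpct::byte_level_permute(a, b, 0x7531); // 0x77553311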
/// Find the position of the least significant set bit in an integer.
/// ffs(0) returns 0.
///
/// \param [in] a Input integer value
/// \returns The position
template <typename T> inline int ffs(T a) {
static_assert(std::is_integral<T>::value, "integer required");
return (sycl::ctz(a) + 1) % (sizeof(T) * 8 + 1);
}
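/// Example (illustrative): positions are 1-based and ffs(0) is 0.
///   int p0 = dpct::ffs(0);      // 0
///   int p1 = dpct::ffs(1);      // 1
///   int p2 = dpct::ffs(0b1000); // 4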
/// select_from_sub_group allows work-items to obtain a copy of a value held by
/// any other work-item in the sub_group. The input sub_group will be divided
/// into several logical sub_groups with id range [0, \p logical_sub_group_size
/// - 1]. Each work-item in a logical sub_group gets its value from the
/// work-item whose id is \p remote_local_id. If \p remote_local_id is outside
/// the logical sub_group id range, it is taken modulo \p
/// logical_sub_group_size. The \p logical_sub_group_size must be a power of 2
/// and must not exceed the input sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] remote_local_id Input source work item id
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T select_from_sub_group(sycl::sub_group g, T x, int remote_local_id,
int logical_sub_group_size = 32) {
unsigned int start_index =
g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size;
return sycl::select_from_group(
g, x, start_index + remote_local_id % logical_sub_group_size);
}
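/// Example (illustrative sketch): inside a kernel body, broadcast the value
/// held by lane 0 of each 16-wide logical sub-group; `item` and `val` are
/// assumed to exist in the enclosing kernel.
///   auto sg = item.get_sub_group();
///   float lane0 = dpct::select_from_sub_group(sg, val, 0, 16);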
/// shift_sub_group_left move values held by the work-items in a sub_group
/// directly to another work-item in the sub_group, by shifting values a fixed
/// number of work-items to the left. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in a logical sub_group gets its value from the work-item
/// whose id is the caller's id plus \p delta. If the calculated id is outside
/// the logical sub_group id range, the work-item keeps its own value. The \p
/// logical_sub_group_size must be a power of 2 and must not exceed the input
/// sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_left(sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int end_index =
(id / logical_sub_group_size + 1) * logical_sub_group_size;
T result = sycl::shift_group_left(g, x, delta);
if ((id + delta) >= end_index) {
result = x;
}
return result;
}
/// shift_sub_group_right move values held by the work-items in a sub_group
/// directly to another work-item in the sub_group, by shifting values a fixed
/// number of work-items to the right. The input sub_group will be divided into
/// several logical_sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in a logical sub_group gets its value from the work-item
/// whose id is the caller's id minus \p delta. If the calculated id is outside
/// the logical sub_group id range, the work-item keeps its own value. The \p
/// logical_sub_group_size must be a power of 2 and must not exceed the input
/// sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_right(sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
T result = sycl::shift_group_right(g, x, delta);
if ((id - start_index) < delta) {
result = x;
}
return result;
}
/// permute_sub_group_by_xor permutes values by exchanging values held by pairs
/// of work-items identified by computing the bitwise exclusive OR of the
/// work-item id and some fixed mask. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in a logical sub_group gets its value from the work-item
/// whose id is the bitwise exclusive OR of the caller's id and \p mask. If the
/// calculated id is outside the logical sub_group id range, the work-item
/// keeps its own value. The \p logical_sub_group_size must be a power of 2 and
/// must not exceed the input sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] mask Input mask
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T permute_sub_group_by_xor(sycl::sub_group g, T x, unsigned int mask,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
return sycl::select_from_group(g, x,
target_offset < logical_sub_group_size
? start_index + target_offset
: id);
}
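/// Example (illustrative sketch): a butterfly reduction over a 32-wide
/// sub-group built on permute_sub_group_by_xor; `item` and `val` are assumed
/// to exist in the enclosing kernel.
///   auto sg = item.get_sub_group();
///   for (int offset = 16; offset > 0; offset >>= 1)
///     val += dpct::permute_sub_group_by_xor(sg, val, offset);
///   // every work-item now holds the sub-group-wide sum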
namespace experimental {
/// Masked version of select_from_sub_group, which executes a masked sub-group
/// operation. The parameter \p member_mask indicates the work-items
/// participating in the call: the n-th bit is set to 1 if the work-item with
/// id n participates. All work-items named in \p member_mask must execute the
/// call with the same \p member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] remote_local_id Input source work item id
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T select_from_sub_group(unsigned int member_mask,
sycl::sub_group g, T x, int remote_local_id,
int logical_sub_group_size = 32) {
unsigned int start_index =
g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size;
unsigned logical_remote_id =
start_index + remote_local_id % logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x, logical_remote_id);
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)remote_local_id;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime,
                      "Masked version of select_from_sub_group is not "
                      "supported on the host device or with non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of shift_sub_group_left, which executes a masked sub-group
/// operation. The parameter \p member_mask indicates the work-items
/// participating in the call: the n-th bit is set to 1 if the work-item with
/// id n participates. All work-items named in \p member_mask must execute the
/// call with the same \p member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_left(unsigned int member_mask,
sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int end_index =
(id / logical_sub_group_size + 1) * logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
T result = __spirv_GroupNonUniformShuffleDown(__spv::Scope::Subgroup, x, delta);
if ((id + delta) >= end_index) {
result = x;
}
return result;
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_left "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)delta;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime,
                      "Masked version of shift_sub_group_left is not "
                      "supported on the host device or with non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of shift_sub_group_right, which executes a masked sub-group
/// operation. The parameter \p member_mask indicates the work-items
/// participating in the call: the n-th bit is set to 1 if the work-item with
/// id n participates. All work-items named in \p member_mask must execute the
/// call with the same \p member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_right(unsigned int member_mask,
sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
T result = __spirv_GroupNonUniformShuffleUp(__spv::Scope::Subgroup, x, delta);
if ((id - start_index) < delta) {
result = x;
}
return result;
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_right "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)delta;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime,
                      "Masked version of shift_sub_group_right is not "
                      "supported on the host device or with non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of permute_sub_group_by_xor, which executes a masked
/// sub-group operation. The parameter \p member_mask indicates the work-items
/// participating in the call: the n-th bit is set to 1 if the work-item with
/// id n participates. All work-items named in \p member_mask must execute the
/// call with the same \p member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] mask Input mask
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T permute_sub_group_by_xor(unsigned int member_mask,
sycl::sub_group g, T x, unsigned int mask,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
unsigned logical_remote_id = (target_offset < logical_sub_group_size) ? start_index + target_offset : id;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x, logical_remote_id);
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of permute_sub_group_by_xor "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)mask;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime,
                      "Masked version of permute_sub_group_by_xor is not "
                      "supported on the host device or with non-Intel compilers.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
} // namespace experimental
/// Computes the multiplication of two complex numbers.
/// \tparam T Complex element type
/// \param [in] x The first input complex number
/// \param [in] y The second input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> cmul(sycl::vec<T, 2> x, sycl::vec<T, 2> y) {
std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]);
t1 = t1 * t2;
return sycl::vec<T, 2>(t1.real(), t1.imag());
}
/// Computes the division of two complex numbers.
/// \tparam T Complex element type
/// \param [in] x The first input complex number
/// \param [in] y The second input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> cdiv(sycl::vec<T, 2> x, sycl::vec<T, 2> y) {
std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]);
t1 = t1 / t2;
return sycl::vec<T, 2>(t1.real(), t1.imag());
}
/// Computes the magnitude of a complex number.
/// \tparam T Complex element type
/// \param [in] x The input complex number
/// \returns The result
template <typename T>
T cabs(sycl::vec<T, 2> x) {
std::complex<T> t(x[0], x[1]);
return std::abs(t);
}
/// Computes the complex conjugate of a complex number.
/// \tparam T Complex element type
/// \param [in] x The input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> conj(sycl::vec<T, 2> x) {
std::complex<T> t(x[0], x[1]);
t = std::conj(t);
return sycl::vec<T, 2>(t.real(), t.imag());
}
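/// Example (illustrative): complex helpers operating on sycl::float2 values
/// interpreted as (real, imaginary) pairs.
///   sycl::float2 x{1.f, 2.f}, y{3.f, -1.f};
///   sycl::float2 p = dpct::cmul(x, y); // (5, 5)
///   sycl::float2 d = dpct::cdiv(p, y); // (1, 2) up to rounding
///   float        m = dpct::cabs(x);    // sqrt(5)
///   sycl::float2 c = dpct::conj(x);    // (1, -2)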
inline int get_sycl_language_version() {
#ifdef SYCL_LANGUAGE_VERSION
return SYCL_LANGUAGE_VERSION;
#else
return 202000;
#endif
}
namespace experimental {
/// Synchronize work items from all work groups within a SYCL kernel.
/// \param [in] item: The nd_item identifying the current work-item and its work-group.
/// \param [in] counter: An atomic object defined on a device memory which can
/// be accessed by work items in all work groups. The initial value of the
/// counter should be zero.
/// Note: Please make sure that all the work items of all work groups within
/// a SYCL kernel can be scheduled actively at the same time on a device.
template <int dimensions = 3>
inline void
nd_range_barrier(const sycl::nd_item<dimensions> &item,
sycl::atomic_ref<
unsigned int, sycl::memory_order::seq_cst,
sycl::memory_scope::device,
sycl::access::address_space::global_space> &counter) {
static_assert(dimensions == 3, "dimensions must be 3.");
unsigned int num_groups = item.get_group_range(2) * item.get_group_range(1) *
item.get_group_range(0);
item.barrier();
if (item.get_local_linear_id() == 0) {
unsigned int inc = 1;
unsigned int old_arrive = 0;
bool is_group0 =
(item.get_group(2) + item.get_group(1) + item.get_group(0) == 0);
if (is_group0) {
inc = 0x80000000 - (num_groups - 1);
}
old_arrive = counter.fetch_add(inc);
// Synchronize all the work groups
while (((old_arrive ^ counter.load()) & 0x80000000) == 0)
;
}
item.barrier();
}
/// Synchronize work items from all work groups within a SYCL kernel.
/// \param [in] item: The nd_item identifying the current work-item and its work-group.
/// \param [in] counter: An atomic object defined on a device memory which can
/// be accessed by work items in all work groups. The initial value of the
/// counter should be zero.
/// Note: Please make sure that all the work items of all work groups within
/// a SYCL kernel can be scheduled actively at the same time on a device.
template <>
inline void
nd_range_barrier(const sycl::nd_item<1> &item,
sycl::atomic_ref<
unsigned int, sycl::memory_order::seq_cst,
sycl::memory_scope::device,
sycl::access::address_space::global_space> &counter) {
unsigned int num_groups = item.get_group_range(0);
item.barrier();
if (item.get_local_linear_id() == 0) {
unsigned int inc = 1;
unsigned int old_arrive = 0;
bool is_group0 = (item.get_group(0) == 0);
if (is_group0) {
inc = 0x80000000 - (num_groups - 1);
}
old_arrive = counter.fetch_add(inc);
// Synchronize all the work groups
while (((old_arrive ^ counter.load()) & 0x80000000) == 0)
;
}
item.barrier();
}
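/// Example (illustrative sketch): using nd_range_barrier inside a kernel.
/// `sync_ptr` is assumed to be a zero-initialized unsigned int allocated in
/// device global memory (e.g. via sycl::malloc_device), and the launch must
/// not use more work-groups than can be resident on the device at once.
///   q.parallel_for(
///       sycl::nd_range<1>(sycl::range<1>(1024), sycl::range<1>(256)),
///       [=](sycl::nd_item<1> it) {
///         sycl::atomic_ref<unsigned int, sycl::memory_order::seq_cst,
///                          sycl::memory_scope::device,
///                          sycl::access::address_space::global_space>
///             counter(*sync_ptr);
///         // ... phase 1 ...
///         dpct::experimental::nd_range_barrier(it, counter);
///         // ... phase 2 ...
///       });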
/// The logical-group is a logical collection of some work-items within a
/// work-group.
/// Note: Please make sure that the logical-group size is a power of 2 in the
/// range [1, current_sub_group_size].
class logical_group {
sycl::nd_item<3> _item;
sycl::group<3> _g;
uint32_t _logical_group_size;
uint32_t _group_linear_range_in_parent;
public:
/// Dividing \p parent_group into several logical-groups.
/// \param [in] item Current work-item.
/// \param [in] parent_group The group to be divided.
/// \param [in] size The logical-group size.
logical_group(sycl::nd_item<3> item, sycl::group<3> parent_group,
uint32_t size)
: _item(item), _g(parent_group), _logical_group_size(size) {
_group_linear_range_in_parent =
(_g.get_local_linear_range() - 1) / _logical_group_size + 1;
}
/// Returns the index of the work-item within the logical-group.
uint32_t get_local_linear_id() const {
return _item.get_local_linear_id() % _logical_group_size;
}
/// Returns the index of the logical-group in the parent group.
uint32_t get_group_linear_id() const {
return _item.get_local_linear_id() / _logical_group_size;
}
/// Returns the number of work-items in the logical-group.
uint32_t get_local_linear_range() const {
if (_g.get_local_linear_range() % _logical_group_size == 0) {
return _logical_group_size;
}
uint32_t last_item_group_id =
_g.get_local_linear_range() / _logical_group_size;
uint32_t first_of_last_group = last_item_group_id * _logical_group_size;
if (_item.get_local_linear_id() >= first_of_last_group) {
return _g.get_local_linear_range() - first_of_last_group;
} else {
return _logical_group_size;
}
}
/// Returns the number of logical-group in the parent group.
uint32_t get_group_linear_range() const {
return _group_linear_range_in_parent;
}
};
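/// Example (illustrative sketch): splitting a work-group into 16-item logical
/// groups inside a kernel body; `item` is assumed to be the current
/// sycl::nd_item<3>.
///   dpct::experimental::logical_group lg(item, item.get_group(), 16);
///   uint32_t lane  = lg.get_local_linear_id();    // id within the tile
///   uint32_t tile  = lg.get_group_linear_id();    // tile index
///   uint32_t tiles = lg.get_group_linear_range(); // number of tiles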
// The original source of the function calculate_max_active_wg_per_xecore was
// under the license below:
//
// Copyright Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
/// This function is used for occupancy calculation; it computes the maximum
/// number of active work-groups per Xe-Core. Ref to
/// https://github.com/oneapi-src/oneAPI-samples/tree/master/Tools/GPU-Occupancy-Calculator
/// \param [out] num_wg Active work-group number.
/// \param [in] wg_size Work-group size.
/// \param [in] slm_size Shared local memory size in bytes.
/// \param [in] sg_size Sub-group size.
/// \param [in] used_barrier Whether barrier is used.
/// \param [in] used_large_grf Whether large General Register File is used.
/// \return If no error, returns 0.
/// If \p wg_size exceeds the max work-group size, the max work-group size will
/// be used instead of \p wg_size and -1 is returned.
inline int calculate_max_active_wg_per_xecore(int *num_wg, int wg_size,
int slm_size = 0,
int sg_size = 32,
bool used_barrier = false,
bool used_large_grf = false) {
int ret = 0;
const int slm_size_per_xe_core = 64 * 1024;
const int max_barrier_registers = 32;
dpct::device_ext &dev = dpct::get_current_device();
size_t max_wg_size = dev.get_info<sycl::info::device::max_work_group_size>();
if (wg_size > max_wg_size) {
wg_size = max_wg_size;
ret = -1;
}
int num_threads_ss = 56;
int max_num_wg = 56;
if (dev.has(sycl::aspect::ext_intel_gpu_eu_count_per_subslice) &&
dev.has(sycl::aspect::ext_intel_gpu_hw_threads_per_eu)) {
auto eu_count =
dev.get_info<sycl::info::device::ext_intel_gpu_eu_count_per_subslice>();
auto threads_count =
dev.get_info<sycl::ext::intel::info::device::gpu_hw_threads_per_eu>();
num_threads_ss = eu_count * threads_count;
max_num_wg = eu_count * threads_count;
}
if (used_barrier) {
max_num_wg = max_barrier_registers;
}
// Calculate num_wg_slm
int num_wg_slm = 0;
if (slm_size == 0) {
num_wg_slm = max_num_wg;
} else {
num_wg_slm = std::floor((float)slm_size_per_xe_core / slm_size);
}
// Calculate num_wg_threads
if (used_large_grf)
num_threads_ss = num_threads_ss / 2;
int num_threads = std::ceil((float)wg_size / sg_size);
int num_wg_threads = std::floor((float)num_threads_ss / num_threads);
// Calculate num_wg
*num_wg = std::min(num_wg_slm, num_wg_threads);
*num_wg = std::min(*num_wg, max_num_wg);
return ret;
}
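/// Example (illustrative sketch): querying occupancy for a 256-item
/// work-group that uses 8 KB of shared local memory and a barrier.
///   int num_wg = 0;
///   int status = dpct::experimental::calculate_max_active_wg_per_xecore(
///       &num_wg, /*wg_size=*/256, /*slm_size=*/8 * 1024,
///       /*sg_size=*/32, /*used_barrier=*/true);
///   // status == 0 on success; num_wg holds the estimated active work-groups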
} // namespace experimental
/// If x <= 2, then return a pointer to the default queue;
/// otherwise, return x reinterpreted as a dpct::queue_ptr.
inline queue_ptr int_as_queue_ptr(uintptr_t x) {
return x <= 2 ?
&get_default_queue()
: reinterpret_cast<queue_ptr>(x);
}
template <int n_nondefault_params, int n_default_params, typename T>
class args_selector;
/// args_selector is a helper class for extracting arguments from an
/// array of pointers to arguments or buffer of arguments to pass to a
/// kernel function.
///
/// \param R(Ts...) The type of the kernel
/// \param n_nondefault_params The number of nondefault parameters of the kernel
/// (excluding parameters like sycl::nd_item, etc.)
/// \param n_default_params The number of default parameters of the kernel
///
/// Example usage:
/// With the following kernel:
/// void foo(sycl::float2 *x, int n, sycl::nd_item<3> item_ct1, float f=.1) {}
/// and with the declaration:
/// args_selector<2, 1, decltype(foo)> selector(kernelParams, extra);
/// we have:
/// selector.get<0>() returns a reference to sycl::float2*,
/// selector.get<1>() returns a reference to int,
/// selector.get<2>() returns a reference to float
template <int n_nondefault_params, int n_default_params,
typename R, typename... Ts>
class args_selector<n_nondefault_params, n_default_params, R(Ts...)> {
private:
void **kernel_params;
char *args_buffer;
template <int i>
static constexpr int account_for_default_params() {
constexpr int n_total_params = sizeof...(Ts);
if constexpr (i >= n_nondefault_params) {
return n_total_params - n_default_params + (i - n_nondefault_params);
} else {
return i;
}
}
public:
/// Get the type of the ith argument of R(Ts...)
/// \param [in] i Index of parameter to get
/// \returns Type of ith parameter
template <int i>
using arg_type = std::tuple_element_t<account_for_default_params<i>(),
std::tuple<Ts...>>;
private:
template <int i>
static constexpr int get_offset() {
if constexpr (i == 0) {
// we can assume args_buffer is properly aligned to the
// first argument
return 0;
} else {
constexpr int prev_off = get_offset<i-1>();
constexpr int prev_past_end = prev_off + sizeof(arg_type<i-1>);
using T = arg_type<i>;
// is the past-the-end of the i-1st element properly aligned
// with the ith element's alignment?
if constexpr (prev_past_end % alignof(T) == 0) {
return prev_past_end;
}
// otherwise bump prev_past_end to match alignment
else {
return prev_past_end + (alignof(T) - (prev_past_end % alignof(T)));
}
}
}
static char *get_args_buffer(void **extra) {
if (!extra)
return nullptr;
for (; (std::size_t) *extra != 0; ++extra) {
if ((std::size_t) *extra == 1) {
return static_cast<char*>(*(extra+1));
}
}
return nullptr;
}
public:
/// If kernel_params is nonnull, then args_selector will
/// extract arguments from kernel_params. Otherwise, it
/// will extract them from extra.
/// \param [in] kernel_params Array of pointers to arguments,
/// or a null pointer.
/// \param [in] extra Array containing pointer to argument buffer.
args_selector(void **kernel_params, void **extra)
: kernel_params(kernel_params),
args_buffer(get_args_buffer(extra))
{}
/// Get a reference to the ith argument extracted from kernel_params
/// or extra.
/// \param [in] i Index of argument to get
/// \returns Reference to the ith argument
template <int i>
arg_type<i> &get() {
if (kernel_params) {
return *static_cast<arg_type<i>*>(kernel_params[i]);
} else {
return *reinterpret_cast<arg_type<i>*>(args_buffer + get_offset<i>());
}
}
};
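/// Example (illustrative sketch, extending the usage shown above): forwarding
/// the extracted arguments to the kernel inside a command group; `q`,
/// `kernelParams`, `extra` and the nd_range `nr` are assumed to come from the
/// caller.
///   args_selector<2, 1, decltype(foo)> selector(kernelParams, extra);
///   auto &x = selector.get<0>();
///   auto &n = selector.get<1>();
///   auto &f = selector.get<2>();
///   q.parallel_for(nr, [=](sycl::nd_item<3> item) { foo(x, n, item, f); });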
#ifdef _WIN32
#define DPCT_EXPORT __declspec(dllexport)
#else
#define DPCT_EXPORT
#endif
} // namespace dpct
#endif // __DPCT_UTIL_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/image.hpp | //==---- image.hpp --------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_IMAGE_HPP__
#define __DPCT_IMAGE_HPP__
#include <sycl/sycl.hpp>
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
enum class image_channel_data_type {
signed_int,
unsigned_int,
fp,
};
class image_channel;
class image_wrapper_base;
namespace detail {
/// Image object type traits, with accessor type and sampled data type defined.
/// The data type of an image accessor must be one of sycl::int4, sycl::uint4,
/// sycl::float4 and sycl::half4. The data type of accessors with 8-bit/16-bit
/// channel width will be 32 bits; sycl::half is an exception.
template <class T> struct image_trait {
using acc_data_t = sycl::vec<T, 4>;
template <int dimensions>
using accessor_t =
sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read,
sycl::access::target::image>;
template <int dimensions>
using array_accessor_t =
sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read,
sycl::access::target::image_array>;
using data_t = T;
using elem_t = T;
static constexpr image_channel_data_type data_type =
std::is_integral<T>::value
? (std::is_signed<T>::value ? image_channel_data_type::signed_int
: image_channel_data_type::unsigned_int)
: image_channel_data_type::fp;
static constexpr int channel_num = 1;
};
template <>
struct image_trait<std::uint8_t> : public image_trait<std::uint32_t> {
using data_t = std::uint8_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::uint16_t>
: public image_trait<std::uint32_t> {
using data_t = std::uint16_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::int8_t> : public image_trait<std::int32_t> {
using data_t = std::int8_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::int16_t> : public image_trait<std::int32_t> {
using data_t = std::int16_t;
using elem_t = data_t;
};
template <>
struct image_trait<char>
: public image_trait<typename std::conditional<
std::is_signed<char>::value, signed char, unsigned char>::type> {};
template <class T>
struct image_trait<sycl::vec<T, 1>> : public image_trait<T> {};
template <class T>
struct image_trait<sycl::vec<T, 2>> : public image_trait<T> {
using data_t = sycl::vec<T, 2>;
static constexpr int channel_num = 2;
};
template <class T>
struct image_trait<sycl::vec<T, 3>>
: public image_trait<sycl::vec<T, 4>> {
static constexpr int channel_num = 3;
};
template <class T>
struct image_trait<sycl::vec<T, 4>> : public image_trait<T> {
using data_t = sycl::vec<T, 4>;
static constexpr int channel_num = 4;
};
/// Functor to fetch data from read result of an image accessor.
template <class T> struct fetch_data {
using return_t = typename image_trait<T>::data_t;
using acc_data_t = typename image_trait<T>::acc_data_t;
return_t operator()(acc_data_t &&original_data) {
return (return_t)original_data.r();
}
};
template <class T>
struct fetch_data<sycl::vec<T, 1>> : public fetch_data<T> {};
template <class T> struct fetch_data<sycl::vec<T, 2>> {
using return_t = typename image_trait<sycl::vec<T, 2>>::data_t;
using acc_data_t = typename image_trait<sycl::vec<T, 2>>::acc_data_t;
return_t operator()(acc_data_t &&origin_data) {
return return_t(origin_data.r(), origin_data.g());
}
};
template <class T>
struct fetch_data<sycl::vec<T, 3>>
: public fetch_data<sycl::vec<T, 4>> {};
template <class T> struct fetch_data<sycl::vec<T, 4>> {
using return_t = typename image_trait<sycl::vec<T, 4>>::data_t;
using acc_data_t = typename image_trait<sycl::vec<T, 4>>::acc_data_t;
return_t operator()(acc_data_t &&origin_data) {
return return_t(origin_data.r(), origin_data.g(), origin_data.b(),
origin_data.a());
}
};
/// Create image according to the given type \p T and \p dims.
template <class T> static image_wrapper_base *create_image_wrapper(int dims);
/// Create image with given data type \p T, channel order and dims
template <class T>
static image_wrapper_base *create_image_wrapper(unsigned channel_num, int dims);
/// Create image with channel info and specified dimensions.
static image_wrapper_base *create_image_wrapper(image_channel channel, int dims);
} // namespace detail
/// Image channel info, include channel number, order, data width and type
class image_channel {
image_channel_data_type _type = image_channel_data_type::signed_int;
/// Number of channels.
unsigned _channel_num = 0;
/// Total size of all channels in bytes.
unsigned _total_size = 0;
/// Size of each channel in bytes.
unsigned _channel_size = 0;
public:
/// Create image channel info according to template argument \p T.
template <class T> static image_channel create() {
image_channel channel;
channel.set_channel_size(detail::image_trait<T>::channel_num,
sizeof(typename detail::image_trait<T>::elem_t) *
8);
channel.set_channel_data_type(detail::image_trait<T>::data_type);
return channel;
}
image_channel() = default;
image_channel_data_type get_channel_data_type() { return _type; }
void set_channel_data_type(image_channel_data_type type) { _type = type; }
unsigned get_total_size() { return _total_size; }
unsigned get_channel_num() { return _channel_num; }
void set_channel_num(unsigned channel_num) {
_channel_num = channel_num;
_total_size = _channel_size * _channel_num;
}
/// image_channel constructor.
/// \param r Channel r width in bits.
/// \param g Channel g width in bits. Should be the same as \p r, or zero.
/// \param b Channel b width in bits. Should be the same as \p g, or zero.
/// \param a Channel a width in bits. Should be the same as \p b, or zero.
/// \param data_type Image channel data type: signed_int, unsigned_int or fp.
image_channel(int r, int g, int b, int a, image_channel_data_type data_type) {
_type = data_type;
if (a) {
assert(r == a && "SYCL doesn't support different channel size");
assert(r == b && "SYCL doesn't support different channel size");
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(4, a);
} else if (b) {
assert(r == b && "SYCL doesn't support different channel size");
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(3, b);
} else if (g) {
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(2, g);
} else {
set_channel_size(1, r);
}
}
sycl::image_channel_type get_channel_type() const {
if (_channel_size == 4) {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int32;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int32;
else if (_type == image_channel_data_type::fp)
return sycl::image_channel_type::fp32;
} else if (_channel_size == 2) {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int16;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int16;
else if (_type == image_channel_data_type::fp)
return sycl::image_channel_type::fp16;
} else {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int8;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int8;
}
assert(false && "unexpected channel data kind and channel size");
return sycl::image_channel_type::signed_int32;
}
void set_channel_type(sycl::image_channel_type type) {
switch (type) {
case sycl::image_channel_type::unsigned_int8:
_type = image_channel_data_type::unsigned_int;
_channel_size = 1;
break;
case sycl::image_channel_type::unsigned_int16:
_type = image_channel_data_type::unsigned_int;
_channel_size = 2;
break;
case sycl::image_channel_type::unsigned_int32:
_type = image_channel_data_type::unsigned_int;
_channel_size = 4;
break;
case sycl::image_channel_type::signed_int8:
_type = image_channel_data_type::signed_int;
_channel_size = 1;
break;
case sycl::image_channel_type::signed_int16:
_type = image_channel_data_type::signed_int;
_channel_size = 2;
break;
case sycl::image_channel_type::signed_int32:
_type = image_channel_data_type::signed_int;
_channel_size = 4;
break;
case sycl::image_channel_type::fp16:
_type = image_channel_data_type::fp;
_channel_size = 2;
break;
case sycl::image_channel_type::fp32:
_type = image_channel_data_type::fp;
_channel_size = 4;
break;
default:
break;
}
_total_size = _channel_size * _channel_num;
}
sycl::image_channel_order get_channel_order() const {
switch (_channel_num) {
case 1:
return sycl::image_channel_order::r;
case 2:
return sycl::image_channel_order::rg;
case 3:
return sycl::image_channel_order::rgb;
case 4:
return sycl::image_channel_order::rgba;
default:
return sycl::image_channel_order::r;
}
}
/// Get the size for each channel in bits.
unsigned get_channel_size() const { return _channel_size * 8; }
/// Set channel size.
/// \param in_channel_num Number of channels to set.
/// \param channel_size Size for each channel in bits.
void set_channel_size(unsigned in_channel_num,
unsigned channel_size) {
if (in_channel_num < _channel_num)
return;
_channel_num = in_channel_num;
_channel_size = channel_size / 8;
_total_size = _channel_size * _channel_num;
}
};
/// 2D or 3D matrix data for image.
class image_matrix {
image_channel _channel;
int _range[3] = {1, 1, 1};
int _dims = 0;
void *_host_data = nullptr;
/// Set range of each dimension.
template <int dimensions> void set_range(sycl::range<dimensions> range) {
for (int i = 0; i < dimensions; ++i)
_range[i] = range[i];
_dims = dimensions;
}
template <int... DimIdx>
sycl::range<sizeof...(DimIdx)> get_range(integer_sequence<DimIdx...>) {
return sycl::range<sizeof...(DimIdx)>(_range[DimIdx]...);
}
public:
/// Constructor with channel info and dimension size info.
template <int dimensions>
image_matrix(image_channel channel, sycl::range<dimensions> range)
: _channel(channel) {
set_range(range);
_host_data = std::malloc(range.size() * _channel.get_total_size());
}
image_matrix(sycl::image_channel_type channel_type, unsigned channel_num,
size_t x, size_t y) {
_channel.set_channel_type(channel_type);
_channel.set_channel_num(channel_num);
_dims = 1;
_range[0] = x;
if (y) {
_dims = 2;
_range[1] = y;
}
_host_data = std::malloc(_range[0] * _range[1] * _channel.get_total_size());
}
/// Construct a new image class with the matrix data.
template <int dimensions> sycl::image<dimensions> *create_image() {
return create_image<dimensions>(_channel);
}
/// Construct a new image class with the matrix data.
template <int dimensions>
sycl::image<dimensions> *create_image(image_channel channel) {
return new sycl::image<dimensions>(
_host_data, channel.get_channel_order(), channel.get_channel_type(),
get_range(make_index_sequence<dimensions>()),
sycl::property::image::use_host_ptr());
}
/// Get channel info.
inline image_channel get_channel() { return _channel; }
/// Get range of the image.
sycl::range<3> get_range() {
return sycl::range<3>(_range[0], _range[1], _range[2]);
}
/// Get matrix dims.
inline int get_dims() { return _dims; }
/// Convert to pitched data.
pitched_data to_pitched_data() {
return pitched_data(_host_data, _range[0], _range[0], _range[1]);
}
~image_matrix() {
if (_host_data)
std::free(_host_data);
_host_data = nullptr;
}
};
using image_matrix_p = image_matrix *;
enum class image_data_type { matrix, linear, pitch, unsupport };
/// Image data info.
class image_data {
public:
image_data() { _type = image_data_type::unsupport; }
image_data(image_matrix_p matrix_data) { set_data(matrix_data); }
image_data(void *data_ptr, size_t x_size, image_channel channel) {
set_data(data_ptr, x_size, channel);
}
image_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size,
image_channel channel) {
set_data(data_ptr, x_size, y_size, pitch_size, channel);
}
void set_data(image_matrix_p matrix_data) {
_type = image_data_type::matrix;
_data = matrix_data;
_channel = matrix_data->get_channel();
}
void set_data(void *data_ptr, size_t x_size, image_channel channel) {
_type = image_data_type::linear;
_data = data_ptr;
_x = x_size;
_channel = channel;
}
void set_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size,
image_channel channel) {
_type = image_data_type::pitch;
_data = data_ptr;
_x = x_size;
_y = y_size;
_pitch = pitch_size;
_channel = channel;
}
image_data_type get_data_type() const { return _type; }
void set_data_type(image_data_type type) { _type = type; }
void *get_data_ptr() const { return _data; }
void set_data_ptr(void *data) { _data = data; }
size_t get_x() const { return _x; }
void set_x(size_t x) { _x = x; }
size_t get_y() const { return _y; }
void set_y(size_t y) { _y = y; }
size_t get_pitch() const { return _pitch; }
void set_pitch(size_t pitch) { _pitch = pitch; }
image_channel get_channel() const { return _channel; }
void set_channel(image_channel channel) { _channel = channel; }
image_channel_data_type get_channel_data_type() {
return _channel.get_channel_data_type();
}
void set_channel_data_type(image_channel_data_type type) {
_channel.set_channel_data_type(type);
}
unsigned get_channel_size() { return _channel.get_channel_size(); }
void set_channel_size(unsigned channel_num, unsigned channel_size) {
return _channel.set_channel_size(channel_num, channel_size);
}
unsigned get_channel_num() { return _channel.get_channel_num(); }
void set_channel_num(unsigned num) {
return _channel.set_channel_num(num);
}
sycl::image_channel_type get_channel_type() {
return _channel.get_channel_type();
}
void set_channel_type(sycl::image_channel_type type) {
return _channel.set_channel_type(type);
}
private:
image_data_type _type;
void *_data = nullptr;
size_t _x, _y, _pitch;
image_channel _channel;
};
/// Image sampling info, include addressing mode, filtering mode and
/// normalization info.
class sampling_info {
sycl::addressing_mode _addressing_mode =
sycl::addressing_mode::clamp_to_edge;
sycl::filtering_mode _filtering_mode = sycl::filtering_mode::nearest;
sycl::coordinate_normalization_mode _coordinate_normalization_mode =
sycl::coordinate_normalization_mode::unnormalized;
public:
sycl::addressing_mode get_addressing_mode() { return _addressing_mode; }
void set(sycl::addressing_mode addressing_mode) { _addressing_mode = addressing_mode; }
sycl::filtering_mode get_filtering_mode() { return _filtering_mode; }
void set(sycl::filtering_mode filtering_mode) { _filtering_mode = filtering_mode; }
sycl::coordinate_normalization_mode get_coordinate_normalization_mode() {
return _coordinate_normalization_mode;
}
void set(sycl::coordinate_normalization_mode coordinate_normalization_mode) {
_coordinate_normalization_mode = coordinate_normalization_mode;
}
bool is_coordinate_normalized() {
return _coordinate_normalization_mode ==
sycl::coordinate_normalization_mode::normalized;
}
void set_coordinate_normalization_mode(int is_normalized) {
_coordinate_normalization_mode =
is_normalized ? sycl::coordinate_normalization_mode::normalized
: sycl::coordinate_normalization_mode::unnormalized;
}
void
set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode,
sycl::coordinate_normalization_mode coordinate_normalization_mode) {
set(addressing_mode);
set(filtering_mode);
set(coordinate_normalization_mode);
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode, int is_normalized) {
set(addressing_mode);
set(filtering_mode);
set_coordinate_normalization_mode(is_normalized);
}
sycl::sampler get_sampler() {
return sycl::sampler(_coordinate_normalization_mode, _addressing_mode,
_filtering_mode);
}
};
/// Image base class.
class image_wrapper_base {
sampling_info _sampling_info;
image_data _data;
public:
virtual ~image_wrapper_base() = 0;
void attach(image_data data) { set_data(data); }
/// Attach matrix data to this class.
void attach(image_matrix *matrix) {
detach();
image_wrapper_base::set_data(image_data(matrix));
}
/// Attach matrix data to this class.
void attach(image_matrix *matrix, image_channel channel) {
attach(matrix);
image_wrapper_base::set_channel(channel);
}
/// Attach linear data to this class.
void attach(const void *ptr, size_t count) {
attach(ptr, count, get_channel());
}
/// Attach linear data to this class.
void attach(const void *ptr, size_t count, image_channel channel) {
detach();
image_wrapper_base::set_data(image_data(const_cast<void *>(ptr), count, channel));
}
/// Attach 2D data to this class.
void attach(const void *data, size_t x, size_t y, size_t pitch) {
attach(data, x, y, pitch, get_channel());
}
/// Attach 2D data to this class.
void attach(const void *data, size_t x, size_t y, size_t pitch,
image_channel channel) {
detach();
image_wrapper_base::set_data(
image_data(const_cast<void *>(data), x, y, pitch, channel));
}
/// Detach data.
virtual void detach() {}
sampling_info get_sampling_info() { return _sampling_info; }
void set_sampling_info(sampling_info info) {
_sampling_info = info;
}
const image_data &get_data() { return _data; }
void set_data(image_data data) { _data = data; }
image_channel get_channel() { return _data.get_channel(); }
void set_channel(image_channel channel) { _data.set_channel(channel); }
image_channel_data_type get_channel_data_type() {
return _data.get_channel_data_type();
}
void set_channel_data_type(image_channel_data_type type) {
_data.set_channel_data_type(type);
}
unsigned get_channel_size() { return _data.get_channel_size(); }
void set_channel_size(unsigned channel_num, unsigned channel_size) {
return _data.set_channel_size(channel_num, channel_size);
}
sycl::addressing_mode get_addressing_mode() {
return _sampling_info.get_addressing_mode();
}
void set(sycl::addressing_mode addressing_mode) {
_sampling_info.set(addressing_mode);
}
sycl::filtering_mode get_filtering_mode() {
return _sampling_info.get_filtering_mode();
}
void set(sycl::filtering_mode filtering_mode) {
_sampling_info.set(filtering_mode);
}
sycl::coordinate_normalization_mode get_coordinate_normalization_mode() {
return _sampling_info.get_coordinate_normalization_mode();
}
void
set(sycl::coordinate_normalization_mode coordinate_normalization_mode) {
_sampling_info.set(coordinate_normalization_mode);
}
bool is_coordinate_normalized() {
return _sampling_info.is_coordinate_normalized();
}
void set_coordinate_normalization_mode(int is_normalized) {
_sampling_info.set_coordinate_normalization_mode(is_normalized);
}
void
set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode,
sycl::coordinate_normalization_mode coordinate_normalization_mode) {
set(addressing_mode);
set(filtering_mode);
set(coordinate_normalization_mode);
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode, int is_normalized) {
set(addressing_mode);
set(filtering_mode);
set_coordinate_normalization_mode(is_normalized);
}
unsigned get_channel_num() { return _data.get_channel_num(); }
void set_channel_num(unsigned num) {
return _data.set_channel_num(num);
}
sycl::image_channel_type get_channel_type() {
return _data.get_channel_type();
}
void set_channel_type(sycl::image_channel_type type) {
return _data.set_channel_type(type);
}
sycl::sampler get_sampler() { return _sampling_info.get_sampler(); }
};
inline image_wrapper_base::~image_wrapper_base() {}
using image_wrapper_base_p = image_wrapper_base *;
template <class T, int dimensions, bool IsImageArray> class image_accessor_ext;
/// Image class, wrapper of sycl::image.
template <class T, int dimensions, bool IsImageArray = false> class image_wrapper : public image_wrapper_base {
sycl::image<dimensions> *_image = nullptr;
#ifndef DPCT_USM_LEVEL_NONE
std::vector<char> _host_buffer;
#endif
void create_image(sycl::queue q) {
auto &data = get_data();
if (data.get_data_type() == image_data_type::matrix) {
_image = static_cast<image_matrix_p>(data.get_data_ptr())
->create_image<dimensions>(data.get_channel());
return;
}
auto ptr = data.get_data_ptr();
auto channel = data.get_channel();
if (detail::get_pointer_attribute(q, ptr) == detail::pointer_access_attribute::device_only) {
#ifdef DPCT_USM_LEVEL_NONE
ptr = get_buffer(ptr)
.template get_access<sycl::access_mode::read_write>()
.get_pointer();
#else
auto sz = data.get_x();
if (data.get_data_type() == image_data_type::pitch)
sz *= channel.get_total_size() * data.get_y();
_host_buffer.resize(sz);
q.memcpy(_host_buffer.data(), ptr, sz).wait();
ptr = _host_buffer.data();
#endif
}
if constexpr (dimensions == 1) {
assert(data.get_data_type() == image_data_type::linear);
_image = new sycl::image<1>(
ptr, channel.get_channel_order(), channel.get_channel_type(),
sycl::range<1>(data.get_x() / channel.get_total_size()));
} else if constexpr (dimensions == 2) {
assert(data.get_data_type() == image_data_type::pitch);
_image = new sycl::image<2>(ptr, channel.get_channel_order(),
channel.get_channel_type(),
sycl::range<2>(data.get_x(), data.get_y()),
sycl::range<1>(data.get_pitch()));
} else {
throw std::runtime_error("3D image only support matrix data");
}
return;
}
public:
using acc_data_t = typename detail::image_trait<T>::acc_data_t;
using accessor_t =
typename image_accessor_ext<T, IsImageArray ? (dimensions - 1) : dimensions,
IsImageArray>::accessor_t;
image_wrapper() { set_channel(image_channel::create<T>()); }
~image_wrapper() { detach(); }
/// Get image accessor.
accessor_t get_access(sycl::handler &cgh, sycl::queue &q = get_default_queue()) {
if (!_image)
create_image(q);
return accessor_t(*_image, cgh);
}
/// Detach data.
void detach() override {
if (_image)
delete _image;
_image = nullptr;
}
};
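/// Example (illustrative sketch): binding linear device data to a 1D float
/// image wrapper and reading it in a kernel; `dev_ptr`, `n` and `q` are
/// assumptions for demonstration only.
///   static dpct::image_wrapper<float, 1> tex;
///   tex.attach(dev_ptr, n * sizeof(float));
///   q.submit([&](sycl::handler &cgh) {
///     auto acc = tex.get_access(cgh, q);
///     auto smpl = tex.get_sampler();
///     cgh.parallel_for(sycl::range<1>(n), [=](sycl::id<1> i) {
///       float v = dpct::image_accessor_ext<float, 1>(smpl, acc).read((int)i[0]);
///       (void)v;
///     });
///   });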
/// Wrap sampler and image accessor together.
template <class T, int dimensions, bool IsImageArray = false>
class image_accessor_ext {
public:
using accessor_t =
typename detail::image_trait<T>::template accessor_t<dimensions>;
using data_t = typename detail::image_trait<T>::data_t;
sycl::sampler _sampler;
accessor_t _img_acc;
public:
image_accessor_ext(sycl::sampler sampler, accessor_t acc)
: _sampler(sampler), _img_acc(acc) {}
/// Read data from accessor.
template <bool Available = dimensions == 3>
typename std::enable_if<Available, data_t>::type read(float x, float y,
float z) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::float4(x, y, z, 0), _sampler));
}
/// Read data from accessor.
template <class Coord0, class Coord1, class Coord2,
bool Available = dimensions == 3 &&
std::is_integral<Coord0>::value
&&std::is_integral<Coord1>::value
&&std::is_integral<Coord2>::value>
typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y,
Coord2 z) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::int4(x, y, z, 0), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(float x, float y) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::float2(x, y), _sampler));
}
/// Read data from accessor.
template <class Coord0, class Coord1,
bool Available = dimensions == 2 &&
std::is_integral<Coord0>::value
&&std::is_integral<Coord1>::value>
typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::int2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(float x) {
return detail::fetch_data<T>()(_img_acc.read(x, _sampler));
}
/// Read data from accessor.
template <class CoordT,
bool Available = dimensions == 1 && std::is_integral<CoordT>::value>
typename std::enable_if<Available, data_t>::type read(CoordT x) {
return detail::fetch_data<T>()(_img_acc.read(x, _sampler));
}
};
template <class T, int dimensions> class image_accessor_ext<T, dimensions, true> {
public:
using accessor_t =
typename detail::image_trait<T>::template array_accessor_t<dimensions>;
using data_t = typename detail::image_trait<T>::data_t;
sycl::sampler _sampler;
accessor_t _img_acc;
public:
image_accessor_ext(sycl::sampler sampler, accessor_t acc)
: _sampler(sampler), _img_acc(acc) {}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(int index, float x,
float y) {
return detail::fetch_data<T>()(
_img_acc[index].read(sycl::float2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(int index, int x, int y) {
return detail::fetch_data<T>()(
_img_acc[index].read(sycl::int2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(int index, float x) {
return detail::fetch_data<T>()(
_img_acc[index].read(x, _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(int index, int x) {
return detail::fetch_data<T>()(
_img_acc[index].read(x, _sampler));
}
};
/// Create image wrapper according to image data and sampling info.
/// \param data Image data used to create image wrapper.
/// \param info Image sampling info used to create image wrapper.
/// \returns Pointer to base class of created image wrapper object.
static inline image_wrapper_base *create_image_wrapper(image_data data,
sampling_info info) {
image_channel channel;
int dims = 1;
if (data.get_data_type() == image_data_type::matrix) {
auto matrix = (image_matrix_p)data.get_data_ptr();
channel = matrix->get_channel();
dims = matrix->get_dims();
} else {
if (data.get_data_type() == image_data_type::pitch) {
dims = 2;
}
channel = data.get_channel();
}
if (auto ret = detail::create_image_wrapper(channel, dims)) {
ret->set_sampling_info(info);
ret->set_data(data);
return ret;
}
return nullptr;
}
namespace detail {
/// Create image according to the given type \p T and \p dims.
template <class T> static image_wrapper_base *create_image_wrapper(int dims) {
switch (dims) {
case 1:
return new image_wrapper<T, 1>();
case 2:
return new image_wrapper<T, 2>();
case 3:
return new image_wrapper<T, 3>();
default:
return nullptr;
}
}
/// Create image with given data type \p T, channel order and dims
template <class T>
static image_wrapper_base *create_image_wrapper(unsigned channel_num, int dims) {
switch (channel_num) {
case 1:
return create_image_wrapper<T>(dims);
case 2:
return create_image_wrapper<sycl::vec<T, 2>>(dims);
case 3:
return create_image_wrapper<sycl::vec<T, 3>>(dims);
case 4:
return create_image_wrapper<sycl::vec<T, 4>>(dims);
default:
return nullptr;
}
}
/// Create image with channel info and specified dimensions.
static image_wrapper_base *create_image_wrapper(image_channel channel, int dims) {
switch (channel.get_channel_type()) {
case sycl::image_channel_type::fp16:
return create_image_wrapper<sycl::half>(channel.get_channel_num(), dims);
case sycl::image_channel_type::fp32:
return create_image_wrapper<float>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int8:
return create_image_wrapper<std::int8_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int16:
return create_image_wrapper<std::int16_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int32:
return create_image_wrapper<std::int32_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::unsigned_int8:
return create_image_wrapper<std::uint8_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::unsigned_int16:
return create_image_wrapper<std::uint16_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::unsigned_int32:
return create_image_wrapper<std::uint32_t>(channel.get_channel_num(), dims);
default:
return nullptr;
}
}
} // namespace detail
} // namespace dpct
#endif // !__DPCT_IMAGE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/kernel.hpp | //==---- kernel.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_KERNEL_HPP__
#define __DPCT_KERNEL_HPP__
#include <sycl/sycl.hpp>
#ifdef _WIN32
#include <unordered_set>
#include <windows.h>
#else
#include <dlfcn.h>
#endif
#if defined(__has_include) && __has_include(<filesystem>)
#include <filesystem>
#elif defined(__has_include) && __has_include(<experimental/filesystem>)
#include <experimental/filesystem>
#else
#error "SYCLomatic runtime requires C++ filesystem support"
#endif
#include <random>
#include <image.hpp>
#include <fstream>
namespace dpct {
typedef void (*kernel_functor)(sycl::queue &, const sycl::nd_range<3> &,
unsigned int, void **, void **);
struct kernel_function_info {
int max_work_group_size = 0;
};
static inline void get_kernel_function_info(kernel_function_info *kernel_info,
const void *function) {
kernel_info->max_work_group_size =
dpct::dev_mgr::instance()
.current_device()
.get_info<sycl::info::device::max_work_group_size>();
}
static inline kernel_function_info
get_kernel_function_info(const void *function) {
kernel_function_info kernel_info;
kernel_info.max_work_group_size =
dpct::dev_mgr::instance()
.current_device()
.get_info<sycl::info::device::max_work_group_size>();
return kernel_info;
}
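// Example usage sketch (illustrative; the kernel symbol below is a
// hypothetical placeholder). Note that the `function` argument is currently
// unused: the reported limit is the current device's maximum work-group size.
//
//   dpct::kernel_function_info info;
//   dpct::get_kernel_function_info(&info, (const void *)my_kernel_symbol);
//   int wg_limit = info.max_work_group_size;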
namespace detail {
#if defined(__has_include) && __has_include(<filesystem>)
namespace fs = std::filesystem;
#else
namespace fs = std::experimental::filesystem;
#endif
/// Write data to a temporary file and return the absolute path to it.
/// The temporary file is created inside a freshly created temporary directory;
/// both have random names and are accessible only by the owning user. Exactly
/// one temporary file is created in that directory.
static inline fs::path write_data_to_file(char const *const data, size_t size) {
std::error_code ec;
if (sizeof(size_t) >= sizeof(std::streamsize) &&
size > (std::numeric_limits<std::streamsize>::max)())
throw std::runtime_error("data file too large");
// random number generator
std::random_device dev;
std::mt19937 prng(dev());
std::uniform_int_distribution<uint64_t> rand(0);
// find temporary directory
auto tmp_dir = fs::temp_directory_path(ec);
if (ec)
throw std::runtime_error("could not find temporary directory");
// create private directory
std::stringstream directory;
fs::path directory_path;
constexpr int max_attempts = 5;
int i;
for (i = 0; i < max_attempts; i++) {
directory << std::hex << rand(prng);
directory_path = tmp_dir / directory.str();
if (fs::create_directory(directory_path)) {
break;
}
}
if (i == max_attempts)
throw std::runtime_error("could not create directory");
// only allow owner permissions to private directory
fs::permissions(directory_path, fs::perms::owner_all, ec);
if (ec)
throw std::runtime_error("could not set directory permissions");
// random filename in private directory
std::stringstream filename;
filename << std::hex << rand(prng);
#ifdef _WIN32
auto filepath = directory_path / (filename.str() + ".dll");
#else
auto filepath = directory_path / filename.str();
#endif
// write data to temporary file
auto outfile = std::ofstream(filepath, std::ios::out | std::ios::binary);
if (outfile) {
// only allow program to write file
fs::permissions(filepath, fs::perms::owner_write, ec);
if (ec)
throw std::runtime_error("could not set permissions");
outfile.write(data, size);
if (!outfile.good())
throw std::runtime_error("could not write data");
outfile.close();
// only allow program to read/execute file
fs::permissions(filepath, fs::perms::owner_read | fs::perms::owner_exec,
ec);
if (ec)
throw std::runtime_error("could not set permissions");
} else
throw std::runtime_error("could not write data");
// check temporary file contents
auto infile = std::ifstream(filepath, std::ios::in | std::ios::binary);
if (infile) {
bool mismatch = false;
size_t cnt = 0;
while (1) {
char c;
infile.get(c);
if (infile.eof())
break;
if (c != data[cnt++])
mismatch = true;
}
if (cnt != size || mismatch)
throw std::runtime_error("file contents not written correctly");
} else
throw std::runtime_error("could not validate file");
if (!filepath.is_absolute())
throw std::runtime_error("temporary filepath is not absolute");
return filepath;
}
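// The extract16/extract32/extract64 helpers below decode little-endian
// integers from a raw byte buffer one byte at a time, so the result does not
// depend on the host's native endianness or on the buffer's alignment.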
static inline uint16_t extract16(unsigned char const *const ptr) {
uint16_t ret = 0;
ret |= static_cast<uint16_t>(ptr[0]) << 0;
ret |= static_cast<uint16_t>(ptr[1]) << 8;
return (ret);
}
static inline uint32_t extract32(unsigned char const *const ptr) {
uint32_t ret = 0;
ret |= static_cast<uint32_t>(ptr[0]) << 0;
ret |= static_cast<uint32_t>(ptr[1]) << 8;
ret |= static_cast<uint32_t>(ptr[2]) << 16;
ret |= static_cast<uint32_t>(ptr[3]) << 24;
return (ret);
}
static inline uint64_t extract64(unsigned char const *const ptr) {
uint64_t ret = 0;
ret |= static_cast<uint64_t>(ptr[0]) << 0;
ret |= static_cast<uint64_t>(ptr[1]) << 8;
ret |= static_cast<uint64_t>(ptr[2]) << 16;
ret |= static_cast<uint64_t>(ptr[3]) << 24;
ret |= static_cast<uint64_t>(ptr[4]) << 32;
ret |= static_cast<uint64_t>(ptr[5]) << 40;
ret |= static_cast<uint64_t>(ptr[6]) << 48;
ret |= static_cast<uint64_t>(ptr[7]) << 56;
return (ret);
}
static inline uint64_t get_lib_size(char const *const blob) {
#ifdef _WIN32
///////////////////////////////////////////////////////////////////////
// Analyze DOS stub
unsigned char const *const ublob =
reinterpret_cast<unsigned char const *const>(blob);
if (ublob[0] != 0x4d || ublob[1] != 0x5a) {
throw std::runtime_error("Blob is not a Windows DLL.");
}
uint32_t pe_header_offset = extract32(ublob + 0x3c);
///////////////////////////////////////////////////////////////////////
  // Analyze PE header
unsigned char const *const pe_header = ublob + pe_header_offset;
// signature
uint32_t pe_signature = extract32(pe_header + 0);
if (pe_signature != 0x00004550) {
throw std::runtime_error("PE-header signature is not 0x00004550");
}
// machine
uint16_t machine = extract16(pe_header + 4);
if (machine != 0x8664) {
throw std::runtime_error("Only DLLs for x64 supported");
}
// number of sections
uint16_t number_of_sections = extract16(pe_header + 6);
// sizeof optional header
uint16_t sizeof_optional_header = extract16(pe_header + 20);
// magic
uint16_t magic = extract16(pe_header + 24);
if (magic != 0x10b && magic != 0x20b) {
throw std::runtime_error("MAGIC is not 0x010b or 0x020b");
}
///////////////////////////////////////////////////////////////////////
// Analyze tail of optional header
constexpr int coff_header_size = 24;
unsigned char const *const tail_of_optional_header =
pe_header + coff_header_size + sizeof_optional_header;
if (extract64(tail_of_optional_header - 8) != 0) {
throw std::runtime_error("Optional header not zero-padded");
}
///////////////////////////////////////////////////////////////////////
// Analyze last section header
constexpr int section_header_size = 40;
unsigned char const *const last_section_header =
tail_of_optional_header + section_header_size * (number_of_sections - 1);
uint32_t sizeof_raw_data = extract32(last_section_header + 16);
uint32_t pointer_to_raw_data = extract32(last_section_header + 20);
return sizeof_raw_data + pointer_to_raw_data;
#else
if (blob[0] != 0x7F || blob[1] != 'E' || blob[2] != 'L' || blob[3] != 'F')
throw std::runtime_error("Blob is not in ELF format");
if (blob[4] != 0x02)
throw std::runtime_error("Only 64-bit headers are supported");
if (blob[5] != 0x01)
throw std::runtime_error("Only little-endian headers are supported");
unsigned char const *const ublob =
reinterpret_cast<unsigned char const *const>(blob);
uint64_t e_shoff = extract64(ublob + 0x28);
uint16_t e_shentsize = extract16(ublob + 0x3A);
uint16_t e_shnum = extract16(ublob + 0x3C);
return e_shoff + (e_shentsize * e_shnum);
#endif
}
#ifdef _WIN32
class path_lib_record {
public:
void operator=(const path_lib_record &) = delete;
~path_lib_record() {
for (auto entry : lib_to_path) {
FreeLibrary(static_cast<HMODULE>(entry.first));
fs::permissions(entry.second, fs::perms::owner_all);
fs::remove_all(entry.second.remove_filename());
}
}
static void record_lib_path(fs::path path, void *library) {
lib_to_path[library] = path;
}
static void remove_lib(void *library) {
auto path = lib_to_path[library];
std::error_code ec;
FreeLibrary(static_cast<HMODULE>(library));
fs::permissions(path, fs::perms::owner_all);
if (fs::remove_all(path.remove_filename(), ec) != 2 || ec)
// one directory and one temporary file should have been deleted
throw std::runtime_error("Directory delete failed");
lib_to_path.erase(library);
}
private:
static inline std::unordered_map<void *, fs::path> lib_to_path;
};
#endif
} // namespace detail
class kernel_library {
public:
kernel_library() : ptr{nullptr} {}
kernel_library(void *ptr) : ptr{ptr} {}
operator void *() const { return ptr; }
private:
void *ptr;
#ifdef _WIN32
static inline detail::path_lib_record single_instance_to_trigger_destructor;
#endif
};
namespace detail {
static inline kernel_library load_dl_from_data(char const *const data,
size_t size) {
fs::path filename = write_data_to_file(data, size);
#ifdef _WIN32
void *so = LoadLibraryW(filename.wstring().c_str());
#else
void *so = dlopen(filename.c_str(), RTLD_LAZY);
#endif
if (so == nullptr)
throw std::runtime_error("Failed to load kernel library");
#ifdef _WIN32
detail::path_lib_record::record_lib_path(filename, so);
#else
std::error_code ec;
// Windows DLL cannot be deleted while in use
if (fs::remove_all(filename.remove_filename(), ec) != 2 || ec)
// one directory and one temporary file should have been deleted
throw std::runtime_error("Directory delete failed");
#endif
return so;
}
} // namespace detail
/// Load kernel library and return a handle to use the library.
/// \param [in] name The name of the library.
static inline kernel_library load_kernel_library(const std::string &name) {
std::ifstream ifs;
ifs.open(name, std::ios::in | std::ios::binary);
std::stringstream buffer;
buffer << ifs.rdbuf();
const std::string buffer_string = buffer.str();
return detail::load_dl_from_data(buffer_string.c_str(), buffer_string.size());
}
/// Load kernel library whose image is already in memory and return a handle
/// to use the library.
/// \param [in] image A pointer to the image in memory.
static inline kernel_library load_kernel_library_mem(char const *const image) {
const size_t size = detail::get_lib_size(image);
return detail::load_dl_from_data(image, size);
}
/// Unload kernel library.
/// \param [in,out] library Handle to the library to be closed.
static inline void unload_kernel_library(const kernel_library &library) {
#ifdef _WIN32
detail::path_lib_record::remove_lib(library);
#else
dlclose(library);
#endif
}
class kernel_function {
public:
kernel_function() : ptr{nullptr} {}
kernel_function(dpct::kernel_functor ptr) : ptr{ptr} {}
operator void *() const { return ((void *)ptr); }
void operator()(sycl::queue &q, const sycl::nd_range<3> &range,
unsigned int a, void **args, void **extra) {
ptr(q, range, a, args, extra);
}
private:
dpct::kernel_functor ptr;
};
/// Find kernel function in a kernel library and return its address.
/// \param [in] library Handle to the kernel library.
/// \param [in] name Name of the kernel function.
static inline dpct::kernel_function
get_kernel_function(kernel_library &library, const std::string &name) {
#ifdef _WIN32
dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>(
GetProcAddress(static_cast<HMODULE>(static_cast<void *>(library)),
(name + std::string("_wrapper")).c_str()));
#else
dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>(
dlsym(library, (name + std::string("_wrapper")).c_str()));
#endif
if (fn == nullptr)
throw std::runtime_error("Failed to get function");
return fn;
}
/// Invoke a kernel function.
/// \param [in] function kernel function.
/// \param [in] queue SYCL queue used to execute kernel
/// \param [in] groupRange SYCL group range
/// \param [in] localRange SYCL local range
/// \param [in] localMemSize The size of local memory required by the kernel
/// function.
/// \param [in] kernelParams Array of pointers to kernel arguments.
/// \param [in] extra Extra arguments.
static inline void invoke_kernel_function(dpct::kernel_function &function,
sycl::queue &queue,
sycl::range<3> groupRange,
sycl::range<3> localRange,
unsigned int localMemSize,
void **kernelParams, void **extra) {
function(queue, sycl::nd_range<3>(groupRange * localRange, localRange),
localMemSize, kernelParams, extra);
}
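// Example usage sketch (illustrative): the typical life cycle of a
// runtime-loaded kernel. The library path and kernel name are hypothetical
// placeholders; note that get_kernel_function() looks up the exported symbol
// "my_kernel_wrapper".
//
//   dpct::kernel_library lib = dpct::load_kernel_library("my_kernels.so");
//   dpct::kernel_function fn = dpct::get_kernel_function(lib, "my_kernel");
//   sycl::queue q;
//   void *args[] = {/* pointers to the kernel arguments */};
//   dpct::invoke_kernel_function(fn, q, sycl::range<3>(1, 1, 16),
//                                sycl::range<3>(1, 1, 64),
//                                /*localMemSize=*/0, args, nullptr);
//   dpct::unload_kernel_library(lib);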
/// Find image wrapper in a kernel library and return its address.
/// \param [in] library Handle to the kernel library.
/// \param [in] name Name of the target image wrapper.
static inline dpct::image_wrapper_base_p
get_image_wrapper(dpct::kernel_library &library, const std::string &name) {
#ifdef _WIN32
dpct::image_wrapper_base_p fn =
reinterpret_cast<dpct::image_wrapper_base_p>(GetProcAddress(
static_cast<HMODULE>(static_cast<void *>(library)), name.c_str()));
#else
dpct::image_wrapper_base_p fn = reinterpret_cast<dpct::image_wrapper_base_p>(
dlsym(library, name.c_str()));
#endif
if (fn == nullptr)
throw std::runtime_error("Failed to get image");
return fn;
}
} // namespace dpct
#endif // __DPCT_KERNEL_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/dpct.hpp | //==---- dpct.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_HPP__
#define __DPCT_HPP__
#include <sycl/sycl.hpp>
#include <iostream>
#include <limits.h>
#include <math.h>
template <class... Args> class dpct_kernel_name;
template <int Arg> class dpct_kernel_scalar;
#include "atomic.hpp"
#include "device.hpp"
#include "image.hpp"
#include "kernel.hpp"
#include "math.hpp"
#include "memory.hpp"
#include "util.hpp"
#if defined(_MSC_VER)
#define __dpct_align__(n) __declspec(align(n))
#define __dpct_inline__ __forceinline
#else
#define __dpct_align__(n) __attribute__((aligned(n)))
#define __dpct_inline__ __inline__ __attribute__((always_inline))
#endif
#if defined(_MSC_VER)
#define __dpct_noinline__ __declspec(noinline)
#else
#define __dpct_noinline__ __attribute__((noinline))
#endif
#define DPCT_COMPATIBILITY_TEMP (600)
namespace dpct{
enum error_code { success = 0, default_error = 999 };
}
#define DPCT_CHECK_ERROR(expr) \
[&]() { \
try { \
expr; \
return dpct::success; \
} catch (std::exception const &e) { \
std::cerr << e.what() << std::endl; \
return dpct::default_error; \
} \
}()
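// Example usage sketch (illustrative): DPCT_CHECK_ERROR wraps an expression in
// an immediately invoked lambda, mapping normal completion to dpct::success
// and a thrown exception to dpct::default_error.
//
//   sycl::queue q;
//   int *dev = sycl::malloc_device<int>(16, q);
//   dpct::error_code status =
//       DPCT_CHECK_ERROR(q.memset(dev, 0, 16 * sizeof(int)).wait());
//   sycl::free(dev, q);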
#define DPCT_PI_F (3.14159274101257f)
#define DPCT_PI (3.141592653589793115998)
#endif // __DPCT_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/dnnl_utils.hpp | //==---- dnnl_utils.hpp ---------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DNNL_UTILS_HPP__
#define __DPCT_DNNL_UTILS_HPP__
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/numeric>
#include <oneapi/mkl.hpp>
#include <oneapi/mkl/rng/device.hpp>
#include <sycl/sycl.hpp>
#include <oneapi/dnnl/dnnl.hpp>
#include <oneapi/dnnl/dnnl_sycl.hpp>
#include <unordered_map>
#include <algorithm>
#include <list>
#include "memory.hpp"
#include "device.hpp"
#include "lib_common_utils.hpp"
namespace dpct {
namespace dnnl {
/// Get concatenated library version as an integer.
static inline size_t get_version() {
const ::dnnl::version_t *ver = ::dnnl::version();
return ver->major * 1000 + ver->minor * 100 + ver->patch;
}
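// For example, a linked oneDNN 3.1.2 is reported as 3 * 1000 + 1 * 100 + 2,
// i.e. 3102.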
class engine_ext;
typedef oneapi::mkl::rng::philox4x32x10 rng_engine_t;
/// An enum class representing memory layout. Used by
/// memory_desc_ext to create a memory with pre-defined layout.
enum class memory_format_tag { nchw, nhwc, nchw_blocked };
/// An enum class representing RNN data memory layout. Used by
/// memory_desc_ext to create a memory with pre-defined layout.
enum class rnn_memory_format_tag { tnc, ntc };
/// A class holding the description of an N-dimensions memory.
class memory_desc_ext {
::dnnl::memory::desc _desc;
public:
/// Convert dpct::library_data_t to dnnl::memory::data_type.
static ::dnnl::memory::data_type to_dnnl_data_type(dpct::library_data_t dt);
/// Convert dnnl::memory::data_type to dpct::library_data_t.
static dpct::library_data_t
to_dpct_library_data_t(::dnnl::memory::data_type dt, unsigned block_size);
/// Convert dpct::dnnl::memory_format_tag to dnnl::memory::format_tag.
static ::dnnl::memory::format_tag to_dnnl_format_tag(dpct::library_data_t dt,
memory_format_tag tag);
memory_desc_ext() = default;
memory_desc_ext(::dnnl::memory::desc &desc) : _desc(desc) {}
memory_desc_ext(::dnnl::memory::desc &&desc) : _desc(std::move(desc)) {}
/// Setting a 4D memory with given parameters.
/// \param [in] tag Format tag.
/// \param [in] dt Data type.
/// \param [in] n Number of images.
/// \param [in] c Number of channels.
/// \param [in] h Height of images.
/// \param [in] w Width of images.
void set(memory_format_tag tag, dpct::library_data_t dt, int n, int c, int h,
int w);
/// Setting a 3D RNN data memory with given parameters.
/// \param [in] tag RNN data format tag.
/// \param [in] dt Data type.
/// \param [in] t Number of sequence length.
/// \param [in] n Number of batch.
/// \param [in] c Height of input channel.
void set(rnn_memory_format_tag tag, dpct::library_data_t dt, int t, int n, int c);
/// Setting a 4D memory with given parameters.
/// \param [in] dt Data type.
/// \param [in] n Number of images.
/// \param [in] c Number of channels.
/// \param [in] h Height of images.
/// \param [in] w Width of images.
/// \param [in] n_stride Stride between two continuous images.
/// \param [in] c_stride Stride between two continuous channels.
/// \param [in] h_stride Stride between two continuous rows.
/// \param [in] w_stride Stride between two continuous columns.
void set(dpct::library_data_t dt, int n, int c, int h, int w, int n_stride,
int c_stride, int h_stride, int w_stride);
/// Setting a ND memory with given parameters.
/// \param [in] dt Data type.
/// \param [in] ndims Dimension of the memory.
  /// \param [in] dims Array of dimension ndims that contain the size of each
  /// memory dimension.
  /// \param [in] strides Array of dimension ndims that contain the stride of
  /// each memory dimension.
void set(dpct::library_data_t dt, int ndims, const int dims[],
const int strides[]);
/// Setting a ND memory with given parameters.
/// \param [in] tag Format tag.
/// \param [in] dt Data type.
/// \param [in] ndims Dimension of the memory.
/// \param [in] dims Array of dimension ndims that contain the size of each
/// memory dimension.
void set(memory_format_tag tag, dpct::library_data_t dt, int ndims,
const int dims[]);
/// Getting a ::dnnl::memory::desc from a memory_desc_ext.
/// \returns The ::dnnl::memory::desc.
const ::dnnl::memory::desc &get_desc() const { return _desc; }
/// Setting holding desc with given dnnl memory descriptor.
void set_desc(::dnnl::memory::desc desc) { _desc = desc; }
/// Getting a size of a memory_desc_ext in bytes.
/// \returns The size.
size_t get_size() const { return _desc.get_size(); }
/// Getting parameters from a 4D memory.
/// \param [out] dt Data type.
/// \param [out] n Number of images.
/// \param [out] c Number of channels.
/// \param [out] h Height of images.
/// \param [out] w Width of images.
/// \param [out] n_stride Stride between two continuous images.
/// \param [out] c_stride Stride between two continuous channels.
/// \param [out] h_stride Stride between two continuous rows.
/// \param [out] w_stride Stride between two continuous columns.
void get(dpct::library_data_t *dt, int *n, int *c, int *h, int *w,
int *n_stride, int *c_stride, int *h_stride, int *w_stride) const;
/// Getting parameters from a 4D memory.
/// \param [out] dt Data type.
/// \param [out] tag Format tag.
/// \param [out] n Number of images.
/// \param [out] c Number of channels.
/// \param [out] h Height of images.
/// \param [out] w Width of images.
void get(dpct::library_data_t *dt, memory_format_tag *tag, int *n, int *c,
int *h, int *w) const;
/// Getting parameters from a 3D RNN data memory.
/// \param [out] dt Data type.
/// \param [out] tag RNN data format tag.
/// \param [out] t Number of sequence length.
/// \param [out] n Number of batch.
/// \param [out] c Height of input channel.
void get(dpct::library_data_t *dt, rnn_memory_format_tag *tag, int *t, int *n,
int *c) const;
/// Getting parameters from a ND memory.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given memory descriptor.
/// \param [out] dt Data type.
/// \param [out] ndims Dimension of the memory.
/// \param [out] dims Array of dimension requested_ndims that contain the
/// size of each memory dimension.
/// \param [out] strides Array of dimension requested_ndims that contain the
/// stride of each memory dimension.
void get(int requested_ndims, dpct::library_data_t *dt, int *ndims,
int dims[], int strides[]) const;
/// Getting parameters from a ND memory.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given memory descriptor.
/// \param [out] dt Data type.
/// \param [out] tag Format tag.
/// \param [out] ndims Dimension of the memory.
/// \param [out] dims Array of dimension requested_ndims that contain the
/// size of each memory dimension.
void get(int requested_ndims, dpct::library_data_t *dt,
memory_format_tag *tag, int *ndims, int dims[]) const;
/// Getting dims from a ND memory.
/// \return The dims.
std::vector<int64_t> get_dims() const { return _desc.get_dims(); }
/// Getting strides from a ND memory.
/// \return The strides.
std::vector<int64_t> get_strides() const {
return _desc.get_strides();
}
/// Getting element num from a ND memory.
/// \return The element number.
size_t get_element_num() const {
auto dims = _desc.get_dims();
if (dims.empty()) {
return 0;
}
size_t result = 1;
for (auto &dim : dims) {
result *= dim;
}
return result;
}
operator bool() const {
return bool(_desc);
}
memory_desc_ext &operator=(std::nullptr_t) {
_desc.reset(nullptr);
return *this;
}
};
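// Example usage sketch (illustrative): describing one single-precision
// 3-channel 224x224 image in NCHW layout. The library_data_t enumerator is
// assumed to be the fp32 value defined in dpct's lib_common_utils.hpp.
//
//   dpct::dnnl::memory_desc_ext desc;
//   desc.set(dpct::dnnl::memory_format_tag::nchw,
//            dpct::library_data_t::real_float,
//            /*n=*/1, /*c=*/3, /*h=*/224, /*w=*/224);
//   size_t bytes = desc.get_size(); // 1 * 3 * 224 * 224 * sizeof(float)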
/// A class holding description for an activation operation.
class activation_desc {
::dnnl::algorithm _alg;
float _alpha;
float _beta;
public:
/// Setting an activation descriptor with given parameters.
/// \param [in] alg Activation algorithm.
/// \param [in] alpha Value of alpha parameter.
void set(::dnnl::algorithm alg, float alpha) {
_alg = alg;
if(alg == ::dnnl::algorithm::eltwise_clip) {
_alpha = 0;
_beta = alpha;
} else {
_alpha = alpha;
}
}
  /// Getting parameters from an activation descriptor.
/// \param [out] alg Activation algorithm.
/// \param [out] alpha Value of alpha parameter.
void get(::dnnl::algorithm *alg, float *alpha) const {
*alg = _alg;
if(_alg == ::dnnl::algorithm::eltwise_clip) {
*alpha = _beta;
} else {
*alpha = _alpha;
}
}
/// Setting the alpha parameter of an activation descriptor.
/// \param [in] alpha Value of alpha parameter.
void set_alpha(float alpha) { _alpha = alpha; }
/// Setting the beta parameter of an activation descriptor.
/// \param [in] beta Value of beta parameter.
void set_beta(float beta) { _beta = beta; }
/// Setting the algorithm parameter of an activation descriptor.
/// \param [in] alg Activation algorithm.
void set_algorithm(::dnnl::algorithm alg) { _alg = alg; }
/// Getting the alpha parameter from an activation descriptor.
/// \param [out] alpha Value of alpha parameter.
float get_alpha() const { return _alpha; }
/// Getting the beta parameter from an activation descriptor.
/// \param [out] beta Value of beta parameter.
float get_beta() const { return _beta; }
/// Getting the algorithm parameter from an activation descriptor.
/// \param [out] alg Activation algorithm.
::dnnl::algorithm get_algorithm() const { return _alg; }
};
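// Example usage sketch (illustrative): a ReLU activation descriptor. For
// ::dnnl::algorithm::eltwise_clip the given value is stored as beta rather
// than alpha, as handled by set()/get() above.
//
//   dpct::dnnl::activation_desc act;
//   act.set(::dnnl::algorithm::eltwise_relu, /*alpha=*/0.0f);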
/// A class holding description for a local response normalization operation.
class lrn_desc {
unsigned int _local_size;
float _alpha;
float _beta;
float _k;
public:
/// Setting a local response normalization descriptor with given parameters.
/// \param [in] local_size Value of local_size parameter.
/// \param [in] alpha Value of alpha parameter.
/// \param [in] beta Value of beta parameter.
/// \param [in] k Value of k parameter.
void set(unsigned int local_size, float alpha, float beta, float k) {
_local_size = local_size;
_alpha = alpha;
_beta = beta;
_k = k;
}
  /// Getting parameters from a local response normalization descriptor.
/// \param [out] local_size Value of local_size parameter.
/// \param [out] alpha Value of alpha parameter.
/// \param [out] beta Value of beta parameter.
/// \param [out] k Value of k parameter.
void get(unsigned int *local_size, float *alpha, float *beta,
float *k) const {
*local_size = _local_size;
*alpha = _alpha;
*beta = _beta;
*k = _k;
}
/// Setting the local size parameter of a local response normalization
/// descriptor.
/// \param [in] local_size Value of local_size parameter.
void set_local_size(unsigned int local_size) { _local_size = local_size; }
/// Setting the alpha parameter of a local response normalization descriptor.
/// \param [in] alpha Value of alpha parameter.
void set_alpha(float alpha) { _alpha = alpha; }
/// Setting the beta parameter of a local response normalization descriptor.
/// \param [in] beta Value of beta parameter.
void set_beta(float beta) { _beta = beta; }
/// Setting the k parameter of a local response normalization descriptor.
/// \param [in] k Value of k parameter.
void set_k(float k) { _k = k; }
/// Getting the local size parameter from a local response normalization
/// descriptor.
/// \param [out] local_size Value of local_size parameter.
unsigned int get_local_size() const { return _local_size; }
/// Getting the alpha parameter from a local response normalization
/// descriptor.
/// \param [out] alpha Value of alpha parameter.
float get_alpha() const { return _alpha; }
/// Getting the beta parameter from a local response normalization descriptor.
/// \param [out] beta Value of beta parameter.
float get_beta() const { return _beta; }
/// Getting the k parameter from a local response normalization descriptor.
/// \param [out] k Value of k parameter.
float get_k() const { return _k; }
};
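// Example usage sketch (illustrative): the classic AlexNet-style local
// response normalization parameters.
//
//   dpct::dnnl::lrn_desc lrn;
//   lrn.set(/*local_size=*/5, /*alpha=*/1e-4f, /*beta=*/0.75f, /*k=*/2.0f);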
/// An enum class representing softmax algorithm.
enum class softmax_algorithm { normal, log };
/// An enum class representing softmax mode.
enum class softmax_mode { instance, channel };
/// A class holding description for a pooling operation.
class pooling_desc {
::dnnl::algorithm _alg;
std::vector<int64_t> _stride;
std::vector<int64_t> _kernel;
std::vector<int64_t> _padding;
public:
/// Setting a 2D pooling descriptor with given parameters.
/// \param [in] alg Pooling algorithm.
/// \param [in] kernel_h Value of height of kernel.
/// \param [in] kernel_w Value of width of kernel.
/// \param [in] padding_h Value of height of padding.
/// \param [in] padding_w Value of width of padding.
/// \param [in] stride_h Value of height of stride.
/// \param [in] stride_w Value of width of stride.
void set(::dnnl::algorithm alg, int kernel_h, int kernel_w, int padding_h,
int padding_w, int stride_h, int stride_w) {
_alg = alg;
_stride = {stride_h, stride_w};
_kernel = {kernel_h, kernel_w};
_padding = {padding_h, padding_w};
}
/// Setting a ND pooling descriptor with given parameters.
/// \param [in] alg Pooling algorithm.
/// \param [in] ndims Dimension of the pooling operation.
/// \param [in] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
/// \param [in] padding Array of dimension ndims containing the padding size of
/// each dimension.
/// \param [in] stride Array of dimension ndims containing the stride size of
/// each dimension.
void set(::dnnl::algorithm alg, int ndims, int kernel[], int padding[],
int stride[]) {
_alg = alg;
_stride = std::vector<int64_t>(stride, stride + ndims);
_kernel = std::vector<int64_t>(kernel, kernel + ndims);
_padding = std::vector<int64_t>(padding, padding + ndims);
}
/// Getting parameters from a 2D pooling descriptor.
/// \param [out] alg Pooling algorithm.
/// \param [out] kernel_h Value of height of kernel.
/// \param [out] kernel_w Value of width of kernel.
/// \param [out] padding_h Value of height of padding.
/// \param [out] padding_w Value of width of padding.
/// \param [out] stride_h Value of height of stride.
/// \param [out] stride_w Value of width of stride.
void get(::dnnl::algorithm *alg, int *kernel_h, int *kernel_w, int *padding_h,
int *padding_w, int *stride_h, int *stride_w) const {
*alg = _alg;
*kernel_h = _kernel[0];
*kernel_w = _kernel[1];
*padding_h = _padding[0];
*padding_w = _padding[1];
*stride_h = _stride[0];
*stride_w = _stride[1];
}
/// Getting parameters from a ND pooling descriptor.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given pooling descriptor.
/// \param [out] alg Pooling algorithm.
/// \param [out] ndims Dimension of the pooling operation.
/// \param [out] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
/// \param [out] padding Array of dimension ndims containing the padding size
/// of each dimension.
/// \param [out] stride Array of dimension ndims containing the stride size of
/// each dimension.
void get(int requested_ndims, ::dnnl::algorithm *alg, int *ndims,
int kernel[], int padding[], int stride[]) const {
*alg = _alg;
*ndims = _stride.size();
for (int i = 0; i < requested_ndims; i++) {
kernel[i] = _kernel[i];
padding[i] = _padding[i];
stride[i] = _stride[i];
}
}
/// Setting the algorithm parameter of a pooling descriptor.
/// \param [in] alg Pooling algorithm.
void set_algorithm(::dnnl::algorithm alg) { _alg = alg; }
/// Setting the stride parameter of a pooling descriptor.
/// \param [in] stride Array of dimension ndims containing the stride size of
/// each dimension.
void set_stride(const std::vector<int64_t> &stride) { _stride = stride; }
/// Setting the kernel parameter of a pooling descriptor.
/// \param [in] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
void set_kernel(const std::vector<int64_t> &kernel) { _kernel = kernel; }
/// Setting the padding parameter of a pooling descriptor.
/// \param [in] padding Array of dimension ndims containing the padding size
/// of each dimension.
void set_padding(const std::vector<int64_t> &padding) { _padding = padding; }
/// Getting the algorithm parameter from a pooling descriptor.
/// \param [out] alg Pooling algorithm.
::dnnl::algorithm get_algorithm() const { return _alg; }
/// Getting the stride parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the stride size of each
/// dimension.
const std::vector<int64_t> &get_stride() const { return _stride; }
/// Getting the kernel parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the kernel size of each
/// dimension.
const std::vector<int64_t> &get_kernel() const { return _kernel; }
/// Getting the padding parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the padding size of each
/// dimension.
const std::vector<int64_t> &get_padding() const { return _padding; }
/// Getting the output dimensions of a memory after 2D pooling has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [out] out_n Number of images.
/// \param [out] out_c Number of channels.
/// \param [out] out_h Height of images.
/// \param [out] out_w Width of images.
void get_forward_output_dim(const memory_desc_ext &desc, int *out_n,
int *out_c, int *out_h, int *out_w) const {
auto dims = desc.get_dims();
*out_n = dims[0];
*out_c = dims[1];
*out_h = 1 + (dims[2] + 2 * _padding[0] - _kernel[0]) / _stride[0];
*out_w = 1 + (dims[3] + 2 * _padding[1] - _kernel[1]) / _stride[1];
}
/// Getting the output dimensions of a memory after ND pooling has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [out] ndims Dimension of the memory.
/// \param [out] out_dims Array of dimension requested_ndims that contain
/// the size of each memory dimension.
void get_forward_output_dim(const memory_desc_ext &desc, int ndims,
int out_dims[]) const {
assert(ndims >= 4 && "ndims is at least 4.");
auto dims = desc.get_dims();
out_dims[0] = dims[0];
out_dims[1] = dims[1];
for (int i = 2; i < ndims; i++) {
out_dims[i] =
1 + (dims[i] + 2 * _padding[i - 2] - _kernel[i - 2]) / _stride[i - 2];
}
}
};
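// Example usage sketch (illustrative): 2x2 max pooling with stride 2 and no
// padding. Following get_forward_output_dim() above, a hypothetical
// 1x64x56x56 NCHW source descriptor yields a 1x64x28x28 output.
//
//   dpct::dnnl::pooling_desc pool;
//   pool.set(::dnnl::algorithm::pooling_max, /*kernel_h=*/2, /*kernel_w=*/2,
//            /*padding_h=*/0, /*padding_w=*/0, /*stride_h=*/2, /*stride_w=*/2);
//   int n, c, h, w;
//   pool.get_forward_output_dim(src_desc, &n, &c, &h, &w);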
/// An enum class representing reduction operations.
enum class reduction_op {
max,
min,
sum,
mul,
mean,
amax,
mul_no_zeros,
norm1,
norm2
};
/// An enum class representing batch normalization mode.
enum class batch_normalization_mode { per_activation, spatial };
/// An enum class representing batch normalization operations.
enum class batch_normalization_ops { none, activation, add_activation };
/// An enum class representing binary operations.
enum class binary_op { add, sub, mul, div, min, max, sqrt, neg };
/// A struct representing convolution algorithm information.
struct convolution_algorithm_info {
::dnnl::algorithm algo = ::dnnl::algorithm::convolution_auto;
int status = 0;
};
/// A class holding description for a convolution operation.
class convolution_desc {
std::vector<int64_t> _strides;
std::vector<int64_t> _dilates;
std::vector<int64_t> _paddings;
int _group_count = 1;
::dnnl::fpmath_mode _math_mode = ::dnnl::fpmath_mode::strict;
public:
/// Setting a group count to be used in the convolution.
/// \param [in] group_count Value of group count.
void set_group_count(int group_count) { _group_count = group_count; }
/// Getting a group count specified in the given convolution descriptor.
/// \returns Value of group count.
int get_group_count() { return _group_count; }
/// Setting floating point math mode to be used in the convolution.
/// \param [in] math_mode Value of math_mode.
void set_math_mode(::dnnl::fpmath_mode math_mode) { _math_mode = math_mode; }
/// Getting floating point math mode specified in the given convolution descriptor.
/// \returns Value of math mode.
::dnnl::fpmath_mode get_math_mode() { return _math_mode; }
/// Setting a 2D convolution descriptor with given parameters.
/// \param [in] padding_h Value of height of padding.
/// \param [in] padding_w Value of width of padding.
/// \param [in] stride_h Value of height of stride.
/// \param [in] stride_w Value of width of stride.
/// \param [in] dilate_h Value of height of dilate.
/// \param [in] dilate_w Value of width of dilate.
void set(int padding_h, int padding_w, int stride_h, int stride_w,
int dilate_h, int dilate_w) {
_strides = {stride_h, stride_w};
_dilates = {dilate_h - 1, dilate_w - 1};
_paddings = {padding_h, padding_w};
}
/// Setting a ND convolution descriptor with given parameters.
/// \param [in] ndims Dimension of the convolution operation.
/// \param [in] paddings Array of dimension ndims containing the padding size of
/// each dimension.
/// \param [in] strides Array of dimension ndims containing the stride size of
/// each dimension.
  /// \param [in] dilates Array of dimension ndims containing the dilate size of
  /// each dimension.
void set(int ndims, int paddings[], int strides[], int dilates[]) {
_strides = std::vector<int64_t>(strides, strides + ndims);
_paddings = std::vector<int64_t>(paddings, paddings + ndims);
_dilates = std::vector<int64_t>(dilates, dilates + ndims);
for (auto &dilate : _dilates) {
dilate--;
}
}
/// Getting parameters from a 2D convolution descriptor.
/// \param [out] padding_h Value of height of padding.
/// \param [out] padding_w Value of width of padding.
/// \param [out] stride_h Value of height of stride.
/// \param [out] stride_w Value of width of stride.
/// \param [out] dilate_h Value of height of dilate.
/// \param [out] dilate_w Value of width of dilate.
void get(int *padding_h, int *padding_w, int *stride_h, int *stride_w,
int *dilate_h, int *dilate_w) const {
*dilate_h = _dilates[0];
*dilate_w = _dilates[1];
*padding_h = _paddings[0];
*padding_w = _paddings[1];
*stride_h = _strides[0];
*stride_w = _strides[1];
}
/// Getting parameters from a ND convolution descriptor.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given convolution descriptor.
  /// \param [out] ndims Dimension of the convolution operation.
/// \param [out] paddings Array of dimension ndims containing the padding size
/// of each dimension.
/// \param [out] strides Array of dimension ndims containing the stride size of
/// each dimension.
/// \param [out] dilates Array of dimension ndims containing the dilate size of
/// each dimension.
void get(int requested_ndims, int *ndims, int paddings[], int strides[],
int dilates[]) const {
*ndims = _strides.size();
for (int i = 0; i < requested_ndims; i++) {
dilates[i] = _dilates[i];
paddings[i] = _paddings[i];
strides[i] = _strides[i];
}
}
/// Getting the stride parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the stride size of each
/// dimension.
const std::vector<int64_t> &get_stride() const { return _strides; }
  /// Getting the dilate parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the dilate size of each
/// dimension.
const std::vector<int64_t> &get_dilate() const { return _dilates; }
/// Getting the padding parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the padding size of each
/// dimension.
const std::vector<int64_t> &get_padding() const { return _paddings; }
/// Getting the output dimensions of a memory after 2D convolution has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [in] weight_desc Input weight memory descriptor.
/// \param [out] out_n Number of images.
/// \param [out] out_c Number of channels.
/// \param [out] out_h Height of images.
/// \param [out] out_w Width of images.
void get_forward_output_dim(const memory_desc_ext &desc,
const memory_desc_ext &weight_desc, int *out_n,
int *out_c, int *out_h, int *out_w) const {
auto dims = desc.get_dims();
auto weight_dims = weight_desc.get_dims();
*out_n = dims[0];
*out_c = weight_dims[0];
*out_h = 1 + (dims[2] + 2 * _paddings[0] -
(1 + (_dilates[0] * (weight_dims[2] - 1)))) /
_strides[0];
*out_w = 1 + (dims[3] + 2 * _paddings[1] -
(1 + (_dilates[1] * (weight_dims[3] - 1)))) /
_strides[1];
}
/// Getting the output dimensions of a memory after ND convolution has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [in] weight_desc Input weight memory descriptor.
/// \param [out] ndims Dimension of the memory.
/// \param [out] out_dims Array of dimension requested_ndims that contain
/// the size of each memory dimension.
void get_forward_output_dim(const memory_desc_ext &desc,
const memory_desc_ext &weight_desc, int ndims,
int out_dims[]) const {
assert(ndims >= 4 && "ndims is at least 4.");
auto dims = desc.get_dims();
auto weight_dims = weight_desc.get_dims();
out_dims[0] = dims[0];
out_dims[1] = weight_dims[1];
for (int i = 2; i < ndims; i++) {
out_dims[i] = 1 + (dims[i] + 2 * _paddings[i - 2] -
(1 + (_dilates[i - 2] * (weight_dims[i] - 1)))) /
_strides[i - 2];
}
}
convolution_desc &operator=(std::nullptr_t) {
return *this = convolution_desc();
}
operator bool() const {
return !(_strides.size() == 0
&& _dilates.size() == 0
&& _paddings.size() == 0);
}
};
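// Example usage sketch (illustrative): a 2D convolution with padding 1,
// stride 1 and no dilation. A dilate value of 1 follows the cuDNN convention
// and is stored internally as the oneDNN value 0, as done by set() above.
//
//   dpct::dnnl::convolution_desc conv;
//   conv.set(/*padding_h=*/1, /*padding_w=*/1, /*stride_h=*/1, /*stride_w=*/1,
//            /*dilate_h=*/1, /*dilate_w=*/1);
//   conv.set_group_count(1);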
/// An enum class representing rnn mode.
enum class rnn_mode { vanilla_relu, vanilla_tanh, lstm, gru };
/// An enum class representing rnn bias mode.
enum class rnn_bias_mode { none, single };
/// An enum class representing rnn direction.
enum class rnn_direction {unidirectional, bidirectional};
/// A class holding description for a RNN operation.
class rnn_desc {
rnn_mode _mode;
rnn_bias_mode _bias_mode;
rnn_direction _direction;
dpct::library_data_t _dt;
int _input_size;
int _hidden_size;
int _projection_size;
int _layer_size;
public:
void set(rnn_mode mode, rnn_bias_mode bias_mode, rnn_direction direction,
dpct::library_data_t dt, int input_size, int hidden_size,
int projection_size, int layer_size) {
_mode = mode;
_bias_mode = bias_mode;
_direction = direction;
_input_size = input_size;
_hidden_size = hidden_size;
_projection_size = projection_size;
_layer_size = layer_size;
_dt = dt;
}
void get(rnn_mode *mode, rnn_bias_mode *bias_mode, rnn_direction *direction,
dpct::library_data_t *dt, int *input_size, int *hidden_size,
int *projection_size, int *layer_size) const {
*mode = _mode;
*bias_mode = _bias_mode;
*direction = _direction;
*input_size = _input_size;
*hidden_size = _hidden_size;
*projection_size = _projection_size;
*layer_size = _layer_size;
*dt = _dt;
}
};
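// Example usage sketch (illustrative): a two-layer unidirectional LSTM with
// 512 input features and 1024 hidden units, no projection and a single bias.
// The library_data_t enumerator is assumed to be the fp32 value defined in
// dpct's lib_common_utils.hpp.
//
//   dpct::dnnl::rnn_desc rnn;
//   rnn.set(dpct::dnnl::rnn_mode::lstm, dpct::dnnl::rnn_bias_mode::single,
//           dpct::dnnl::rnn_direction::unidirectional,
//           dpct::library_data_t::real_float, /*input_size=*/512,
//           /*hidden_size=*/1024, /*projection_size=*/0, /*layer_size=*/2);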
/// A class holding description for a Dropout operation.
class dropout_desc {
struct dropout_desc_imp {
float _p = 0.5f;
unsigned long long _seed = 1;
void *_state = nullptr;
std::vector<std::uint8_t> _host_state;
rng_engine_t _rng_engine;
dropout_desc_imp() : _rng_engine(dpct::get_default_queue(), 1) {}
};
std::shared_ptr<dropout_desc_imp> _imp;
void generate(sycl::queue *q, std::int64_t required_state_size,
std::int64_t num, void *buffer) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
sycl::event e_gen = oneapi::mkl::rng::generate(
oneapi::mkl::rng::bernoulli<std::int32_t>(1.f - _imp->_p),
_imp->_rng_engine, num, (std::int32_t *)buffer);
sycl::event e_save = q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e_gen);
cgh.host_task([=] {
oneapi::mkl::rng::save_state(_imp->_rng_engine,
_imp->_host_state.data());
});
});
q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size,
e_save);
#endif
}
public:
operator bool() const {
return bool(_imp);
}
dropout_desc &operator=(std::nullptr_t) {
_imp.reset();
return *this;
}
/// Initializing a dropout descriptor.
void init(){
_imp = std::make_shared<dropout_desc_imp>();
}
/// Setting a dropout descriptor with given parameters.
/// \param [in] engine Engine of the dropout operation.
/// \param [in] p Probability of value set to zero.
  /// \param [in] state Memory that stores the random generator state.
/// \param [in] state_size Required size to store random generator state.
/// \param [in] seed Seed to initialize conditions of the generator state.
void set(engine_ext &engine, float p, void *state, size_t state_size,
unsigned long long seed);
  /// Getting parameters from a dropout descriptor.
  /// \param [out] p Probability of value set to zero.
  /// \param [out] states Memory that stores the random generator state.
  /// \param [out] seed Seed used to initialize conditions of the generator
  /// state.
void get(float *p, void **states, unsigned long long *seed) const noexcept {
*seed = _imp->_seed;
*states = _imp->_state;
*p = _imp->_p;
}
/// Getting the probability of value set to zero.
/// \returns Probability.
float get_probability() const noexcept { return _imp->_p; }
  /// Restoring a dropout descriptor from a previously stored state.
  /// \param [in] engine Engine of the dropout operation.
  /// \param [in] p Probability of value set to zero.
  /// \param [in] state Memory that stores the random generator state.
  /// \param [in] state_size Required size to store random generator state.
  /// \param [in] seed Seed to initialize conditions of the generator state.
void restore(engine_ext &engine, float p, void *state, size_t state_size,
unsigned long long seed);
friend class engine_ext;
};
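// Example usage sketch (illustrative): a dropout descriptor must be
// initialized before use; set() and restore() additionally require an
// engine_ext and a device state buffer (see engine_ext below).
//
//   dpct::dnnl::dropout_desc drop;
//   drop.init();                       // _p defaults to 0.5f, _seed to 1
//   float p = drop.get_probability();  // 0.5f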
namespace detail {
typedef std::string primitive_cache_key_type;
typedef std::list<primitive_cache_key_type> usage_list_type;
typedef struct {
::dnnl::primitive *primitive;
usage_list_type::iterator usage_it;
std::function<void(::dnnl::primitive *)> destructor;
sycl::event e;
} primitive_cache_value_type;
typedef std::unordered_map<primitive_cache_key_type, primitive_cache_value_type>
cache_map_type;
// The primitive cache uses LRU replacement policy, and the default cache
// capacity is 1024.
class primitive_cache {
int _capacity = 1024;
usage_list_type usage;
cache_map_type cache_map;
void touch(cache_map_type::iterator it, sycl::event e = {},
bool update_event = false) {
if (it->second.usage_it != usage.begin()) {
const primitive_cache_key_type &key = it->first;
usage.erase(it->second.usage_it);
usage.push_front(key);
it->second.usage_it = usage.begin();
}
if (update_event) {
it->second.e = e;
}
}
void async_destruct_primitive(const primitive_cache_value_type &value) {
dpct::get_current_device().default_queue().submit([&](sycl::handler &cgh) {
cgh.depends_on(value.e);
cgh.host_task([=] { value.destructor(value.primitive); });
});
}
public:
::dnnl::primitive *get(const primitive_cache_key_type &key) {
auto it = cache_map.find(key);
if (it == cache_map.end()) {
return nullptr;
}
touch(it);
return it->second.primitive;
}
void put(const primitive_cache_key_type &key, ::dnnl::primitive *value,
std::function<void(::dnnl::primitive *)> destructor, sycl::event e) {
auto it = cache_map.find(key);
if (it != cache_map.end()) {
touch(it, e, true);
} else {
if (cache_map.size() == _capacity) {
auto last_primitive = cache_map.find(usage.back());
async_destruct_primitive(last_primitive->second);
cache_map.erase(usage.back());
usage.pop_back();
}
usage.push_front(key);
cache_map[key] = {value, usage.begin(), destructor, e};
}
}
~primitive_cache() {
for (auto &v : cache_map) {
async_destruct_primitive(v.second);
}
}
};
} // namespace detail
/// A class holding the oneDNN engine.
class engine_ext {
struct output_argument_info {
float _alpha;
float _beta;
int _name;
memory_desc_ext _desc;
void *_data;
output_argument_info(float alpha, float beta, int name,
memory_desc_ext desc, void *data)
: _alpha(alpha), _beta(beta), _name(name), _desc(desc), _data(data) {}
output_argument_info(float alpha, float beta, memory_desc_ext desc,
void *data)
: _alpha(alpha), _beta(beta), _name(0), _desc(desc), _data(data) {}
};
::dnnl::engine _eng;
::dnnl::stream _s;
sycl::queue *_q = nullptr;
std::map<void *, ::dnnl::memory> workspace_map;
std::int64_t _random_engine_state_size = -1;
detail::primitive_cache _primitive_cache;
::dnnl::memory &get_workspace(void *key) { return workspace_map[key]; }
void insert_workspace(void *key, ::dnnl::memory workspace) {
workspace_map[key] = workspace;
}
const ::dnnl::stream &get_stream() const { return _s; }
const ::dnnl::engine &get_engine() const { return _eng; }
void *allocate(const memory_desc_ext &desc, int count = 1) const;
::dnnl::memory::desc
compress_spatial_dimensions_to_channel(const ::dnnl::memory::desc &desc);
::dnnl::memory::desc
get_bn_scale_bias_mean_var_desc(const ::dnnl::memory::desc &desc,
batch_normalization_mode mode);
sycl::event batch_normalization_backward_internal(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias,
float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var);
sycl::event batch_normalization_forward_internal(
bool is_infer, batch_normalization_mode mode, float epsilon, float factor,
float alpha, const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
void *running_mean, void *running_var);
::dnnl::memory::desc
transfer_memory_desc_to_channel_major_format(const ::dnnl::memory::desc &desc);
::dnnl::memory::desc
bn_reorder_memory_to_channel_major_format(bool is_input, ::dnnl::memory::desc &desc,
void *src, void **cache,
std::vector<void *> &caches);
::dnnl::memory::desc
transfer_memory_desc_to_format_tag_any(const ::dnnl::memory::desc &desc){
return ::dnnl::memory::desc(desc.get_dims(), desc.get_data_type(),
::dnnl::memory::format_tag::any);
}
void allocate_and_reorder_memory_to_optimal(::dnnl::memory::desc &from_desc,
void *&from,
::dnnl::memory::desc &to_desc,
void *&to,
std::vector<void *> &caches) {
if (from_desc != to_desc) {
to = allocate(to_desc);
caches.push_back(to);
async_reorder(1.f, from_desc, from, 0.f, to_desc, to);
}
}
template <typename primitive_type, typename... args_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
create_primitive(args_type &&...args);
template <typename primitive_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
create_primitive_with_pd(const typename primitive_type::primitive_desc &pd);
template <typename primitive_type, typename... args_type>
typename primitive_type::primitive_desc
create_primitive_desc(args_type &&...args);
template <typename primitive_desc_type>
std::string generate_cache_key(const primitive_desc_type &pd);
void serialize_dims(std::stringstream &ss, const std::vector<int64_t> &dims) {
ss.write((char *)dims.data(), dims.size() * sizeof(int64_t));
};
void serialize_mem_desc(std::stringstream &ss,
const ::dnnl::memory::desc &desc) {
if (desc.is_zero()) {
return;
}
auto format_kind = desc.get_format_kind();
ss << desc.get_ndims() << (std::uint8_t)desc.get_data_type()
<< (std::uint8_t)format_kind;
serialize_dims(ss, desc.get_dims());
serialize_dims(ss, desc.get_strides());
if (format_kind == ::dnnl::memory::format_kind::blocked) {
ss << desc.get_inner_nblks();
serialize_dims(ss, desc.get_inner_blks());
serialize_dims(ss, desc.get_inner_idxs());
}
};
sycl::event execute_rnn_forward_primitive(
rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction,
rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt,
::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c,
int dst_c, int layer_size, int direction_num, int hidden_size,
int gate_num, int projection_size, std::vector<void *> &data,
std::vector<int> &offset, int iter_num, size_t *weight_size = nullptr,
size_t *workspace_size = nullptr, size_t *scratchpad_size = nullptr);
sycl::event rnn_forward_internal(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t workspace_size, void *workspace,
size_t scratchpad_size, void *scratchpad, bool is_get_execution_args,
size_t *weight_size_query, size_t *workspace_size_query,
size_t *scratchpad_size_query);
sycl::event execute_rnn_backward_primitive(
rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode,
::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag,
int seq_length, int batch_size, int src_c, int dst_c, int layer_size,
int direction_num, int hidden_size, int gate_num, int projection_size,
std::vector<void *> &data, std::vector<int> &offset, int iter_num);
void async_free(sycl::queue *q, sycl::event e,
std::unordered_map<int, ::dnnl::memory> *args,
std::vector<void *> device_ptrs = {}) {
q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
if (args) {
delete args;
}
for (auto ptr : device_ptrs) {
if (ptr) {
sycl::free(ptr, *_q);
}
}
});
});
};
bool
scale_parameter_preprocess(const std::vector<output_argument_info> &args);
template <typename primitive_type>
sycl::event
execute_primitive(const std::pair<detail::primitive_cache_key_type,
primitive_type *> &primitive,
std::unordered_map<int, ::dnnl::memory> *args,
const std::vector<output_argument_info> &extra_args = {},
const std::vector<void *> &device_ptrs = {});
template <typename T>
sycl::event fill_with_type(sycl::queue *q, void *src, const void *value,
size_t size_with_byte) {
return q->fill<T>(static_cast<T *>(src), *static_cast<const T *>(value),
size_with_byte / sizeof(T));
}
template <typename T> struct no_zero_op {
T operator()(T e) {
if (!e) {
return 1;
}
return e;
}
};
template <typename T>
void transform_no_zero_with_type(sycl::queue *q, void *src, void *dst,
size_t num) {
std::transform(oneapi::dpl::execution::make_device_policy(*q),
static_cast<T *>(src), static_cast<T *>(src) + num,
static_cast<T *>(dst), no_zero_op<T>());
}
void transform_no_zero(const memory_desc_ext &desc, void *src, void *dst);
::dnnl::memory::desc get_group_weight_desc(int group_count,
const memory_desc_ext &weight_desc);
void get_rnn_configuration(const ::dnnl::memory::desc &desc,
rnn_direction direction, rnn_mode mode,
dpct::library_data_t dt, int hidden_size,
::dnnl::memory::data_type *dnnl_dt,
::dnnl::memory::format_tag *tag,
int *projection_size, int *output_size,
int *seq_length, int *batch_size,
int *direction_num, int *gate_num);
public:
engine_ext() {}
operator bool() const {
return bool(_eng) && bool(_s) && bool(_q);
}
engine_ext &operator=(std::nullptr_t) {
_eng.reset(nullptr);
_s.reset(nullptr);
_q = nullptr;
return *this;
}
/// Creating oneDNN engine.
void create_engine() {
_eng = ::dnnl::sycl_interop::make_engine(
dpct::get_current_device(), dpct::get_current_device().get_context());
_s = ::dnnl::sycl_interop::make_stream(
_eng, dpct::get_current_device().default_queue());
_q = &dpct::get_current_device().default_queue();
}
  /// Setting the user's SYCL queue for a oneDNN engine.
/// \param [in] q Pointer to the SYCL queue.
void set_queue(sycl::queue *q) {
if (!q) {
throw std::runtime_error("set_queue: pointer must not be nullptr.");
}
if (!_eng) {
throw std::runtime_error("set_queue: current engine is invalid.");
}
if (q->get_context() != ::dnnl::sycl_interop::get_context(_eng)) {
throw std::runtime_error(
"set_queue: queue is mismatch with current engine context.");
}
_q = q;
_s = ::dnnl::sycl_interop::make_stream(_eng, *q);
}
/// Retrieving the user's SYCL queue set in the oneDNN engine.
/// \returns Pointer to the SYCL queue.
sycl::queue *get_queue() const { return _q; }
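  // Example usage sketch (illustrative): creating an engine bound to the
  // current dpct device and, optionally, rebinding it to a user queue from the
  // same context via set_queue().
  //
  //   dpct::dnnl::engine_ext engine;
  //   engine.create_engine();
  //   sycl::queue *q = engine.get_queue();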
/// Setting all elements of a memory to a given value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] valuePtr Pointer to a single value.
void fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr);
  /// Copying the scaled data from one memory to another memory with a
  /// different description.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void reorder(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Scaling all the elements of a memory by a given factor.
/// \param [in] alpha Value to scaling factors.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] src Pointer to source data.
void scale(float alpha, const memory_desc_ext &src_desc, void *src);
/// Adding the scaled values of a memory to another memory.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Computing a specified activation function value.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst);
/// Computing the gradient of a specified activation function.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
void
activation_backward(activation_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing a specified pooling function value.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward propagation.
void pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified pooling function.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential
/// source data.
/// \param [in] workspace Pointer to workspace used for backward
/// propagation.
void pooling_backward(pooling_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src,
::dnnl::memory *workspace = nullptr);
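// Example (illustrative sketch; "pool_desc" is a pooling_desc configured
// elsewhere and the remaining names are hypothetical). The workspace produced
// by the forward call is passed back to the backward call:
//   ::dnnl::memory ws;
//   handle.pooling_forward(pool_desc, 1.0f, src_desc, src,
//                          0.0f, dst_desc, dst, &ws);
//   handle.pooling_backward(pool_desc, 1.0f, dst_desc, dst,
//                           diff_dst_desc, diff_dst, src_desc, src,
//                           0.0f, diff_src_desc, diff_src, &ws);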
/// Computing a specified softmax function value.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void softmax_forward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the gradient of a specified softmax function.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
void softmax_backward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src);
/// Computing a specified local response normalization function value.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
void lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified local response normalization
/// function.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace used for backward propagation.
void lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &diff_src_desc,
void *diff_src, ::dnnl::memory *workspace = nullptr);
/// Setting all elements of a memory to a given value asynchronously.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] valuePtr Pointer to a single value.
/// \returns An event representing the fill operations.
sycl::event async_fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr);
/// Copying the scaled data from a memory to another memory with a different
/// description asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the reorder operations.
sycl::event async_reorder(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Scaling all the elements of a memory by a given factor asynchronously.
/// \param [in] alpha Value to scaling factors.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] src Pointer to source data.
/// \returns An event representing the scale operations.
sycl::event async_scale(float alpha, const memory_desc_ext &src_desc, void *src);
/// Adding the scaled values of a memory to another memory asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the sum operations.
sycl::event async_sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
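// Example (illustrative sketch): the async_* members return the sycl::event
// of the submitted work, so the caller decides when to synchronize.
//   sycl::event e = handle.async_sum(1.0f, src_desc, src,
//                                    1.0f, dst_desc, dst);
//   e.wait(); // or keep submitting dependent work on the same queue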
/// Performing a specified binary operation asynchronously.
/// \param [in] op Specified binary operation.
/// \param [in] alpha_0 Value to scaling factors used to scale the src_0
/// value.
/// \param [in] src_desc_0 Source 0 memory descriptor.
/// \param [in] src_0 Pointer to source 0 data.
/// \param [in] alpha_1 Value to scaling factors used to scale the src_1
/// value.
/// \param [in] src_desc_1 Source 1 memory descriptor.
/// \param [in] src_1 Pointer to source 1 data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the binary operations.
sycl::event async_binary(binary_op op, float alpha_0,
const memory_desc_ext &src_desc_0, void *src_0,
float alpha_1, const memory_desc_ext &src_desc_1,
void *src_1, float beta, const memory_desc_ext &dst_desc,
void *dst);
/// Performing a specified reduction operation asynchronously.
/// \param [in] op Specified reduction operation.
/// \param [in] alpha Value to scaling factors used to scale the data
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the reduction operations.
sycl::event async_reduction(reduction_op op, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing a specified activation function value asynchronously.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the activation forward operations.
sycl::event async_activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst);
/// Computing the gradient of a specified activation function asynchronously.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the activation backward operations.
sycl::event
async_activation_backward(activation_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing a specified pooling function value asynchronously.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward propagation.
/// \returns An event representing the pooling forward operations.
sycl::event async_pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified pooling function asynchronously.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential
/// source data.
/// \param [in] workspace Pointer to workspace used for backward
/// propagation.
/// \returns An event representing the pooling backward operations.
sycl::event async_pooling_backward(pooling_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src,
::dnnl::memory *workspace = nullptr);
/// Computing a specified softmax function value asynchronously.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the softmax forward operations.
sycl::event async_softmax_forward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the gradient of a specified softmax function asynchronously.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the softmax backward operations.
sycl::event async_softmax_backward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src);
/// Computing a specified local response normalization function value
/// asynchronously.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the lrn forward operations.
sycl::event async_lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified local response normalization
/// function asynchronously.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the lrn backward operations.
sycl::event async_lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &diff_src_desc,
void *diff_src, ::dnnl::memory *workspace = nullptr);
/// Derives a memory descriptor for the batch normalization scale, bias, mean,
/// variance from the source memory descriptor and batch normalization mode.
/// \param [out] desc Derived memory descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] mode Batch normalization mode.
static void derive_batch_normalization_memory_desc(memory_desc_ext &desc,
const memory_desc_ext &src_desc,
batch_normalization_mode mode);
/// Derives a memory descriptor for the batch normalization scale, bias, mean,
/// variance from the source memory descriptor and batch normalization mode.
/// \param [out] scale_bias_desc Derived scale and bias memory descriptor.
/// \param [out] mean_var_desc Derived mean and var memory descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] mode Batch normalization mode.
static void derive_batch_normalization_memory_desc(memory_desc_ext &scale_bias_desc,
memory_desc_ext &mean_var_desc,
const memory_desc_ext &src_desc,
batch_normalization_mode mode);
/// Get the size of the workspace needed by batch normalization. The data stored
/// in the workspace must be preserved between the forward and backward passes.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] src_desc Source memory descriptor.
/// \returns Size of workspace.
size_t get_batch_normalization_workspace_size(
batch_normalization_ops ops, const memory_desc_ext &src_desc);
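// Example (illustrative sketch; "mode" and "ops" are assumed to be
// batch_normalization_mode / batch_normalization_ops values chosen by the
// caller, and "handle" and "src_desc" are hypothetical):
//   memory_desc_ext derived_desc;
//   engine_ext::derive_batch_normalization_memory_desc(derived_desc,
//                                                      src_desc, mode);
//   size_t ws_size =
//       handle.get_batch_normalization_workspace_size(ops, src_desc);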
/// Computing a specified batch normalization inference stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean Pointer to mean data.
/// \param [in] var Pointer to variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_inference(
batch_normalization_mode mode, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *mean, void *var);
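// Example (illustrative sketch; scale, bias, mean, and var are device
// pointers laid out according to a descriptor derived with
// derive_batch_normalization_memory_desc; 1e-5f is an arbitrary epsilon):
//   sycl::event e = handle.async_batch_normalization_forward_inference(
//       mode, 1e-5f, 1.0f, src_desc, src, 0.0f, dst_desc, dst,
//       sbmv_desc, scale, bias, mean, var);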
/// Computing a specified batch normalization inference stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_desc Scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [in] mean Pointer to mean data.
/// \param [in] var Pointer to variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_inference(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *mean, void *var);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \param [in] workspace_size Size of workspace.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_desc Scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \param [in] workspace_size Size of workspace.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *running_mean, void *running_var,
void *saved_mean, void *saved_var, size_t workspace_size, void *workspace);
/// Computing the gradient of a specified batch normalization function asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Value to scaling factors used to scale the computed
/// data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Value to scaling factors used to scale the prior value
/// in the data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] alpha_param Value to scaling factors used to scale the computed
/// parameter value.
/// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean,
/// variance memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] beta_param Value to scaling factors used to scale the prior value
/// in the parameter memory.
/// \param [in] diff_scale Pointer to differential scale data.
/// \param [in] diff_bias Pointer to differential bias data.
/// \param [in] saved_mean Pointer to optional cache saved mean data in forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in forward.
/// \returns An event representing the batch normalization backward operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale,
float beta_param, void *diff_scale, void *diff_bias, void *saved_mean,
void *saved_var);
/// Computing the gradient of a specified batch normalization function
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Value to scaling factors used to scale the computed
/// data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Value to scaling factors used to scale the prior value
/// in the data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] diff_summand_desc Differential summand memory descriptor.
/// \param [out] diff_summand Pointer to differential summand data.
/// \param [in] alpha_param Value to scaling factors used to scale the computed
/// parameter value.
/// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean,
/// variance memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] beta_param Value to scaling factors used to scale the prior value
/// in the parameter memory.
/// \param [out] diff_scale Pointer to differential scale data.
/// \param [out] diff_bias Pointer to differential bias data.
/// \param [in] saved_mean Pointer to optional cache saved mean data in forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in forward.
/// \param [in] workspace_size Size of workspace.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the batch normalization backward operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc,
void *scale, void *bias, float beta_param, void *diff_scale,
void *diff_bias, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing the gradient of a specified batch normalization function
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Value to scaling factors used to scale the computed
/// data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Value to scaling factors used to scale the prior value
/// in the data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] diff_summand_desc Differential summand memory descriptor.
/// \param [out] diff_summand Pointer to differential summand data.
/// \param [in] alpha_param Value to scaling factors used to scale the computed
/// parameter value.
/// \param [in] diff_scale_bias_desc Differential scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] beta_param Value to scaling factors used to scale the prior value
/// in the parameter memory.
/// \param [out] diff_scale Pointer to differential scale data.
/// \param [out] diff_bias Pointer to differential bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [in] saved_mean Pointer to optional cache saved mean data in forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in forward.
/// \param [in] workspace_size Size of workspace.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the batch normalization backward operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale,
void *bias, float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing a specified convolution function value asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the convolution forward operations.
sycl::event async_convolution_forward(convolution_desc &desc, ::dnnl::algorithm alg,
float alpha, const memory_desc_ext &src_desc,
void *src, const memory_desc_ext &weight_desc,
void *weight, float beta,
const memory_desc_ext &dst_desc, void *dst);
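// Example (illustrative sketch; "conv_desc" is a convolution_desc configured
// elsewhere and "alg" is a ::dnnl::algorithm chosen by the caller):
//   sycl::event e = handle.async_convolution_forward(
//       conv_desc, alg, 1.0f, src_desc, src, weight_desc, weight,
//       0.0f, dst_desc, dst);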
/// Computing a specified convolution function value asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] alpha_0 Value to scaling factors used to scale the data
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] alpha_1 Value to scaling factors used to scale the summand
/// value.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] bias_desc Bias memory descriptor.
/// \param [in] bias Pointer to bias data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the convolution forward operations.
sycl::event async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc,
float alpha_0, const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float alpha_1,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &bias_desc, void *bias,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the data gradient of a specified convolution function asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the convolution backward data operations.
sycl::event async_convolution_backward_data(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &weight_desc, void *weight,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing the weight gradient of a specified convolution function
/// asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] diff_weight_desc Differential weight memory descriptor.
/// \param [out] diff_weight Pointer to differential weight data.
/// \returns An event representing the convolution backward weight operations.
sycl::event async_convolution_backward_weight(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_weight_desc, void *diff_weight);
/// Computing the bias gradient of a specified convolution function
/// asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] diff_bias_desc Differential bias memory descriptor.
/// \param [out] diff_bias Pointer to differential bias data.
/// \returns An event representing the convolution backward bias operations.
sycl::event async_convolution_backward_bias(float alpha,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_bias_desc,
void *diff_bias);
/// Getting the required weight space size for a specified rnn operation.
/// \param [in] desc RNN descriptor.
/// \param [out] weight_space_size Size of required weight space.
void rnn_get_weight_space_size(const rnn_desc &desc,
size_t *weight_space_size);
/// Getting the required scratchpad size and workspace size for a specified rnn operation.
/// \param [in] desc RNN descriptor.
/// \param [in] kind Propagation kind.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] scratchpad_size Size of required scratchpad.
/// \param [out] workspace_size Size of required workspace.
void rnn_get_scratchpad_workspace_size(const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc,
size_t *scratchpad_size, size_t *workspace_size);
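// Example (illustrative sketch; "rnn" is an rnn_desc configured elsewhere and
// "handle" and "src_desc" are hypothetical):
//   size_t weight_space = 0, scratchpad = 0, workspace = 0;
//   handle.rnn_get_weight_space_size(rnn, &weight_space);
//   handle.rnn_get_scratchpad_workspace_size(
//       rnn, ::dnnl::prop_kind::forward_training, src_desc,
//       &scratchpad, &workspace);
//   // The queried sizes are then used to allocate device memory, e.g. with
//   // sycl::malloc_device(weight_space, *handle.get_queue()).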
/// Computing a specified rnn function value asynchronously.
/// \param [in] desc RNN descriptor.
/// \param [in] kind Propagation kind.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] iter_desc Recurrent hidden state data memory descriptor.
/// \param [in] src_iter Pointer to input recurrent hidden state data.
/// \param [out] dst_iter Pointer to output recurrent hidden state data.
/// \param [in] iter_c_desc Recurrent cell state data memory descriptor.
/// \param [in] src_iter_c Pointer to input recurrent cell state data.
/// \param [out] dst_iter_c Pointer to output recurrent cell state data.
/// \param [in] weight_size Size of weight memory.
/// \param [in] weight Pointer to weight data.
/// \param [in] scratchpad_size Size of scratchpad memory.
/// \param [in] scratchpad Pointer to scratchpad data.
/// \param [in] workspace_size Size of workspace memory.
/// \param [in] workspace Pointer to workspace data.
/// \returns An event representing the status of rnn forward operations.
sycl::event async_rnn_forward(const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &iter_desc, void *src_iter,
void *dst_iter,
const memory_desc_ext &iter_c_desc,
void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight,
size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace);
/// Computing the data and weight gradient of a specified rnn function
/// asynchronously.
/// \param [in] desc RNN descriptor.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] iter_desc Recurrent hidden state data memory descriptor.
/// \param [in] src_iter Pointer to input recurrent hidden state data.
/// \param [in] diff_dst_iter Pointer to differential output recurrent hidden state data.
/// \param [out] diff_src_iter Pointer to differential input recurrent hidden state data.
/// \param [in] iter_c_desc Recurrent cell state data memory descriptor.
/// \param [in] src_iter_c Pointer to input recurrent cell state data.
/// \param [in] diff_dst_iter_c Pointer to differential output recurrent cell state data.
/// \param [out] diff_src_iter_c Pointer to differential input recurrent cell state data.
/// \param [in] weight_size Size of weight memory.
/// \param [in] weight Pointer to weight data.
/// \param [out] diff_weight Pointer to differential weight data.
/// \param [in] scratchpad_size Size of scratchpad memory.
/// \param [in] scratchpad Pointer to scratchpad data.
/// \param [in] workspace_size Size of workspace memory.
/// \param [in] workspace Pointer to workspace data.
/// \returns An event representing the status of rnn backward operations.
sycl::event async_rnn_backward(
const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst,
void *diff_dst, const memory_desc_ext &src_desc, void *src,
void *diff_src, const memory_desc_ext &iter_desc, void *src_iter,
void *diff_dst_iter, void *diff_src_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c,
void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size,
void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace);
/// Getting the required state size for dropout operations.
/// \returns Required size of state.
size_t get_dropout_state_size();
/// Getting the required workspace size for a dropout operation.
/// \param [in] src_desc Source memory descriptor.
/// \returns Required size of workspace.
static size_t get_dropout_workspace_size(const memory_desc_ext &src_desc);
/// Computing a specified dropout function value asynchronously.
/// \param [in] desc Dropout descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] workspace Pointer to workspace data.
/// \param [in] workspace_size Size of workspace memory.
/// \returns An event representing the dropout forward operations.
sycl::event async_dropout_forward(dropout_desc &desc,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
void *workspace, size_t workspace_size);
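// Example (illustrative sketch; "drop_desc" is a dropout_desc configured
// elsewhere and the other names are hypothetical):
//   size_t ws_size = engine_ext::get_dropout_workspace_size(src_desc);
//   void *ws = sycl::malloc_device(ws_size, *handle.get_queue());
//   sycl::event e = handle.async_dropout_forward(drop_desc, src_desc, src,
//                                                dst_desc, dst, ws, ws_size);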
/// Computing the gradient of a specified dropout function asynchronously.
/// \param [in] desc Dropout descriptor.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace data.
/// \param [in] workspace_size Size of workspace memory.
/// \returns An event representing the dropout backward operations.
sycl::event async_dropout_backward(dropout_desc &desc,
const memory_desc_ext &diff_dst_desc,
void *diff_dst,
const memory_desc_ext &diff_src_desc,
void *diff_src, void *workspace,
size_t workspace_size);
};
inline
void dropout_desc::restore(engine_ext &engine, float p, void *state,
size_t state_size, unsigned long long seed) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
if (state) {
std::int64_t required_state_size = engine.get_dropout_state_size();
if (state_size < required_state_size) {
throw std::runtime_error("restore: state_size less than required state size.");
}
sycl::queue *q = engine.get_queue();
_imp->_p = p;
_imp->_seed = seed;
_imp->_state = state;
_imp->_host_state = std::vector<std::uint8_t>(required_state_size);
q->memcpy(_imp->_host_state.data(), _imp->_state, required_state_size).wait();
_imp->_rng_engine =
oneapi::mkl::rng::load_state<rng_engine_t>(
*q, _imp->_host_state.data());
}
#endif
}
inline
void dropout_desc::set(engine_ext &engine, float p, void *state,
size_t state_size, unsigned long long seed) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
_imp->_p = p;
if (state) {
std::int64_t required_state_size = engine.get_dropout_state_size();
if (state_size < required_state_size) {
throw std::runtime_error("set: no sufficient memory to save states.");
}
sycl::queue *q = engine.get_queue();
_imp->_seed = seed;
_imp->_state = state;
_imp->_host_state = std::vector<std::uint8_t>(required_state_size);
_imp->_rng_engine = rng_engine_t(*q, seed);
oneapi::mkl::rng::save_state(_imp->_rng_engine, _imp->_host_state.data());
q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size).wait();
}
#endif
}
inline
::dnnl::memory::data_type
memory_desc_ext::to_dnnl_data_type(dpct::library_data_t dt) {
using dnnl_dt = ::dnnl::memory::data_type;
switch (dt) {
case dpct::library_data_t::real_half:
return dnnl_dt::f16;
case dpct::library_data_t::real_bfloat16:
return dnnl_dt::bf16;
case dpct::library_data_t::real_float:
return dnnl_dt::f32;
case dpct::library_data_t::real_int32:
return dnnl_dt::s32;
case dpct::library_data_t::real_int8:
return dnnl_dt::s8;
case dpct::library_data_t::real_uint8:
return dnnl_dt::u8;
case dpct::library_data_t::real_int8_4:
return dnnl_dt::s8;
case dpct::library_data_t::real_int8_32:
return dnnl_dt::s8;
case dpct::library_data_t::real_uint8_4:
return dnnl_dt::u8;
default:
throw std::runtime_error("to_dnnl_data_type: unsupported data type.");
}
}
inline
dpct::library_data_t
memory_desc_ext::to_dpct_library_data_t(::dnnl::memory::data_type dt,
unsigned block_size) {
using dpct_dt = dpct::library_data_t;
using dnnl_dt = ::dnnl::memory::data_type;
switch (dt) {
case dnnl_dt::f16:
return dpct_dt::real_half;
case dnnl_dt::bf16:
return dpct_dt::real_bfloat16;
case dnnl_dt::f32:
return dpct_dt::real_float;
case dnnl_dt::s32:
return dpct_dt::real_int32;
case dnnl_dt::s8:
if (block_size == 4) {
return dpct_dt::real_int8_4;
} else if (block_size == 32) {
return dpct_dt::real_int8_32;
} else {
return dpct_dt::real_int8;
}
case dnnl_dt::u8:
if (block_size == 4) {
return dpct_dt::real_uint8_4;
} else {
return dpct_dt::real_uint8;
}
default:
throw std::runtime_error("to_dpct_library_data_t: unsupported data type "
"dnnl::memory::data_type::undef.");
}
}
inline
::dnnl::memory::format_tag
memory_desc_ext::to_dnnl_format_tag(dpct::library_data_t dt,
memory_format_tag tag) {
using dpct_dt = dpct::library_data_t;
using dpct_tag = memory_format_tag;
using dnnl_tag = ::dnnl::memory::format_tag;
switch (tag) {
case dpct_tag::nchw:
return dnnl_tag::nchw;
case dpct_tag::nhwc:
return dnnl_tag::nhwc;
default:
if (dt == dpct_dt::real_int8_32) {
return dnnl_tag::nChw32c;
} else {
return dnnl_tag::nChw4c;
}
}
}
inline
void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt, int n,
int c, int h, int w) {
_desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt),
to_dnnl_format_tag(dt, tag));
}
inline
void memory_desc_ext::set(dpct::library_data_t dt, int n, int c, int h, int w,
int n_stride, int c_stride, int h_stride,
int w_stride) {
_desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt),
{n_stride, c_stride, h_stride, w_stride});
}
inline
void memory_desc_ext::set(dpct::library_data_t dt, int ndims, const int dims[],
const int strides[]) {
_desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt),
{strides, strides + ndims});
}
inline
void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt,
int ndims, const int dims[]) {
_desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt),
to_dnnl_format_tag(dt, tag));
}
inline
void memory_desc_ext::set(rnn_memory_format_tag tag, dpct::library_data_t dt,
int t, int n, int c) {
if (tag == rnn_memory_format_tag::tnc) {
_desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt),
::dnnl::memory::format_tag::tnc);
} else if(tag == rnn_memory_format_tag::ntc) {
_desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt),
::dnnl::memory::format_tag::ntc);
} else {
throw std::runtime_error("set: unsupported memory format tag.");
}
}
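// Example (illustrative sketch): describing a 4-D NCHW float tensor with the
// tag-based set() overload defined above, then reading the layout back with
// the stride-based getter defined below.
//   memory_desc_ext desc;
//   desc.set(memory_format_tag::nchw, dpct::library_data_t::real_float,
//            2, 3, 16, 16);
//   dpct::library_data_t dt;
//   int n, c, h, w, n_stride, c_stride, h_stride, w_stride;
//   desc.get(&dt, &n, &c, &h, &w, &n_stride, &c_stride, &h_stride, &w_stride);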
inline
void memory_desc_ext::get(dpct::library_data_t *dt, int *n, int *c, int *h,
int *w, int *n_stride, int *c_stride, int *h_stride,
int *w_stride) const {
unsigned block_size = 1;
auto dims = _desc.get_dims();
auto inner_blks = _desc.get_inner_blks();
auto strides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*n = dims[0];
*c = dims[1];
*h = dims[2];
*w = dims[3];
*n_stride = strides[0] / block_size;
*c_stride = strides[1] / block_size;
*h_stride = strides[2] / block_size;
*w_stride = strides[3] / block_size;
}
inline
void memory_desc_ext::get(dpct::library_data_t *dt, memory_format_tag *tag,
int *n, int *c, int *h, int *w) const {
unsigned block_size = 1;
*tag = memory_format_tag::nchw;
auto dims = _desc.get_dims();
auto strides = _desc.get_strides();
auto inner_blks = _desc.get_inner_blks();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
*tag = memory_format_tag::nchw_blocked;
}
if (strides[1] == 1 && dims[1] != 1) {
*tag = memory_format_tag::nhwc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*n = dims[0];
*c = dims[1];
*h = dims[2];
*w = dims[3];
}
inline
void memory_desc_ext::get(dpct::library_data_t *dt, rnn_memory_format_tag *tag,
int *t, int *n, int *c) const {
auto dims = _desc.get_dims();
auto strides = _desc.get_strides();
if (strides[0] >= strides[1]) {
*tag = rnn_memory_format_tag::tnc;
} else {
*tag = rnn_memory_format_tag::ntc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), 1);
*t = dims[0];
*n = dims[1];
*c = dims[2];
}
inline
void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt,
int *ndims, int dims[], int strides[]) const {
unsigned block_size = 1;
auto inner_blks = _desc.get_inner_blks();
auto adims = _desc.get_dims();
auto astrides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*ndims = _desc.get_ndims();
for (int index = 0; index < requested_ndims; index++) {
dims[index] = adims[index];
strides[index] =
astrides[index] / block_size;
}
}
inline
void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt,
memory_format_tag *tag, int *ndims,
int dims[]) const {
unsigned block_size = 1;
*tag = memory_format_tag::nchw;
auto inner_blks = _desc.get_inner_blks();
auto adims = _desc.get_dims();
auto astrides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
*tag = memory_format_tag::nchw_blocked;
}
if (astrides[1] == 1 &&
adims[1] != 1) {
*tag = memory_format_tag::nhwc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*ndims = _desc.get_ndims();
for (int index = 0; index < requested_ndims; index++) {
dims[index] = adims[index];
}
}
inline
void engine_ext::get_rnn_configuration(const ::dnnl::memory::desc &desc,
rnn_direction direction, rnn_mode mode,
dpct::library_data_t dt, int hidden_size,
::dnnl::memory::data_type *dnnl_dt,
::dnnl::memory::format_tag *tag,
int *projection_size, int *output_size,
int *seq_length, int *batch_size,
int *direction_num, int *gate_num) {
if (!desc.is_zero()) {
auto dims = desc.get_dims();
auto strides = desc.get_strides();
if (strides[0] >= strides[1]) {
*tag = ::dnnl::memory::format_tag::tnc;
*seq_length = dims[0];
*batch_size = dims[1];
} else {
*tag = ::dnnl::memory::format_tag::ntc;
*seq_length = dims[1];
*batch_size = dims[0];
}
}
if (direction == rnn_direction::bidirectional) {
*direction_num = 2;
} else {
*direction_num = 1;
}
if (mode == rnn_mode::lstm) {
*gate_num = 4;
} else if (mode == rnn_mode::gru) {
*gate_num = 3;
} else {
*gate_num = 1;
}
if (*projection_size != hidden_size) {
*output_size = *projection_size;
} else {
*projection_size = 0;
*output_size = hidden_size;
}
*dnnl_dt = memory_desc_ext::to_dnnl_data_type(dt);
}
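/// Allocate USM device memory on this engine's queue, large enough to hold
/// \p count buffers described by \p data_desc.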
inline
void *engine_ext::allocate(const memory_desc_ext &data_desc, int count) const {
size_t mem_size = data_desc.get_size();
void *mem = sycl::malloc_device(mem_size * count, *_q);
return mem;
}
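/// Type-dispatch wrapper over transform_no_zero_with_type for the element type
/// of \p desc; throws std::runtime_error for unsupported data types.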
inline
void engine_ext::transform_no_zero(const memory_desc_ext &desc, void *src, void *dst) {
::dnnl::memory::data_type dt = desc.get_desc().get_data_type();
size_t element_num = desc.get_element_num();
switch (dt) {
case ::dnnl::memory::data_type::f32:
transform_no_zero_with_type<float>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::f16:
transform_no_zero_with_type<sycl::half>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::s32:
transform_no_zero_with_type<int32_t>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::s8:
transform_no_zero_with_type<int8_t>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::u8:
transform_no_zero_with_type<uint8_t>(_q, src, dst, element_num);
break;
default:
throw std::runtime_error("transform_no_zero: unsupported data type.");
}
}
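/// Build the weight memory descriptor for a grouped convolution: the output
/// channel dimension is split into {group_count, OC / group_count} and a
/// goihw/gohwi (4D) or goidhw/godhwi (5D) format tag is chosen depending on
/// whether the original layout is channel-last. Blocked weight layouts are not
/// supported; for group_count == 1 the descriptor is returned unchanged.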
inline
::dnnl::memory::desc
engine_ext::get_group_weight_desc(int group_count,
const memory_desc_ext &weight_desc) {
if (group_count == 1) {
return weight_desc.get_desc();
}
auto help_weight_desc = weight_desc.get_desc();
int ndims = help_weight_desc.get_ndims();
if (!help_weight_desc.get_inner_blks().empty()) {
throw std::runtime_error("get_group_weight_desc: group convolution with "
"blocked weight memory unimplemented.");
}
std::vector<int64_t> new_size;
auto old_size = weight_desc.get_dims();
new_size.push_back(group_count);
new_size.push_back(old_size[0] / group_count);
for (int index = 1; index < old_size.size(); index++) {
new_size.push_back(old_size[index]);
}
std::vector<int64_t> strides = help_weight_desc.get_strides();
::dnnl::memory::format_tag tag;
bool is_nhwc = (strides[1] == 1 && old_size[1] != 1);
if (ndims == 4) {
if (is_nhwc) {
tag = ::dnnl::memory::format_tag::gohwi;
} else {
tag = ::dnnl::memory::format_tag::goihw;
}
} else if (ndims == 5) {
if (is_nhwc) {
tag = ::dnnl::memory::format_tag::godhwi;
} else {
tag = ::dnnl::memory::format_tag::goidhw;
}
}
help_weight_desc =
::dnnl::memory::desc(new_size, weight_desc.get_desc().get_data_type(), tag);
return help_weight_desc;
}
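/// Fold all spatial dimensions into the channel dimension, producing an
/// N x (C * spatial) x 1 x ... x 1 descriptor. nChw4c/nChw32c blocking is
/// preserved; otherwise a dense channel-major stride layout is used.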
inline
::dnnl::memory::desc engine_ext::compress_spatial_dimensions_to_channel(
const ::dnnl::memory::desc &desc) {
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
auto inner_blks = desc.get_inner_blks();
  assert(ndims >= 4 && "ndims must be at least 4.");
std::vector<int64_t> compressed_dims(ndims);
compressed_dims[0] = dims[0];
compressed_dims[1] = dims[1];
for (int index = 2; index < ndims; index++) {
compressed_dims[1] = compressed_dims[1] * dims[index];
compressed_dims[index] = 1;
}
if (!inner_blks.empty() && inner_blks[0] == 4) {
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(),
::dnnl::memory::format_tag::nChw4c);
} else if (!inner_blks.empty() && inner_blks[0] == 32) {
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(),
::dnnl::memory::format_tag::nChw32c);
}
std::vector<int64_t> strides(ndims, 1);
strides[0] = compressed_dims[1];
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(), strides);
}
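/// Build the 1-D descriptor used for batch-normalization scale, bias, mean and
/// variance: length C in spatial mode, or C times the spatial size in
/// per-activation mode.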
inline
::dnnl::memory::desc
engine_ext::get_bn_scale_bias_mean_var_desc(const ::dnnl::memory::desc &desc,
batch_normalization_mode mode) {
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
  assert(ndims >= 4 && "ndims must be at least 4.");
int channel_num = 1;
if (mode == batch_normalization_mode::spatial) {
channel_num = dims[1];
} else {
for (int index = 1; index < ndims; index++) {
channel_num = channel_num * dims[index];
}
}
return ::dnnl::memory::desc({channel_num}, desc.get_data_type(),
::dnnl::memory::format_tag::a);
}
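/// Return a plain channel-major descriptor (nchw for 4D, ncdhw otherwise) with
/// the same dimensions and data type; descriptors that already use a blocked
/// layout are returned unchanged.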
inline
::dnnl::memory::desc engine_ext::transfer_memory_desc_to_channel_major_format(
const ::dnnl::memory::desc &desc) {
if (!desc.get_inner_blks().empty()) {
return desc;
}
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
if (ndims == 4) {
return ::dnnl::memory::desc(dims, desc.get_data_type(),
::dnnl::memory::format_tag::nchw);
}
return ::dnnl::memory::desc(dims, desc.get_data_type(),
::dnnl::memory::format_tag::ncdhw);
}
/// If alpha = 0 and beta = 1, the destination (dst = alpha * out +
/// beta * prior_dst) is unchanged. In that case this function returns true,
/// meaning the operation can exit directly.
inline
bool engine_ext::scale_parameter_preprocess(
const std::vector<output_argument_info> &args) {
bool direct_exit = true;
for (auto &arg : args) {
if (arg._alpha == 0.f) {
if (arg._beta != 1.f) {
async_scale(arg._beta, arg._desc, arg._data);
}
} else {
direct_exit = false;
}
}
return direct_exit;
}
inline
void engine_ext::derive_batch_normalization_memory_desc(
memory_desc_ext &scale_bias_desc, memory_desc_ext &mean_var_desc,
const memory_desc_ext &src_desc, batch_normalization_mode mode) {
derive_batch_normalization_memory_desc(scale_bias_desc, src_desc, mode);
derive_batch_normalization_memory_desc(mean_var_desc, src_desc, mode);
}
inline
void engine_ext::derive_batch_normalization_memory_desc(
memory_desc_ext &desc, const memory_desc_ext &src_desc,
batch_normalization_mode mode) {
int src_ndims = src_desc.get_desc().get_ndims();
auto inner_blks = src_desc.get_desc().get_inner_blks();
  if (src_ndims != 4 && src_ndims != 5) {
    throw std::runtime_error("derive_batch_normalization_memory_desc: only 4d "
                             "and 5d memory descriptors are supported.");
  }
std::vector<int64_t> dims = src_desc.get_dims();
dims[0] = 1;
if (mode == batch_normalization_mode::spatial) {
dims[2] = 1;
dims[3] = 1;
if (src_ndims == 5) {
dims[4] = 1;
}
}
auto data_type = src_desc.get_desc().get_data_type();
if (data_type == ::dnnl::memory::data_type::f16) {
data_type = ::dnnl::memory::data_type::f32;
}
if (!inner_blks.empty() && inner_blks[0] == 4) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nChw4c));
} else if (!inner_blks.empty() && inner_blks[0] == 32) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nChw32c));
} else {
if (src_ndims == 4) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nchw));
} else {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::ncdhw));
}
}
}
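/// Bind the output arguments into \p args (allocating a temporary buffer for
/// any output whose beta is non-zero), execute the cached primitive on the
/// SYCL-interop stream, then apply the alpha/beta post-processing with
/// async_sum/async_scale. The primitive is (re)inserted into the primitive
/// cache, and temporary buffers plus \p device_ptrs are freed asynchronously
/// once the returned event completes.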
template <typename primitive_type>
sycl::event engine_ext::execute_primitive(
const std::pair<detail::primitive_cache_key_type, primitive_type *>
&primitive,
std::unordered_map<int, ::dnnl::memory> *args,
const std::vector<output_argument_info> &output_args,
const std::vector<void *> &device_ptrs) {
std::vector<void *> caches;
int output_arg_num = output_args.size();
for (int i = 0; i < output_arg_num; i++) {
if (output_args[i]._beta != 0.f) {
auto cache = allocate(output_args[i]._desc);
caches.push_back(cache);
args->insert(
{output_args[i]._name,
::dnnl::memory(output_args[i]._desc.get_desc(), _eng, cache)});
} else {
args->insert(
{output_args[i]._name, ::dnnl::memory(output_args[i]._desc.get_desc(),
_eng, output_args[i]._data)});
}
}
auto e = ::dnnl::sycl_interop::execute(
*(static_cast<primitive_type *>(primitive.second)), _s, *args);
_primitive_cache.put(
primitive.first, primitive.second,
[](::dnnl::primitive *p) { delete static_cast<primitive_type *>(p); }, e);
int cache_index = 0;
for (int i = 0; i < output_arg_num; i++) {
if (output_args[i]._beta != 0.f) {
e = async_sum(output_args[i]._alpha, output_args[i]._desc,
caches[cache_index++], output_args[i]._beta, output_args[i]._desc,
output_args[i]._data);
} else {
if (output_args[i]._alpha != 1.f) {
e = async_scale(output_args[i]._alpha, output_args[i]._desc,
output_args[i]._data);
}
}
}
caches.insert(caches.end(), device_ptrs.begin(), device_ptrs.end());
async_free(_q, e, args, caches);
return e;
}
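/// Convert \p desc to a channel-major layout for batch normalization. If a
/// layout change is needed (or \p src is null), a scratch buffer is allocated
/// into *\p cache and tracked in \p caches; the input data is additionally
/// reordered into the scratch buffer when \p is_input is true.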
inline
::dnnl::memory::desc engine_ext::bn_reorder_memory_to_channel_major_format(
bool is_input, ::dnnl::memory::desc &desc, void *src, void **cache,
std::vector<void *> &caches) {
::dnnl::memory::desc result;
result = transfer_memory_desc_to_channel_major_format(desc);
if ((result != desc) || !src) {
*cache = allocate(desc);
if (is_input && src) {
async_reorder(1.f, desc, src, 0.f, result, *cache);
}
caches.push_back(*cache);
}
return result;
}
inline
sycl::event engine_ext::batch_normalization_backward_internal(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias,
float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var) {
if (scale_parameter_preprocess(
{{alpha_data, beta_data, diff_src_desc, diff_src},
{alpha_param, beta_param, diff_scale_bias_desc, diff_scale},
{alpha_param, beta_param, diff_scale_bias_desc, diff_bias}})) {
return sycl::event();
}
std::vector<void *> caches;
void *reordered_src = nullptr, *reordered_diff_dst = nullptr,
*reordered_diff_src = nullptr, *reordered_scale = nullptr,
*reordered_bias = nullptr, *reordered_diff_scale = nullptr,
*reordered_diff_bias = nullptr, *reordered_saved_mean = nullptr,
*reordered_saved_var = nullptr;
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc();
::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc();
::dnnl::memory::desc help_diff_scale_bias_desc =
diff_scale_bias_desc.get_desc();
::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc();
::dnnl::memory::desc actual_diff_src_desc = help_diff_src_desc;
::dnnl::memory::desc actual_diff_scale_bias_desc = help_diff_scale_bias_desc;
if (mode == batch_normalization_mode::per_activation) {
help_src_desc = bn_reorder_memory_to_channel_major_format(true, help_src_desc, src,
&reordered_src, caches);
help_diff_dst_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches);
help_diff_src_desc = bn_reorder_memory_to_channel_major_format(
false, help_diff_src_desc, diff_src, &reordered_diff_src, caches);
actual_diff_src_desc = help_diff_src_desc;
help_diff_scale_bias_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_scale_bias_desc, scale, &reordered_scale, caches);
actual_diff_scale_bias_desc = help_diff_scale_bias_desc;
if (bias) {
bn_reorder_memory_to_channel_major_format(true, help_diff_scale_bias_desc, bias,
&reordered_bias, caches);
}
bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc,
diff_scale, &reordered_diff_scale,
caches);
bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc,
diff_bias, &reordered_diff_bias, caches);
help_mean_var_desc = bn_reorder_memory_to_channel_major_format(
true, help_mean_var_desc, saved_mean, &reordered_saved_mean, caches);
bn_reorder_memory_to_channel_major_format(true, help_mean_var_desc, saved_var,
&reordered_saved_var, caches);
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_diff_src_desc =
compress_spatial_dimensions_to_channel(help_diff_src_desc);
help_diff_dst_desc =
compress_spatial_dimensions_to_channel(help_diff_dst_desc);
} else {
if ((help_src_desc != help_diff_dst_desc) ||
(help_src_desc != help_diff_src_desc) ||
(help_diff_dst_desc != help_diff_src_desc)) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_diff_dst_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches);
help_diff_src_desc = bn_reorder_memory_to_channel_major_format(
false, help_diff_src_desc, diff_src, &reordered_diff_src, caches);
actual_diff_src_desc = help_diff_src_desc;
}
}
help_diff_scale_bias_desc =
get_bn_scale_bias_mean_var_desc(help_diff_scale_bias_desc, mode);
help_mean_var_desc =
get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode);
auto forward_primitive =
create_primitive_desc<::dnnl::batch_normalization_forward>(
::dnnl::prop_kind::forward_training, help_src_desc,
help_diff_dst_desc, epsilon,
::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift);
auto primitive =
create_primitive<::dnnl::batch_normalization_backward>(
::dnnl::prop_kind::backward, help_diff_src_desc, help_diff_dst_desc,
help_src_desc, epsilon,
::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift, forward_primitive);
void *dst_cache = nullptr;
if (!saved_mean && !saved_var) {
dst_cache = allocate(diff_dst_desc);
if (!reordered_saved_mean) {
reordered_saved_mean = allocate(mean_var_desc);
caches.push_back(reordered_saved_mean);
}
if (!reordered_saved_var) {
reordered_saved_var = allocate(mean_var_desc);
caches.push_back(reordered_saved_var);
}
    if (!bias) {
      // No bias supplied: use a zero-filled scratch buffer as the shift tensor.
      reordered_bias = allocate(diff_scale_bias_desc);
      caches.push_back(reordered_bias);
      _q->memset(reordered_bias, 0, diff_scale_bias_desc.get_size());
    }
batch_normalization_forward_internal(
true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, diff_dst_desc,
dst_cache, diff_scale_bias_desc, scale, bias ? bias : reordered_bias,
mean_var_desc, reordered_saved_mean, reordered_saved_var, nullptr,
nullptr);
caches.push_back(dst_cache);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(help_src_desc, _eng,
reordered_src ? reordered_src : src)}},
{DNNL_ARG_SCALE,
{::dnnl::memory(help_diff_scale_bias_desc, _eng,
reordered_scale ? reordered_scale : scale)}},
{DNNL_ARG_MEAN,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_mean ? reordered_saved_mean
: saved_mean)}},
{DNNL_ARG_VARIANCE,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_var ? reordered_saved_var : saved_var)}},
{DNNL_ARG_DIFF_DST,
       {::dnnl::memory(help_diff_dst_desc, _eng,
reordered_diff_dst ? reordered_diff_dst : diff_dst)}}};
sycl::event e = execute_primitive(
primitive, execution_args,
{{alpha_data, beta_data, DNNL_ARG_DIFF_SRC, help_diff_src_desc,
reordered_diff_src ? reordered_diff_src : diff_src},
{alpha_param, beta_param, DNNL_ARG_DIFF_SCALE, help_diff_scale_bias_desc,
reordered_diff_scale ? reordered_diff_scale : diff_scale},
{alpha_param, beta_param, DNNL_ARG_DIFF_SHIFT, help_diff_scale_bias_desc,
reordered_diff_bias ? reordered_diff_bias : diff_bias}});
if (actual_diff_src_desc != diff_src_desc.get_desc() && reordered_diff_src) {
e = async_reorder(1.f, actual_diff_src_desc, reordered_diff_src, 0.f,
diff_src_desc, diff_src);
}
if (actual_diff_scale_bias_desc != diff_scale_bias_desc.get_desc() &&
reordered_diff_scale && reordered_diff_bias) {
async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_scale, 0.f,
diff_scale_bias_desc, diff_scale);
e = async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_bias, 0.f,
diff_scale_bias_desc, diff_bias);
}
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
inline
sycl::event engine_ext::batch_normalization_forward_internal(
bool is_infer, batch_normalization_mode mode, float epsilon, float factor,
float alpha, const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
void *running_mean, void *running_var) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
std::vector<void *> caches;
void *reordered_src = nullptr, *reordered_dst = nullptr,
*reordered_scale = nullptr, *reordered_bias = nullptr,
*reordered_saved_mean = nullptr, *reordered_saved_var = nullptr;
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
::dnnl::memory::desc help_scale_bias_desc = scale_bias_desc.get_desc();
::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc();
::dnnl::memory::desc actual_dst_desc = help_dst_desc;
::dnnl::memory::desc actual_mean_var_desc = help_mean_var_desc;
if (mode == batch_normalization_mode::per_activation) {
help_src_desc = bn_reorder_memory_to_channel_major_format(true, help_src_desc, src,
&reordered_src, caches);
help_dst_desc = bn_reorder_memory_to_channel_major_format(
false, help_dst_desc, dst, &reordered_dst, caches);
actual_dst_desc = help_dst_desc;
help_scale_bias_desc = bn_reorder_memory_to_channel_major_format(
true, help_scale_bias_desc, scale, &reordered_scale, caches);
bn_reorder_memory_to_channel_major_format(true, help_scale_bias_desc, bias,
&reordered_bias, caches);
help_mean_var_desc = bn_reorder_memory_to_channel_major_format(
is_infer, help_mean_var_desc, saved_mean,
&reordered_saved_mean, caches);
actual_mean_var_desc = help_mean_var_desc;
bn_reorder_memory_to_channel_major_format(is_infer,
help_mean_var_desc, saved_var,
&reordered_saved_var, caches);
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
} else {
if (help_src_desc != help_dst_desc) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_dst_desc = bn_reorder_memory_to_channel_major_format(
false, help_dst_desc, dst, &reordered_dst, caches);
actual_dst_desc = help_dst_desc;
}
}
help_scale_bias_desc =
get_bn_scale_bias_mean_var_desc(help_scale_bias_desc, mode);
help_mean_var_desc =
get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode);
::dnnl::prop_kind kind;
::dnnl::normalization_flags flag = ::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift;
if (is_infer) {
kind = ::dnnl::prop_kind::forward_inference;
flag = ::dnnl::normalization_flags::use_global_stats | flag;
} else {
kind = ::dnnl::prop_kind::forward_training;
}
auto primitive =
create_primitive<::dnnl::batch_normalization_forward>(
kind, help_src_desc, help_dst_desc, epsilon, flag);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(help_src_desc, _eng,
reordered_src ? reordered_src : src)}},
{DNNL_ARG_SCALE,
{::dnnl::memory(help_scale_bias_desc, _eng,
reordered_scale ? reordered_scale : scale)}},
{DNNL_ARG_SHIFT,
{::dnnl::memory(help_scale_bias_desc, _eng,
reordered_bias ? reordered_bias : bias)}},
{DNNL_ARG_MEAN,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_mean ? reordered_saved_mean
: saved_mean)}},
{DNNL_ARG_VARIANCE,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_var ? reordered_saved_var
: saved_var)}}};
sycl::event e = execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, help_dst_desc,
reordered_dst ? reordered_dst : dst}});
if (!is_infer && running_var) {
auto src_ndim = src_desc.get_desc().get_ndims();
auto src_dims = src_desc.get_dims();
int element_num = src_dims[0];
if (mode == batch_normalization_mode::spatial) {
for (int index = 2; index < src_ndim; index++) {
element_num *= src_dims[index];
}
}
float unbias_factor = element_num / (element_num - 1.f);
async_scale(1.f - factor, mean_var_desc, running_var);
e = async_sum(factor * unbias_factor, mean_var_desc,
reordered_saved_var ? reordered_saved_var : saved_var,
1.f, mean_var_desc, running_var);
}
if (!is_infer && running_mean) {
e = async_sum(factor, mean_var_desc,
reordered_saved_mean ? reordered_saved_mean : saved_mean,
(1.f - factor), mean_var_desc, running_mean);
}
if (reordered_dst && (actual_dst_desc != dst_desc.get_desc())) {
e = async_reorder(1.f, actual_dst_desc, reordered_dst, 0.f, dst_desc, dst);
}
if (!is_infer && reordered_saved_mean && reordered_saved_var && saved_mean &&
saved_var && (actual_mean_var_desc != mean_var_desc.get_desc())) {
e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_mean, 0.f,
mean_var_desc, saved_mean);
e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_var, 0.f,
mean_var_desc, saved_var);
}
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
inline
sycl::event engine_ext::rnn_forward_internal(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t workspace_size, void *workspace,
size_t scratchpad_size, void *scratchpad, bool is_get_execution_args,
size_t *weight_size_query, size_t *workspace_size_query,
size_t *scratchpad_size_query) {
::dnnl::memory::data_type src_dt;
::dnnl::memory::format_tag src_format_tag;
rnn_mode mode;
rnn_bias_mode bias_mode;
rnn_direction direction;
dpct::library_data_t dt;
int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0,
layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0,
seq_length = 1, batch_size = 1;
std::vector<void *> data = {src, dst, src_iter, dst_iter,
src_iter_c, dst_iter_c, weight, workspace,
scratchpad};
std::vector<int> offset(6, 0);
void *input_layer_cache = nullptr, *hidden_layer_cache = nullptr;
sycl::event e;
desc.get(&mode, &bias_mode, &direction, &dt, &input_size, &hidden_size,
&projection_size, &layer_size);
get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size,
&src_dt, &src_format_tag, &projection_size,
&output_size, &seq_length, &batch_size, &direction_num,
&gate_num);
if (direction == rnn_direction::bidirectional) {
      // Combine the oneDNN bidirectional_sum and bidirectional_concat
      // configurations by calling execute_rnn_forward_primitive twice.
if (layer_size > 1) {
if (!is_get_execution_args) {
input_layer_cache = allocate(src_desc);
hidden_layer_cache = allocate(src_desc);
_q->memcpy(input_layer_cache, src, src_desc.get_size());
}
data[0] = input_layer_cache;
data[1] = hidden_layer_cache;
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::bidirectional_sum, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, 1, direction_num, hidden_size, gate_num, projection_size,
data, offset, layer_size - 1, weight_size_query, workspace_size_query,
scratchpad_size_query);
data[0] =
((layer_size - 1) % 2 == 0) ? input_layer_cache : hidden_layer_cache;
data[1] = dst;
}
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::bidirectional_concat, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
2 * output_size, 1, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1, weight_size_query,
workspace_size_query, scratchpad_size_query);
} else {
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, layer_size, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1, weight_size_query,
workspace_size_query, scratchpad_size_query);
}
if (is_get_execution_args) {
return e;
}
if (input_layer_cache && hidden_layer_cache) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(input_layer_cache, *_q);
sycl::free(hidden_layer_cache, *_q);
});
});
}
return e;
}
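/// Create and run the oneDNN forward primitive for the requested RNN cell
/// (vanilla RNN, GRU or LSTM). When \p weight_size, \p workspace_size or
/// \p scratchpad_size is non-null, only the corresponding size query is
/// accumulated and no primitive is executed. Otherwise the primitive is
/// executed \p iter_num times, swapping the layer input/output buffers between
/// iterations and, for training, appending src/dst/iter (and iter_c for LSTM)
/// snapshots to the workspace.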
inline
sycl::event engine_ext::execute_rnn_forward_primitive(
rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction,
rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt,
::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c,
int dst_c, int layer_size, int direction_num, int hidden_size, int gate_num,
int projection_size, std::vector<void *> &data, std::vector<int> &offset,
int iter_num, size_t *weight_size, size_t *workspace_size,
size_t *scratchpad_size) {
sycl::event e;
::dnnl::primitive *p = nullptr;
detail::primitive_cache_key_type key;
std::unordered_map<int, ::dnnl::memory> *execution_args;
::dnnl::memory::desc bias_desc(
{layer_size, direction_num, gate_num, hidden_size}, dt,
::dnnl::memory::format_tag::ldgo);
::dnnl::memory::desc weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc projection_desc;
if (projection_size) {
projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldio);
}
if (weight_size) {
*weight_size +=
(weight_layer_desc.get_size() + weight_iter_desc.get_size() +
projection_desc.get_size() + bias_desc.get_size()) *
iter_num;
return e;
}
::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag);
::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag);
::dnnl::memory::desc iter_desc(
{layer_size, direction_num, batch_size,
projection_size ? projection_size : hidden_size},
dt, ::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc iter_c_desc(
{layer_size, direction_num, batch_size, hidden_size}, dt,
::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc workspace_desc;
::dnnl::memory::desc scratchpad_desc;
::dnnl::primitive_attr attr;
attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
auto pd = create_primitive_desc<::dnnl::vanilla_rnn_forward>(
kind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc,
bias_desc, dst_desc, iter_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_forward>(pd);
key = r.first;
p = r.second;
}
} else if (mode == rnn_mode::gru) {
auto pd = create_primitive_desc<::dnnl::gru_forward>(
kind, direction, src_desc, iter_desc, weight_layer_desc,
weight_iter_desc, bias_desc, dst_desc, iter_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::gru_forward>(pd);
key = r.first;
p = r.second;
}
} else if (mode == rnn_mode::lstm) {
auto pd = create_primitive_desc<::dnnl::lstm_forward>(
kind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc,
weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc,
dst_desc, iter_desc, iter_c_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::lstm_forward>(pd);
key = r.first;
p = r.second;
}
}
for (int i = 0; i < iter_num; i++) {
void *in_cache = data[0], *out_cache = data[1], *dst_iter_c_cache = nullptr,
*dst_iter_cache = ((uint8_t *)(data[3]) + offset[1]);
if (mode == rnn_mode::lstm) {
dst_iter_c_cache = (uint8_t *)(data[4]) + offset[2];
}
if (!workspace_size) {
execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[0])}},
{DNNL_ARG_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[1])}},
{DNNL_ARG_SCRATCHPAD, {::dnnl::memory(scratchpad_desc, _eng, data[8])}}};
auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data,
int &offset) {
execution_args->insert(
{arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data + offset)}});
offset += d.get_size();
};
insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[0]);
insert_args(DNNL_ARG_DST_ITER, iter_desc, data[3], offset[1]);
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]);
insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[5], offset[3]);
}
insert_args(DNNL_ARG_WEIGHTS_LAYER, weight_layer_desc, data[6],
offset[4]);
insert_args(DNNL_ARG_WEIGHTS_ITER, weight_iter_desc, data[6], offset[4]);
if (projection_size) {
insert_args(DNNL_ARG_WEIGHTS_PROJECTION, projection_desc, data[6],
offset[4]);
}
if (bias_mode == rnn_bias_mode::none) {
_q->memset((uint8_t *)(data[6]) + offset[4], 0, bias_desc.get_size());
}
insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[4]);
if (kind == ::dnnl::prop_kind::forward_training) {
insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[5]);
}
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
execute_primitive<::dnnl::vanilla_rnn_forward>(
{key, static_cast<::dnnl::vanilla_rnn_forward *>(p)},
execution_args);
} else if (mode == rnn_mode::gru) {
execute_primitive<::dnnl::gru_forward>(
{key, static_cast<::dnnl::gru_forward *>(p)}, execution_args);
} else if (mode == rnn_mode::lstm) {
execute_primitive<::dnnl::lstm_forward>(
{key, static_cast<::dnnl::lstm_forward *>(p)}, execution_args);
}
if (i != iter_num - 1) {
std::swap(data[0], data[1]);
}
}
if (kind == ::dnnl::prop_kind::forward_training) {
if (workspace_size) {
*workspace_size +=
(src_desc.get_size() + dst_desc.get_size() + iter_desc.get_size());
if (mode == rnn_mode::lstm) {
*workspace_size += iter_c_desc.get_size();
}
} else {
_q->memcpy((uint8_t *)(data[7]) + offset[5], in_cache,
src_desc.get_size());
offset[5] += src_desc.get_size();
_q->memcpy((uint8_t *)(data[7]) + offset[5], out_cache,
dst_desc.get_size());
offset[5] += dst_desc.get_size();
_q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_cache,
iter_desc.get_size());
offset[5] += iter_desc.get_size();
if (mode == rnn_mode::lstm) {
_q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_c_cache,
iter_c_desc.get_size());
offset[5] += iter_c_desc.get_size();
}
}
}
}
return e;
}
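/// Create and run the oneDNN backward primitive for the requested RNN cell,
/// executing it \p iter_num times and swapping the diff layer input/output
/// buffers between iterations. Argument buffers are addressed backwards from
/// their ends using the running offsets passed in \p offset.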
inline
sycl::event engine_ext::execute_rnn_backward_primitive(
rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode,
::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag,
int seq_length, int batch_size, int src_c, int dst_c, int layer_size,
int direction_num, int hidden_size, int gate_num, int projection_size,
std::vector<void *> &data, std::vector<int> &offset, int iter_num) {
sycl::event e;
::dnnl::primitive *p = nullptr;
detail::primitive_cache_key_type key;
::dnnl::prop_kind fkind = ::dnnl::prop_kind::forward_training;
::dnnl::prop_kind bkind = ::dnnl::prop_kind::backward;
::dnnl::memory::desc bias_desc(
{layer_size, direction_num, gate_num, hidden_size}, dt,
::dnnl::memory::format_tag::ldgo);
::dnnl::memory::desc weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc diff_weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldgoi);
::dnnl::memory::desc diff_weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldgoi);
::dnnl::memory::desc projection_desc, diff_projection_desc;
if (projection_size) {
projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldio);
diff_projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldoi);
}
::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag);
::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag);
::dnnl::memory::desc iter_desc(
{layer_size, direction_num, batch_size,
projection_size ? projection_size : hidden_size},
dt, ::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc iter_c_desc(
{layer_size, direction_num, batch_size, hidden_size}, dt,
::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc workspace_desc;
::dnnl::memory::desc scratchpad_desc;
::dnnl::primitive_attr attr;
attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
auto fpd = create_primitive_desc<::dnnl::vanilla_rnn_forward>(
fkind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc,
bias_desc, dst_desc, iter_desc, attr);
auto pd = create_primitive_desc<::dnnl::vanilla_rnn_backward>(
bkind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, diff_weight_layer_desc,
diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc,
iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc,
iter_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_backward>(pd);
key = r.first;
p = r.second;
} else if (mode == rnn_mode::gru) {
auto fpd = create_primitive_desc<::dnnl::gru_forward>(
fkind, direction, src_desc, iter_desc, weight_layer_desc,
weight_iter_desc, bias_desc, dst_desc, iter_desc, attr);
auto pd = create_primitive_desc<::dnnl::gru_backward>(
bkind, direction, src_desc, iter_desc, diff_weight_layer_desc,
diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc,
iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc,
iter_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::gru_backward>(pd);
key = r.first;
p = r.second;
} else if (mode == rnn_mode::lstm) {
auto fpd = create_primitive_desc<::dnnl::lstm_forward>(
fkind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc,
weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc,
dst_desc, iter_desc, iter_c_desc, attr);
auto pd = create_primitive_desc<::dnnl::lstm_backward>(
bkind, direction, src_desc, iter_desc, iter_c_desc,
diff_weight_layer_desc, diff_weight_iter_desc, ::dnnl::memory::desc(),
diff_projection_desc, bias_desc, dst_desc, iter_desc, iter_c_desc,
src_desc, iter_desc, iter_c_desc, weight_layer_desc, weight_iter_desc,
::dnnl::memory::desc(), projection_desc, bias_desc, dst_desc, iter_desc,
iter_c_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::lstm_backward>(pd);
key = r.first;
p = r.second;
}
for (int i = 0; i < iter_num; i++) {
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DIFF_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[8])}},
{DNNL_ARG_DIFF_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[9])}},
{DNNL_ARG_SCRATCHPAD, {::dnnl::memory(scratchpad_desc, _eng, data[15])}}};
auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data,
int &offset) {
offset += d.get_size();
execution_args->insert(
{arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data - offset)}});
};
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]);
}
insert_args(DNNL_ARG_DST_ITER, iter_desc, data[7], offset[0]);
insert_args(DNNL_ARG_DST_LAYER, dst_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_LAYER, src_desc, data[7], offset[0]);
insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[1]);
insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[3]);
if (projection_size) {
insert_args(DNNL_ARG_WEIGHTS_PROJECTION, diff_projection_desc, data[6],
offset[3]);
}
insert_args(DNNL_ARG_WEIGHTS_ITER, diff_weight_iter_desc, data[6],
offset[3]);
insert_args(DNNL_ARG_WEIGHTS_LAYER, diff_weight_layer_desc, data[6],
offset[3]);
insert_args(DNNL_ARG_DIFF_SRC_ITER, iter_desc, data[10], offset[4]);
insert_args(DNNL_ARG_DIFF_DST_ITER, iter_desc, data[11], offset[5]);
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_DIFF_SRC_ITER_C, iter_c_desc, data[12], offset[6]);
insert_args(DNNL_ARG_DIFF_DST_ITER_C, iter_c_desc, data[13], offset[7]);
}
insert_args(DNNL_ARG_DIFF_BIAS, bias_desc, data[14], offset[8]);
if (bias_mode == rnn_bias_mode::none) {
_q->memset((uint8_t *)(data[14]) - offset[8], 0, bias_desc.get_size());
}
if (projection_size) {
insert_args(DNNL_ARG_DIFF_WEIGHTS_PROJECTION, projection_desc, data[14],
offset[8]);
}
insert_args(DNNL_ARG_DIFF_WEIGHTS_ITER, weight_iter_desc, data[14],
offset[8]);
insert_args(DNNL_ARG_DIFF_WEIGHTS_LAYER, weight_layer_desc, data[14],
offset[8]);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
e = execute_primitive<::dnnl::vanilla_rnn_backward>(
{key, static_cast<::dnnl::vanilla_rnn_backward *>(p)},
execution_args);
} else if (mode == rnn_mode::gru) {
e = execute_primitive<::dnnl::gru_backward>(
{key, static_cast<::dnnl::gru_backward *>(p)}, execution_args);
} else if (mode == rnn_mode::lstm) {
e = execute_primitive<::dnnl::lstm_backward>(
{key, static_cast<::dnnl::lstm_backward *>(p)}, execution_args);
}
if (i != iter_num - 1) {
std::swap(data[8], data[9]);
}
}
return e;
}
#define GENERATE_RNN_PRIMITIVE_KEY(name) \
template <> \
inline std::string \
engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \
const ::dnnl::name::primitive_desc &pd) { \
std::stringstream ss; \
ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \
<< (std::uint8_t)pd.get_cell_kind() << (std::uint8_t)pd.get_direction() \
<< (std::uint8_t)pd.get_algorithm(); \
serialize_mem_desc(ss, pd.src_layer_desc()); \
serialize_mem_desc(ss, pd.src_iter_desc()); \
serialize_mem_desc(ss, pd.dst_layer_desc()); \
serialize_mem_desc(ss, pd.dst_iter_desc()); \
serialize_mem_desc(ss, pd.diff_src_layer_desc()); \
serialize_mem_desc(ss, pd.diff_src_iter_desc()); \
serialize_mem_desc(ss, pd.diff_dst_layer_desc()); \
serialize_mem_desc(ss, pd.diff_dst_iter_desc()); \
serialize_mem_desc(ss, pd.src_iter_c_desc()); \
serialize_mem_desc(ss, pd.dst_iter_c_desc()); \
serialize_mem_desc(ss, pd.diff_src_iter_c_desc()); \
serialize_mem_desc(ss, pd.diff_dst_iter_c_desc()); \
return ss.str(); \
}
#define GENERATE_CONVOLUTION_PRIMITIVE_KEY(name, query_type) \
template <> \
inline std::string \
engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \
const ::dnnl::name::primitive_desc &pd) { \
std::stringstream ss; \
ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \
<< (std::uint8_t)pd.get_algorithm() \
<< (std::uint8_t)pd.get_primitive_attr().get_fpmath_mode() \
<< (std::uint8_t)pd.get_group_size(); \
serialize_dims(ss, pd.get_strides()); \
serialize_dims(ss, pd.get_dilations()); \
serialize_dims(ss, pd.get_padding_l()); \
serialize_mem_desc(ss, pd.src_desc()); \
serialize_mem_desc(ss, pd.diff_src_desc()); \
serialize_mem_desc(ss, pd.dst_desc()); \
serialize_mem_desc(ss, pd.diff_dst_desc()); \
serialize_mem_desc(ss, pd.query_type()); \
serialize_mem_desc(ss, pd.weights_desc()); \
serialize_mem_desc(ss, pd.diff_weights_desc()); \
return ss.str(); \
}
GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_forward)
GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_backward)
GENERATE_RNN_PRIMITIVE_KEY(lstm_forward)
GENERATE_RNN_PRIMITIVE_KEY(lstm_backward)
GENERATE_RNN_PRIMITIVE_KEY(gru_forward)
GENERATE_RNN_PRIMITIVE_KEY(gru_backward)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_forward, bias_desc)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_data, scratchpad_desc)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_weights, diff_bias_desc)
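/// Fallback cache-key generator: serialize the primitive kind, propagation
/// kind, algorithm, the src/diff_src/dst/diff_dst memory descriptors and the
/// kind-specific attributes (epsilon/flags, p, alpha/beta, k, pooling geometry
/// or softmax axis) into a string key for the primitive cache. Specializations
/// for the RNN and convolution primitives are generated by the macros above.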
template <typename primitive_desc_type>
std::string engine_ext::generate_cache_key(const primitive_desc_type &pd) {
std::stringstream ss;
auto kind = pd.get_kind();
ss << (std::uint8_t)kind << (std::uint8_t)pd.get_prop_kind()
<< (std::uint8_t)pd.get_algorithm();
serialize_mem_desc(ss, pd.src_desc());
serialize_mem_desc(ss, pd.diff_src_desc());
serialize_mem_desc(ss, pd.dst_desc());
serialize_mem_desc(ss, pd.diff_dst_desc());
switch (kind) {
case ::dnnl::primitive::kind::batch_normalization:
ss << pd.get_epsilon() << (std::uint8_t)pd.get_flags();
case ::dnnl::primitive::kind::reduction:
ss << pd.get_p();
break;
case ::dnnl::primitive::kind::eltwise:
ss << pd.get_alpha() << pd.get_beta();
case ::dnnl::primitive::kind::lrn:
ss << pd.get_k();
break;
case ::dnnl::primitive::kind::pooling:
serialize_dims(ss, pd.get_strides());
serialize_dims(ss, pd.get_dilations());
serialize_dims(ss, pd.get_padding_l());
serialize_dims(ss, pd.get_kernel());
break;
case ::dnnl::primitive::kind::softmax:
ss << pd.get_axis();
break;
default:
break;
}
return ss.str();
}
template <typename primitive_type, typename... args_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
engine_ext::create_primitive(args_type &&...args) {
auto pd =
create_primitive_desc<primitive_type>(std::forward<args_type>(args)...);
return create_primitive_with_pd<primitive_type>(pd);
}
template <typename primitive_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
engine_ext::create_primitive_with_pd(
const typename primitive_type::primitive_desc &pd) {
detail::primitive_cache_key_type key = generate_cache_key(pd);
primitive_type *p = (primitive_type *)_primitive_cache.get(key);
if (!p) {
p = new primitive_type(pd);
}
return {key, p};
}
template <typename primitive_type, typename... args_type>
typename primitive_type::primitive_desc
engine_ext::create_primitive_desc(args_type &&...args) {
return typename primitive_type::primitive_desc(
_eng, std::forward<args_type>(args)...);
}
inline
void engine_ext::fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr) {
async_fill(src_desc, src, valuePtr).wait();
}
inline
void engine_ext::reorder(float alpha, const memory_desc_ext &src_desc,
void *src, float beta, const memory_desc_ext &dst_desc,
void *dst) {
async_reorder(alpha, src_desc, src, beta, dst_desc, dst).wait();
}
inline
void engine_ext::scale(float alpha, const memory_desc_ext &src_desc,
void *src) {
async_scale(alpha, src_desc, src).wait();
}
inline
void engine_ext::sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst) {
async_sum(alpha, src_desc, src, beta, dst_desc, dst).wait();
}
inline
void engine_ext::activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst) {
async_activation_forward(desc, alpha, src_desc, src, beta, dst_desc, dst)
.wait();
}
inline
void engine_ext::activation_backward(
activation_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
async_activation_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src)
.wait();
}
inline
void engine_ext::pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst,
::dnnl::memory *workspace) {
async_pooling_forward(desc, alpha, src_desc, src, beta, dst_desc, dst,
workspace).wait();
}
inline
void engine_ext::pooling_backward(
pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
async_pooling_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src,
workspace)
.wait();
}
inline
void engine_ext::softmax_forward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst) {
async_softmax_forward(alg, mode, alpha, src_desc, src, beta, dst_desc, dst)
.wait();
}
inline
void engine_ext::softmax_backward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &dst_desc,
void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src) {
async_softmax_backward(alg, mode, alpha, dst_desc, dst, diff_dst_desc,
diff_dst, beta, diff_src_desc, diff_src)
.wait();
}
inline
void engine_ext::lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
async_lrn_forward(desc, alpha, src_desc, src, beta, dst_desc, dst, workspace)
.wait();
}
inline
void engine_ext::lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src,
::dnnl::memory *workspace) {
async_lrn_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src, workspace)
.wait();
}
inline
sycl::event engine_ext::async_fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr) {
::dnnl::memory::data_type dt = src_desc.get_desc().get_data_type();
  size_t mem_size = src_desc.get_size();
switch (dt) {
case ::dnnl::memory::data_type::f32:
return fill_with_type<float>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::f16:
return fill_with_type<sycl::half>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::s32:
return fill_with_type<int32_t>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::s8:
return fill_with_type<int8_t>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::u8:
return fill_with_type<uint8_t>(_q, src, valuePtr, mem_size);
default:
throw std::runtime_error("async_fill: unsupported data type.");
}
}
inline
sycl::event engine_ext::async_reorder(float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto pd = ::dnnl::reorder::primitive_desc(_eng, src_desc.get_desc(), _eng,
dst_desc.get_desc());
auto primitive = create_primitive_with_pd<::dnnl::reorder>(pd);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
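/// Asynchronously compute src = alpha * src. A copy of the source is taken and
/// an eltwise_linear primitive writes the scaled result back into \p src; a
/// default-constructed event is returned when alpha == 1.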
inline
sycl::event engine_ext::async_scale(float alpha, const memory_desc_ext &src_desc,
void *src) {
if (alpha == 1.f) {
return sycl::event();
}
void *src_cache = allocate(src_desc);
_q->memcpy(src_cache, src, src_desc.get_size());
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward_inference, ::dnnl::algorithm::eltwise_linear,
src_desc.get_desc(), src_desc.get_desc(), alpha, 0.f);
auto args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src_cache)}};
return execute_primitive(primitive, args, {}, {src_cache});
}
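/// Asynchronously compute dst = alpha * src + beta * dst using a ::dnnl::sum
/// primitive over \p src and a temporary copy of \p dst; a default-constructed
/// event is returned when alpha == 0 and beta == 1.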
inline sycl::event
engine_ext::async_sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst) {
if (alpha == 0.f && beta == 1.f) {
return sycl::event();
}
void *dst_cache = allocate(dst_desc);
_q->memcpy(dst_cache, dst, dst_desc.get_size());
auto pd = create_primitive_desc<::dnnl::sum>(
std::vector<float>{alpha, beta},
std::vector<::dnnl::memory::desc>{src_desc.get_desc(),
dst_desc.get_desc()});
std::stringstream ss;
ss << (std::uint8_t)pd.get_kind() << alpha << beta;
serialize_mem_desc(ss, pd.src_desc(0));
serialize_mem_desc(ss, pd.src_desc(1));
detail::primitive_cache_key_type key = ss.str();
::dnnl::sum *p = (::dnnl::sum *)_primitive_cache.get(key);
if (!p) {
p = new ::dnnl::sum(pd);
}
auto args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)},
{DNNL_ARG_MULTIPLE_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_MULTIPLE_SRC + 1,
::dnnl::memory(dst_desc.get_desc(), _eng, dst_cache)}};
return execute_primitive<::dnnl::sum>({key, p}, args, {}, {dst_cache});
}
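/// Asynchronously compute dst = op(alpha_0 * src_0, alpha_1 * src_1) +
/// beta * dst. The sqrt and neg operations are lowered to eltwise primitives
/// on a scaled copy of src_0; all other operations use a ::dnnl::binary
/// primitive on scaled copies of both sources.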
inline
sycl::event engine_ext::async_binary(binary_op op, float alpha_0,
const memory_desc_ext &src_desc_0, void *src_0,
float alpha_1, const memory_desc_ext &src_desc_1,
void *src_1, float beta,
const memory_desc_ext &dst_desc, void *dst) {
::dnnl::algorithm onednn_algorithm;
switch (op) {
case binary_op::max:
onednn_algorithm = ::dnnl::algorithm::binary_max;
break;
case binary_op::min:
onednn_algorithm = ::dnnl::algorithm::binary_min;
break;
case binary_op::add:
onednn_algorithm = ::dnnl::algorithm::binary_add;
break;
case binary_op::sub:
onednn_algorithm = ::dnnl::algorithm::binary_sub;
break;
case binary_op::mul:
onednn_algorithm = ::dnnl::algorithm::binary_mul;
break;
case binary_op::div:
onednn_algorithm = ::dnnl::algorithm::binary_div;
break;
case binary_op::sqrt:
onednn_algorithm = ::dnnl::algorithm::eltwise_sqrt;
break;
case binary_op::neg:
onednn_algorithm = ::dnnl::algorithm::eltwise_linear;
break;
}
if (onednn_algorithm == ::dnnl::algorithm::eltwise_sqrt ||
onednn_algorithm == ::dnnl::algorithm::eltwise_linear) {
void *src_cache = nullptr, *dst_cache = nullptr;
src_cache = allocate(src_desc_0);
dst_cache = allocate(dst_desc);
_q->memcpy(src_cache, src_0, src_desc_0.get_size());
_q->memcpy(dst_cache, dst, dst_desc.get_size());
async_scale(alpha_0, src_desc_0, src_cache);
async_scale(beta, dst_desc, dst_cache);
    // For the neg op, eltwise_linear with alpha = -1 and beta = 1 computes
    // output = 1 - input; eltwise_sqrt ignores the alpha/beta values.
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward_inference, onednn_algorithm,
src_desc_0.get_desc(), dst_desc.get_desc(), -1.f, 1.f);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(src_desc_0.get_desc(), _eng, src_cache)}}};
execute_primitive(
primitive, execution_args, {{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
auto e = async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(src_cache, *_q);
sycl::free(dst_cache, *_q);
});
});
return e;
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{};
void *src_0_cache = nullptr, *src_1_cache = nullptr, *dst_cache = nullptr;
src_0_cache = allocate(src_desc_0);
src_1_cache = allocate(src_desc_1);
dst_cache = allocate(dst_desc);
_q->memcpy(src_0_cache, src_0, src_desc_0.get_size());
_q->memcpy(src_1_cache, src_1, src_desc_1.get_size());
_q->memcpy(dst_cache, dst, dst_desc.get_size());
async_scale(alpha_0, src_desc_0, src_0_cache);
async_scale(alpha_1, src_desc_1, src_1_cache);
async_scale(beta, dst_desc, dst_cache);
execution_args->insert({DNNL_ARG_SRC_0, ::dnnl::memory(src_desc_0.get_desc(),
_eng, src_0_cache)});
execution_args->insert({DNNL_ARG_SRC_1, ::dnnl::memory(src_desc_1.get_desc(),
_eng, src_1_cache)});
auto primitive = create_primitive<::dnnl::binary>(
onednn_algorithm, src_desc_0.get_desc(), src_desc_1.get_desc(),
dst_desc.get_desc());
auto e = execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(dst_cache, *_q);
sycl::free(src_0_cache, *_q);
sycl::free(src_1_cache, *_q);
});
});
return e;
}
inline
sycl::event engine_ext::async_reduction(reduction_op op, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst) {
if (alpha == 0.f && beta == 1.f) {
return sycl::event();
}
float p = 2.f;
::dnnl::algorithm onednn_algorithm;
void *cache = nullptr;
switch (op) {
case reduction_op::amax:
cache = allocate(src_desc);
activation_desc adesc;
adesc.set_algorithm(::dnnl::algorithm::eltwise_abs);
async_activation_forward(adesc, 1.f, src_desc, src, 0.f, src_desc, cache);
onednn_algorithm = ::dnnl::algorithm::reduction_max;
src = cache;
break;
case reduction_op::max:
onednn_algorithm = ::dnnl::algorithm::reduction_max;
break;
case reduction_op::min:
onednn_algorithm = ::dnnl::algorithm::reduction_min;
break;
case reduction_op::sum:
onednn_algorithm = ::dnnl::algorithm::reduction_sum;
break;
case reduction_op::mean:
onednn_algorithm = ::dnnl::algorithm::reduction_mean;
break;
case reduction_op::mul:
onednn_algorithm = ::dnnl::algorithm::reduction_mul;
break;
case reduction_op::mul_no_zeros:
cache = allocate(src_desc);
transform_no_zero(src_desc, src, cache);
onednn_algorithm = ::dnnl::algorithm::reduction_mul;
src = cache;
break;
case reduction_op::norm1:
p = 1.f;
onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_power_p_sum;
break;
case reduction_op::norm2:
onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_sum;
break;
}
auto primitive = create_primitive<::dnnl::reduction>(
onednn_algorithm, src_desc.get_desc(), dst_desc.get_desc(), p, 0.f);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)}};
if (cache) {
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}},
{cache});
}
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline
sycl::event engine_ext::async_activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward, desc.get_algorithm(), src_desc.get_desc(),
dst_desc.get_desc(), desc.get_alpha(), desc.get_beta());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline
sycl::event engine_ext::async_activation_backward(
activation_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
::dnnl::memory::desc data_desc = dst_desc.get_desc();
auto alg = desc.get_algorithm();
if ((alg == ::dnnl::algorithm::eltwise_clip) ||
(alg == ::dnnl::algorithm::eltwise_linear) ||
(alg == ::dnnl::algorithm::eltwise_swish)) {
data_desc = src_desc.get_desc();
}
auto primitive = create_primitive<::dnnl::eltwise_backward>(
alg, diff_src_desc.get_desc(), diff_dst_desc.get_desc(), data_desc,
desc.get_alpha(), desc.get_beta(),
create_primitive_desc<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward, alg, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_alpha(), desc.get_beta()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline
sycl::event engine_ext::async_pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
int pooling_dim = desc.get_stride().size();
std::vector<int64_t> dilation(pooling_dim, 0);
auto primitive_desc =
create_primitive_desc<::dnnl::pooling_forward>(
::dnnl::prop_kind::forward_training, desc.get_algorithm(),
src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(),
desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng);
execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem});
if (workspace) {
*workspace = ws_mem;
} else {
insert_workspace(src, ws_mem);
}
auto primitive =
create_primitive_with_pd<::dnnl::pooling_forward>(primitive_desc);
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline
sycl::event engine_ext::async_pooling_backward(
pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
int pooling_dim = desc.get_stride().size();
std::vector<int64_t> dilation(pooling_dim, 0);
auto primitive = create_primitive<::dnnl::pooling_backward>(
desc.get_algorithm(), diff_src_desc.get_desc(), diff_dst_desc.get_desc(),
desc.get_stride(), desc.get_kernel(), dilation, desc.get_padding(),
desc.get_padding(),
create_primitive_desc<::dnnl::pooling_forward>(
::dnnl::prop_kind::forward_training, desc.get_algorithm(),
src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(),
desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
if (workspace) {
execution_args->insert({DNNL_ARG_WORKSPACE, *workspace});
} else {
execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)});
}
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline
sycl::event engine_ext::async_softmax_forward(softmax_algorithm alg,
softmax_mode mode, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
if (mode == softmax_mode::instance) {
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(help_src_desc, _eng, src)}}};
::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate;
if (alg == softmax_algorithm::log) {
softmax_alg = ::dnnl::algorithm::softmax_log;
}
auto primitive = create_primitive<::dnnl::softmax_forward>(
::dnnl::prop_kind::forward, softmax_alg, help_src_desc,
help_dst_desc, 1);
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, memory_desc_ext(help_dst_desc), dst}});
}
inline
sycl::event engine_ext::async_softmax_backward(
softmax_algorithm alg, softmax_mode mode, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc();
if (mode == softmax_mode::instance) {
help_diff_src_desc =
compress_spatial_dimensions_to_channel(help_diff_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
help_diff_dst_desc =
compress_spatial_dimensions_to_channel(help_diff_dst_desc);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(help_dst_desc, _eng, dst)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(help_diff_dst_desc, _eng, diff_dst)}}};
::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate;
if (alg == softmax_algorithm::log) {
softmax_alg = ::dnnl::algorithm::softmax_log;
}
auto primitive = create_primitive<::dnnl::softmax_backward>(
softmax_alg, help_diff_src_desc, help_diff_dst_desc, help_dst_desc, 1,
create_primitive_desc<::dnnl::softmax_forward>(
::dnnl::prop_kind::forward, softmax_alg, help_diff_src_desc,
help_dst_desc, 1));
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC,
memory_desc_ext(help_diff_src_desc), diff_src}});
}
inline
sycl::event engine_ext::async_lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto primitive_desc = create_primitive_desc<::dnnl::lrn_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(),
desc.get_beta(), desc.get_k());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng);
execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem});
if (workspace) {
*workspace = ws_mem;
} else {
insert_workspace(src, ws_mem);
}
auto primitive =
create_primitive_with_pd<::dnnl::lrn_forward>(primitive_desc);
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline
sycl::event
engine_ext::async_lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
auto primitive = create_primitive<::dnnl::lrn_backward>(
::dnnl::algorithm::lrn_across_channels, diff_src_desc.get_desc(),
diff_dst_desc.get_desc(), src_desc.get_desc(), desc.get_local_size(),
desc.get_alpha(), desc.get_beta(), desc.get_k(),
create_primitive_desc<::dnnl::lrn_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(),
desc.get_beta(), desc.get_k()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
if (workspace) {
execution_args->insert({DNNL_ARG_WORKSPACE, *workspace});
} else {
execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)});
}
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline
size_t engine_ext::get_batch_normalization_workspace_size(
batch_normalization_ops ops, const memory_desc_ext &src_desc) {
if(ops == batch_normalization_ops::none) {
return 0;
}
return src_desc.get_size();
}
inline
sycl::event engine_ext::async_batch_normalization_forward_inference(
batch_normalization_mode mode, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *mean, void *var) {
return batch_normalization_forward_internal(
true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc, mean,
var, nullptr, nullptr);
}
inline
sycl::event engine_ext::async_batch_normalization_forward_inference(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *mean, void *var) {
bool has_post_op = (ops != batch_normalization_ops::none);
sycl::event e;
std::vector<void *> caches;
if (has_post_op) {
void *dst_cache = allocate(dst_desc);
caches.push_back(dst_cache);
batch_normalization_forward_internal(
true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, dst_desc, dst_cache,
scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr,
nullptr);
if (ops == batch_normalization_ops::add_activation) {
async_sum(1.f, summand_desc, summand, 1.f, dst_desc, dst_cache);
}
async_activation_forward(adesc, 1.f, dst_desc, dst_cache, 0.f, dst_desc,
dst_cache);
e = async_sum(alpha, dst_desc, dst_cache, beta, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
return batch_normalization_forward_internal(
true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr, nullptr);
}
inline
sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var) {
return batch_normalization_forward_internal(
false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc,
saved_mean, saved_var, running_mean, running_var);
}
inline
sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *running_mean, void *running_var,
void *saved_mean, void *saved_var, size_t workspace_size,
void *workspace) {
bool has_post_op = (ops != batch_normalization_ops::none);
sycl::event e;
if (has_post_op) {
if(workspace_size < dst_desc.get_desc().get_size()) {
throw std::runtime_error("async_batch_normalization_forward_training_ex: "
"no sufficient workspace.");
}
batch_normalization_forward_internal(
false, mode, epsilon, factor, 1.f, src_desc, src, 0.f, dst_desc,
workspace, scale_bias_desc, scale, bias, mean_var_desc,
saved_mean, saved_var, running_mean, running_var);
if (ops == batch_normalization_ops::add_activation) {
async_sum(1.f, summand_desc, summand, 1.f, dst_desc,
workspace);
}
return async_activation_forward(adesc, alpha, dst_desc, workspace,
beta, dst_desc, dst);
}
return batch_normalization_forward_internal(
false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_desc, scale, bias, mean_var_desc, saved_mean, saved_var,
running_mean, running_var);
}
inline
sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
return async_batch_normalization_forward_training(
mode, ops, adesc, epsilon, factor, alpha, src_desc, src, beta, dst_desc,
dst, summand_desc, summand, scale_bias_mean_var_desc, scale, bias,
scale_bias_mean_var_desc, running_mean, running_var, saved_mean,
saved_var, workspace_size, workspace);
}
inline
sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale,
float beta_param, void *diff_scale, void *diff_bias, void *saved_mean,
void *saved_var) {
return batch_normalization_backward_internal(
mode, epsilon, alpha_data, src_desc, src, diff_dst_desc, diff_dst,
beta_data, diff_src_desc, diff_src, alpha_param,
diff_scale_bias_mean_var_desc, scale, nullptr, beta_param, diff_scale,
diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var);
}
inline
sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale,
void *bias, float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
std::vector<void *> caches;
::dnnl::memory::desc real_diff_dst_desc = diff_dst_desc.get_desc();
void *real_diff_dst = diff_dst;
if (ops != batch_normalization_ops::none &&
workspace_size < dst_desc.get_desc().get_size()) {
throw std::runtime_error("async_batch_normalization_backward_ex: "
"no sufficient workspace.");
}
if (ops == batch_normalization_ops::add_activation) {
void *diff_summand_cache = allocate(diff_summand_desc);
async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc, diff_dst,
dst_desc, workspace, 0.f,
diff_summand_desc, diff_summand_cache);
caches.push_back(diff_summand_cache);
async_sum(alpha_data, diff_summand_desc, diff_summand_cache, beta_data,
diff_summand_desc, diff_summand);
real_diff_dst_desc = diff_summand_desc.get_desc();
real_diff_dst = diff_summand_cache;
} else if (ops == batch_normalization_ops::activation) {
void *diff_dst_cache = allocate(diff_dst_desc);
caches.push_back(diff_dst_cache);
async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc,
diff_dst, dst_desc, workspace,
0.f, diff_dst_desc, diff_dst_cache);
real_diff_dst = diff_dst_cache;
}
sycl::event e = batch_normalization_backward_internal(
mode, epsilon, alpha_data, src_desc, src, real_diff_dst_desc,
real_diff_dst, beta_data, diff_src_desc, diff_src, alpha_param,
diff_scale_bias_desc, scale, bias, beta_param, diff_scale, diff_bias,
mean_var_desc, saved_mean, saved_var);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
inline
sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc,
void *scale, void *bias, float beta_param, void *diff_scale,
void *diff_bias, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
return async_batch_normalization_backward(
mode, ops, adesc, epsilon, alpha_data, src_desc, src, dst_desc, dst,
diff_dst_desc, diff_dst, beta_data, diff_src_desc, diff_src,
diff_summand_desc, diff_summand, alpha_param,
diff_scale_bias_mean_var_desc, scale, bias, beta_param, diff_scale,
diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var,
workspace_size, workspace);
}
inline
sycl::event
engine_ext::async_convolution_forward(convolution_desc &desc, ::dnnl::algorithm alg,
float alpha, const memory_desc_ext &src_desc,
void *src, const memory_desc_ext &weight_desc,
void *weight, float beta,
const memory_desc_ext &dst_desc, void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto origin_src_md = src_desc.get_desc();
auto origin_dst_md = dst_desc.get_desc();
auto origin_weight_md = help_weight_desc;
auto src_md = transfer_memory_desc_to_format_tag_any(origin_src_md);
auto dst_md = transfer_memory_desc_to_format_tag_any(origin_dst_md);
auto weight_md = transfer_memory_desc_to_format_tag_any(origin_weight_md);
auto primitive = create_primitive<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, alg, src_md, weight_md, dst_md,
desc.get_stride(), desc.get_dilate(), desc.get_padding(),
desc.get_padding(), attr);
::dnnl::convolution_forward::primitive_desc pd =
::dnnl::convolution_forward::primitive_desc(
const_cast<dnnl_primitive_desc_t>(
primitive.second->get_primitive_desc()));
auto optimal_src_md = pd.src_desc();
auto optimal_dst_md = pd.dst_desc();
auto optimal_weight_md = pd.weights_desc();
void *optimal_src = src, *optimal_dst = dst, *optimal_weight = weight;
std::vector<void *> input_caches, output_caches;
allocate_and_reorder_memory_to_optimal(origin_src_md, src, optimal_src_md,
optimal_src, input_caches);
allocate_and_reorder_memory_to_optimal(origin_dst_md, dst, optimal_dst_md,
optimal_dst, output_caches);
allocate_and_reorder_memory_to_optimal(origin_weight_md, weight,
optimal_weight_md, optimal_weight,
input_caches);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(optimal_src_md, _eng, optimal_src)}},
{DNNL_ARG_WEIGHTS, {::dnnl::memory(optimal_weight_md, _eng, optimal_weight)}}};
auto e = execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, optimal_dst_md, optimal_dst}},
input_caches);
if(origin_dst_md != optimal_dst_md){
e = async_reorder(1.f, optimal_dst_md, optimal_dst, 0.f, origin_dst_md, dst);
}
async_free(_q, e, nullptr, output_caches);
return e;
}
inline
sycl::event engine_ext::async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc,
float alpha_0, const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float alpha_1,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &bias_desc, void *bias,
const memory_desc_ext &dst_desc, void *dst) {
int channel_num = bias_desc.get_element_num();
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::memory::desc help_bias_desc = {{channel_num},
bias_desc.get_desc().get_data_type(),
::dnnl::memory::format_tag::a};
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto primitive = create_primitive<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, alg, src_desc.get_desc(),
help_weight_desc, help_bias_desc, dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_BIAS, {::dnnl::memory(help_bias_desc, _eng, bias)}}};
void *cache = nullptr;
if (alpha_0 != 1.f) {
cache = allocate(help_weight_desc);
_q->memcpy(cache, weight, weight_desc.get_size());
async_scale(alpha_0, help_weight_desc, cache);
execution_args->insert(
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, cache)}});
execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}}, {cache});
} else {
execution_args->insert(
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}});
execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
}
async_sum(alpha_1, summand_desc, summand, 1.f, dst_desc, dst);
return async_activation_forward(adesc, 1.f, dst_desc, dst, 0.f, dst_desc, dst);
}
inline
sycl::event engine_ext::async_convolution_backward_data(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &weight_desc, void *weight,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
  if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto forward_primitive =
create_primitive_desc<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::convolution_auto, diff_src_desc.get_desc(),
help_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr);
auto primitive = create_primitive<::dnnl::convolution_backward_data>(
::dnnl::algorithm::convolution_auto, diff_src_desc.get_desc(),
help_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(),
forward_primitive, attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}},
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}}};
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline
sycl::event engine_ext::async_convolution_backward_weight(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_weight_desc, void *diff_weight) {
if (scale_parameter_preprocess(
{{alpha, beta, diff_weight_desc, diff_weight}})) {
return sycl::event();
}
auto help_diff_weight_desc =
get_group_weight_desc(desc.get_group_count(), diff_weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto forward_primitive =
create_primitive_desc<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::convolution_auto, src_desc.get_desc(),
help_diff_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr);
auto primitive =
create_primitive<::dnnl::convolution_backward_weights>(
::dnnl::algorithm::convolution_auto, src_desc.get_desc(),
help_diff_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(),
forward_primitive, attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_WEIGHTS,
help_diff_weight_desc, diff_weight}});
}
inline
sycl::event engine_ext::async_convolution_backward_bias(
float alpha, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta, const memory_desc_ext &diff_bias_desc, void *diff_bias) {
return async_reduction(reduction_op::sum, alpha, diff_dst_desc, diff_dst, beta,
diff_bias_desc, diff_bias);
}
inline
void engine_ext::rnn_get_weight_space_size(const rnn_desc &desc,
size_t *weight_space_size) {
*weight_space_size = 0;
rnn_forward_internal(desc, ::dnnl::prop_kind::forward_inference,
memory_desc_ext(), nullptr, memory_desc_ext(), nullptr,
memory_desc_ext(), nullptr, nullptr, memory_desc_ext(),
nullptr, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, true,
weight_space_size, nullptr, nullptr);
return;
}
inline
void engine_ext::rnn_get_scratchpad_workspace_size(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, size_t *scratchpad_size,
size_t *workspace_size) {
*workspace_size = 0;
*scratchpad_size = 0;
rnn_forward_internal(desc, kind, src_desc, nullptr, memory_desc_ext(),
nullptr, memory_desc_ext(), nullptr, nullptr,
memory_desc_ext(), nullptr, nullptr, 0, nullptr, 0,
nullptr, 0, nullptr, true, nullptr, workspace_size,
scratchpad_size);
return;
}
inline
sycl::event engine_ext::async_rnn_forward(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace) {
return rnn_forward_internal(
desc, kind, src_desc, src, dst_desc, dst, iter_desc, src_iter, dst_iter,
iter_c_desc, src_iter_c, dst_iter_c, weight_size, weight, workspace_size,
workspace, scratchpad_size, scratchpad, false, nullptr, nullptr,
nullptr);
}
inline
sycl::event engine_ext::async_rnn_backward(
const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst,
void *diff_dst, const memory_desc_ext &src_desc, void *src, void *diff_src,
const memory_desc_ext &iter_desc, void *src_iter, void *diff_dst_iter,
void *diff_src_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c,
void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size,
void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace) {
::dnnl::memory::data_type src_dt;
::dnnl::memory::format_tag src_format_tag;
rnn_mode mode;
rnn_memory_format_tag format_tag;
rnn_bias_mode bias_mode;
rnn_direction direction;
dpct::library_data_t dt;
int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0,
layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0,
seq_length = 1, batch_size = 1;
void *last_layer_cache = nullptr;
void *hidden_layer_cache = nullptr;
sycl::event e;
std::vector<int> offset(9, 0);
std::vector<void *> data = {
src,
dst,
(uint8_t *)src_iter + iter_desc.get_size(),
nullptr,
(uint8_t *)src_iter_c + iter_c_desc.get_size(),
nullptr,
(uint8_t *)weight + weight_size,
(uint8_t *)workspace + workspace_size,
diff_src,
diff_dst,
(uint8_t *)diff_src_iter + iter_desc.get_size(),
(uint8_t *)diff_dst_iter + iter_desc.get_size(),
(uint8_t *)diff_src_iter_c + iter_c_desc.get_size(),
(uint8_t *)diff_dst_iter_c + iter_c_desc.get_size(),
(uint8_t *)diff_weight + weight_size,
scratchpad};
desc.get(&mode, &bias_mode, &direction, &dt, &input_size, &hidden_size,
&projection_size, &layer_size);
get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size,
&src_dt, &src_format_tag, &projection_size,
&output_size, &seq_length, &batch_size, &direction_num,
&gate_num);
if (direction == rnn_direction::bidirectional) {
if (layer_size > 1) {
last_layer_cache = allocate(src_desc);
hidden_layer_cache = allocate(src_desc);
data[8] = last_layer_cache;
}
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::bidirectional_concat, bias_mode, src_dt,
src_format_tag, seq_length, batch_size, output_size, 2 * output_size, 1,
direction_num, hidden_size, gate_num, projection_size, data, offset, 1);
if (layer_size > 1) {
data[8] = hidden_layer_cache;
data[9] = last_layer_cache;
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::bidirectional_sum, bias_mode, src_dt,
src_format_tag, seq_length, batch_size, output_size, output_size, 1,
direction_num, hidden_size, gate_num, projection_size, data, offset,
layer_size - 1);
_q->memcpy(diff_src,
((layer_size - 1) % 2 == 0) ? last_layer_cache
: hidden_layer_cache,
src_desc.get_size());
}
} else {
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, layer_size, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1);
}
if (last_layer_cache && hidden_layer_cache) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(last_layer_cache, *_q);
sycl::free(hidden_layer_cache, *_q);
});
});
}
return e;
}
inline
size_t engine_ext::get_dropout_state_size(){
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
sycl::queue q;
if(_random_engine_state_size == -1) {
if(_q){
q = *_q;
} else {
q = dpct::get_current_device().default_queue();
}
auto rand_engine = rng_engine_t(q, 0);
_random_engine_state_size = oneapi::mkl::rng::get_state_size(rand_engine);
}
return _random_engine_state_size;
#endif
}
inline size_t
engine_ext::get_dropout_workspace_size(const memory_desc_ext &src_desc) {
return src_desc.get_size();
}
inline
sycl::event engine_ext::async_dropout_forward(dropout_desc &desc,
const memory_desc_ext &src_desc,
void *src,
const memory_desc_ext &dst_desc,
void *dst, void *workspace,
size_t workspace_size) {
if (workspace_size < src_desc.get_size()) {
throw std::runtime_error("async_dropout_forward: no sufficient workspace.");
}
float p = desc.get_probability();
if (p == 1.f) {
return _q->memset(dst, 0, dst_desc.get_size());
} else if (p == 0.f) {
return async_reorder(1.f, src_desc, src, 0.f, dst_desc, dst);
}
float scale_factor = 1.f / (1.f - p);
void *cache = workspace;
memory_desc_ext rng_data_desc(
::dnnl::memory::desc(src_desc.get_dims(), ::dnnl::memory::data_type::s32,
src_desc.get_strides()));
if (src_desc.get_desc().get_data_type() != ::dnnl::memory::data_type::s32) {
cache = allocate(rng_data_desc);
}
desc.generate(_q, _random_engine_state_size, rng_data_desc.get_element_num(),
(std::int32_t *)cache);
if (cache == workspace) {
async_scale(scale_factor, src_desc, workspace);
} else {
async_reorder(scale_factor, rng_data_desc, cache, 0.f, src_desc, workspace);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_0, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_SRC_1, ::dnnl::memory(src_desc.get_desc(), _eng, workspace)},
{DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)}};
auto primitive = create_primitive<::dnnl::binary>(
::dnnl::algorithm::binary_mul, src_desc.get_desc(), src_desc.get_desc(),
dst_desc.get_desc());
auto e = execute_primitive(primitive, execution_args);
if (cache != workspace) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { sycl::free(cache, *_q); });
});
}
return e;
}
inline
sycl::event engine_ext::async_dropout_backward(
dropout_desc &desc, const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &diff_src_desc, void *diff_src,
void *workspace, size_t workspace_size) {
float p = desc.get_probability();
if (p == 1.f) {
return _q->memset(diff_src, 0, diff_src_desc.get_size());
} else if (p == 0.f) {
return async_reorder(1.f, diff_dst_desc, diff_dst, 0.f, diff_src_desc,
diff_src);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_0,
::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)},
{DNNL_ARG_SRC_1,
::dnnl::memory(diff_dst_desc.get_desc(), _eng, workspace)},
{DNNL_ARG_DST, ::dnnl::memory(diff_src_desc.get_desc(), _eng, diff_src)}};
auto primitive = create_primitive<::dnnl::binary>(
::dnnl::algorithm::binary_mul, diff_dst_desc.get_desc(),
diff_dst_desc.get_desc(), diff_src_desc.get_desc());
return execute_primitive(primitive, execution_args);
}
} // namespace dnnl
} // namespace dpct
#endif // __DPCT_DNNL_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/lapack_utils.hpp | //==---- lapack_utils.hpp -------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_LAPACK_UTILS_HPP__
#define __DPCT_LAPACK_UTILS_HPP__
#include "memory.hpp"
#include "util.hpp"
#include "lib_common_utils.hpp"
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
namespace dpct {
namespace lapack {
/// Computes all eigenvalues and, optionally, eigenvectors of a real generalized
/// symmetric definite eigenproblem using a divide and conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2, or 3. Specifies the problem type to be solved.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrices A and B.
/// \param [in,out] a The symmetric matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b The symmetric matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] w Eigenvalues.
/// \param [in] scratchpad Scratchpad memory to be used by the routine
/// for storing intermediate results.
/// \param [in] scratchpad_size Size of scratchpad memory as a number of
/// floating point elements of type T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
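///
/// Illustrative usage sketch (a minimal example, assuming a typical oneMKL
/// USM setup; the in-order queue, matrix contents, and the
/// oneapi::mkl::lapack::sygvd_scratchpad_size query below are illustrative
/// assumptions rather than requirements of this wrapper):
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr int n = 4, lda = 4, ldb = 4;
/// float *a = sycl::malloc_shared<float>(lda * n, q); // symmetric A (filled by caller)
/// float *b = sycl::malloc_shared<float>(ldb * n, q); // SPD B (filled by caller)
/// float *w = sycl::malloc_shared<float>(n, q);       // eigenvalues
/// int *info = sycl::malloc_shared<int>(1, q);
/// std::int64_t ws_size = oneapi::mkl::lapack::sygvd_scratchpad_size<float>(
///     q, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, ldb);
/// float *ws = sycl::malloc_device<float>(ws_size, q);
/// int ret = dpct::lapack::sygvd(q, 1, oneapi::mkl::job::vec,
///                               oneapi::mkl::uplo::upper, n, a, lda, b, ldb,
///                               w, ws, static_cast<int>(ws_size), info);
/// q.wait(); // ret == 0 on success; *info holds the LAPACK info value otherwise
/// \endcode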
template <typename T>
inline int sygvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
T *w, T *scratchpad, int scratchpad_size, int *info) {
#ifdef DPCT_USM_LEVEL_NONE
auto info_buf = get_buffer<int>(info);
auto a_buffer = get_buffer<T>(a);
auto b_buffer = get_buffer<T>(b);
auto w_buffer = get_buffer<T>(w);
auto scratchpad_buffer = get_buffer<T>(scratchpad);
int info_val = 0;
int ret_val = 0;
try {
oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a_buffer, lda,
b_buffer, ldb, w_buffer, scratchpad_buffer,
scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_val = static_cast<int>(e.info());
ret_val = 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
ret_val = 1;
}
queue.submit([&, info_val](sycl::handler &cgh) {
auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh);
cgh.single_task<dpct_kernel_name<class sygvd_set_info, T>>(
[=]() { info_acc[0] = info_val; });
});
return ret_val;
#else
try {
oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a, lda, b, ldb, w,
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
int info_val = static_cast<int>(e.info());
queue.memcpy(info, &info_val, sizeof(int)).wait();
return 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, sizeof(int)).wait();
return 1;
}
queue.memset(info, 0, sizeof(int));
return 0;
#endif
}
/// Computes all the eigenvalues, and optionally, the eigenvectors of a complex
/// generalized Hermitian positive-definite eigenproblem using a divide and
/// conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2, or 3. Specifies the problem type to be solved.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrices A and B.
/// \param [in,out] a The Hermitian matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b The Hermitian matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] w Eigenvalues.
/// \param [in] scratchpad Scratchpad memory to be used by the routine
/// for storing intermediate results.
/// \param [in] scratchpad_size Size of scratchpad memory as a number of
/// floating point elements of type T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
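///
/// Illustrative usage sketch (a minimal example for the complex case, assuming
/// a typical oneMKL USM setup; the oneapi::mkl::lapack::hegvd_scratchpad_size
/// query and the allocations below are illustrative assumptions):
/// \code
/// using cplx = std::complex<float>;
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr int n = 4, lda = 4, ldb = 4;
/// cplx *a = sycl::malloc_shared<cplx>(lda * n, q); // Hermitian A
/// cplx *b = sycl::malloc_shared<cplx>(ldb * n, q); // Hermitian positive-definite B
/// float *w = sycl::malloc_shared<float>(n, q);     // real eigenvalues
/// int *info = sycl::malloc_shared<int>(1, q);
/// std::int64_t ws_size = oneapi::mkl::lapack::hegvd_scratchpad_size<cplx>(
///     q, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, ldb);
/// cplx *ws = sycl::malloc_device<cplx>(ws_size, q);
/// int ret = dpct::lapack::hegvd(q, 1, oneapi::mkl::job::vec,
///                               oneapi::mkl::uplo::upper, n, a, lda, b, ldb,
///                               w, ws, static_cast<int>(ws_size), info);
/// q.wait();
/// \endcode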
template <typename T, typename Tw>
inline int hegvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
Tw *w, T *scratchpad, int scratchpad_size, int *info) {
using Ty = typename DataType<T>::T2;
#ifdef DPCT_USM_LEVEL_NONE
auto info_buf = get_buffer<int>(info);
auto a_buffer = get_buffer<Ty>(a);
auto b_buffer = get_buffer<Ty>(b);
auto w_buffer = get_buffer<Tw>(w);
auto scratchpad_buffer = get_buffer<Ty>(scratchpad);
int info_val = 0;
int ret_val = 0;
try {
oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, a_buffer, lda,
b_buffer, ldb, w_buffer, scratchpad_buffer,
scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_val = static_cast<int>(e.info());
ret_val = 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
ret_val = 1;
}
queue.submit([&, info_val](sycl::handler &cgh) {
auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh);
cgh.single_task<dpct_kernel_name<class hegvd_set_info, T>>(
[=]() { info_acc[0] = info_val; });
});
return ret_val;
#else
try {
oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, (Ty *)a, lda, (Ty *)b,
ldb, w, (Ty *)scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
int info_val = static_cast<int>(e.info());
queue.memcpy(info, &info_val, sizeof(int)).wait();
return 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, sizeof(int)).wait();
return 1;
}
queue.memset(info, 0, sizeof(int));
return 0;
#endif
}
/// Computes the Cholesky factorizations of a batch of symmetric (or Hermitian,
/// for complex data) positive-definite matrices.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in,out] a Array of pointers to matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
/// \param [in] group_size The batch size.
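///
/// Illustrative usage sketch (a minimal example; the shared allocation of the
/// pointer array and the per-matrix buffers are assumptions, and the batch
/// below holds two 3x3 SPD matrices supplied by the caller):
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr int n = 3, lda = 3, batch = 2;
/// float **a = sycl::malloc_shared<float *>(batch, q);
/// for (int i = 0; i < batch; ++i)
///   a[i] = sycl::malloc_shared<float>(lda * n, q); // fill with an SPD matrix
/// int *info = sycl::malloc_shared<int>(batch, q);
/// int ret = dpct::lapack::potrf_batch(q, oneapi::mkl::uplo::upper, n, a, lda,
///                                     info, batch);
/// q.wait(); // on success each a[i] holds its Cholesky factor and info[i] == 0
/// \endcode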
template <typename T>
inline int potrf_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n,
T *a[], int lda, int *info, int group_size) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
using Ty = typename DataType<T>::T2;
struct matrix_info_t {
oneapi::mkl::uplo uplo_info;
std::int64_t n_info;
std::int64_t lda_info;
std::int64_t group_size_info;
};
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->uplo_info = uplo;
matrix_info->n_info = n;
matrix_info->lda_info = lda;
matrix_info->group_size_info = group_size;
std::int64_t scratchpad_size = 0;
sycl::event e;
Ty *scratchpad = nullptr;
try {
scratchpad_size = oneapi::mkl::lapack::potrf_batch_scratchpad_size<Ty>(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->lda_info), 1, &(matrix_info->group_size_info));
scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue);
e = oneapi::mkl::lapack::potrf_batch(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info), (Ty **)a,
&(matrix_info->lda_info), 1, &(matrix_info->group_size_info),
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::batch_error const &be) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
"potrf_batch_scratchpad_size/potrf_batch"
<< std::endl
<< "reason: " << be.what() << std::endl
<< "number: " << be.info() << std::endl;
int i = 0;
auto &ids = be.ids();
std::vector<int> info_vec(group_size);
for (auto const &e : be.exceptions()) {
try {
std::rethrow_exception(e);
} catch (oneapi::mkl::lapack::exception &e) {
std::cerr << "Exception " << ids[i] << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_vec[i] = e.info();
i++;
}
}
queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
}
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
std::free(matrix_info);
sycl::free(scratchpad, queue);
});
});
queue.memset(info, 0, group_size * sizeof(int));
return 0;
#endif
}
/// Solves a batch of systems of linear equations with Cholesky-factored
/// symmetric (Hermitian) positive-definite coefficient matrices.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] nrhs The number of right-hand sides.
/// \param [in,out] a Array of pointers to matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b Array of pointers to matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
/// \param [in] group_size The batch size.
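///
/// Illustrative usage sketch (continues the potrf_batch example above and
/// reuses q, n, lda, a, info, and batch from it; the single right-hand side
/// per matrix and the shared allocations are assumptions):
/// \code
/// constexpr int nrhs = 1, ldb = 3;
/// float **b = sycl::malloc_shared<float *>(batch, q);
/// for (int i = 0; i < batch; ++i)
///   b[i] = sycl::malloc_shared<float>(ldb * nrhs, q); // right-hand sides b_i
/// int ret = dpct::lapack::potrs_batch(q, oneapi::mkl::uplo::upper, n, nrhs,
///                                     a, lda, b, ldb, info, batch);
/// q.wait(); // each b[i] now holds the solution x_i of A_i * x_i = b_i
/// \endcode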
template <typename T>
inline int potrs_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n,
int nrhs, T *a[], int lda, T *b[], int ldb, int *info,
int group_size) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
using Ty = typename DataType<T>::T2;
struct matrix_info_t {
oneapi::mkl::uplo uplo_info;
std::int64_t n_info;
std::int64_t nrhs_info;
std::int64_t lda_info;
std::int64_t ldb_info;
std::int64_t group_size_info;
};
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->uplo_info = uplo;
matrix_info->n_info = n;
matrix_info->nrhs_info = nrhs;
matrix_info->lda_info = lda;
matrix_info->ldb_info = ldb;
matrix_info->group_size_info = group_size;
std::int64_t scratchpad_size = 0;
sycl::event e;
Ty *scratchpad = nullptr;
try {
scratchpad_size = oneapi::mkl::lapack::potrs_batch_scratchpad_size<Ty>(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->nrhs_info), &(matrix_info->lda_info),
&(matrix_info->ldb_info), 1, &(matrix_info->group_size_info));
scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue);
e = oneapi::mkl::lapack::potrs_batch(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->nrhs_info), (Ty **)a, &(matrix_info->lda_info), (Ty **)b,
&(matrix_info->ldb_info), 1, &(matrix_info->group_size_info),
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::batch_error const &be) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
"potrs_batch_scratchpad_size/potrs_batch"
<< std::endl
<< "reason: " << be.what() << std::endl
<< "number: " << be.info() << std::endl;
int i = 0;
auto &ids = be.ids();
std::vector<int> info_vec(group_size);
for (auto const &e : be.exceptions()) {
try {
std::rethrow_exception(e);
} catch (oneapi::mkl::lapack::exception &e) {
std::cerr << "Exception " << ids[i] << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_vec[i] = e.info();
i++;
}
}
queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
}
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
std::free(matrix_info);
sycl::free(scratchpad, queue);
});
});
queue.memset(info, 0, group_size * sizeof(int));
return 0;
#endif
}
namespace detail {
template <template <typename> typename functor_t, typename... args_t>
inline int lapack_shim(sycl::queue &q, library_data_t a_type, int *info,
std::string const &lapack_api_name, args_t &&...args) {
auto handle_lapack_exception = [&](const oneapi::mkl::lapack::exception &e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
<< lapack_api_name << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl
<< "detail: " << e.detail() << std::endl;
if (e.info() < std::numeric_limits<int>::min() ||
e.info() > std::numeric_limits<int>::max()) {
throw std::runtime_error("e.info() exceeds the limit of int type");
}
int info_val = static_cast<int>(e.info());
if (info)
dpct::detail::dpct_memcpy(q, info, &info_val, sizeof(int),
memcpy_direction::host_to_device)
.wait();
return 1;
};
try {
switch (a_type) {
case library_data_t::real_float: {
functor_t<float>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::real_double: {
functor_t<double>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::complex_float: {
functor_t<std::complex<float>>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::complex_double: {
functor_t<std::complex<double>>()(std::forward<args_t>(args)...);
break;
}
default:
throw std::runtime_error("the data type is unsupported");
}
} catch (oneapi::mkl::lapack::batch_error const &be) {
try {
std::rethrow_exception(be.exceptions()[0]);
} catch (oneapi::mkl::lapack::exception &e) {
return handle_lapack_exception(e);
}
} catch (oneapi::mkl::lapack::exception const &e) {
return handle_lapack_exception(e);
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
if (info)
dpct::detail::dpct_memset(q, info, 0, sizeof(int)).wait();
return 1;
}
return 0;
}
template <typename T> class working_memory {
public:
working_memory(std::size_t element_number, const sycl::queue &q) : _q(q) {
_ptr = dpct::detail::dpct_malloc(element_number * sizeof(T), _q);
}
auto get_memory() {
return dpct::detail::get_memory(reinterpret_cast<T *>(_ptr));
}
auto get_ptr() {
return _ptr;
}
void set_event(sycl::event e) { _e = e; }
~working_memory() {
if (_ptr) {
dpct::async_dpct_free({_ptr}, {_e}, _q);
}
}
private:
void *_ptr = nullptr;
sycl::event _e;
sycl::queue _q;
};
inline std::size_t byte_to_element_number(std::size_t size_in_byte,
                                          dpct::library_data_t element_type) {
auto dv = std::lldiv(
size_in_byte,
dpct::detail::library_data_size[static_cast<unsigned int>(element_type)] /
8);
if (dv.rem) {
throw std::runtime_error(
"size_in_byte is not divisible by the size of element (in bytes)");
}
return dv.quot;
}
inline std::size_t element_number_to_byte(std::size_t size_in_element,
                                          dpct::library_data_t element_type) {
auto dv = std::lldiv(
dpct::detail::library_data_size[static_cast<unsigned int>(element_type)],
8);
if (dv.rem) {
throw std::runtime_error(
"the size of element (in bits) is not divisible by 8");
}
return size_in_element * dv.quot;
}
inline oneapi::mkl::jobsvd char2jobsvd(signed char job) {
switch (job) {
case 'A':
return oneapi::mkl::jobsvd::vectors;
case 'S':
return oneapi::mkl::jobsvd::somevec;
case 'O':
return oneapi::mkl::jobsvd::vectorsina;
case 'N':
return oneapi::mkl::jobsvd::novec;
default:
throw std::runtime_error("the job type is unsupported");
}
}
template <typename T> struct getrf_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::getrf_scratchpad_size<T>(q, m, n, lda);
}
};
template <typename T> struct getrf_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
auto ipiv_data = dpct::detail::get_memory(ipiv);
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::getrf(q, m, n, a_data, lda, ipiv_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct getrs_impl {
void operator()(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, std::int64_t *ipiv, library_data_t b_type,
void *b, std::int64_t ldb, int *info) {
auto ipiv_data = dpct::detail::get_memory(ipiv);
std::int64_t device_ws_size = oneapi::mkl::lapack::getrs_scratchpad_size<T>(
q, trans, n, nrhs, lda, ldb);
working_memory<T> device_ws(device_ws_size, q);
auto device_ws_data = device_ws.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
oneapi::mkl::lapack::getrs(q, trans, n, nrhs, a_data, lda, ipiv_data,
b_data, ldb, device_ws_data, device_ws_size);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
device_ws.set_event(e);
}
};
template <typename T> struct geqrf_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::geqrf_scratchpad_size<T>(q, m, n, lda);
}
};
template <typename T> struct geqrf_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t tau_type, void *tau, void *device_ws,
std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto tau_data = dpct::detail::get_memory(reinterpret_cast<T *>(tau));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::geqrf(q, m, n, a_data, lda, tau_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct getrfnp_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
std::int64_t a_stride = m * lda;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::getrfnp_batch(q, m, n, a_data, lda, a_stride, 1,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T> struct gesvd_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
library_data_t u_type, std::int64_t ldu,
library_data_t vt_type, std::int64_t ldvt,
std::size_t &device_ws_size) {
device_ws_size = oneapi::mkl::lapack::gesvd_scratchpad_size<T>(
q, jobu, jobvt, m, n, lda, ldu, ldvt);
}
};
template <typename T> struct ElementType {
using value_tpye = T;
};
template <typename T> struct ElementType<std::complex<T>> {
using value_tpye = T;
};
template <typename T> struct gesvd_impl {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t s_type, void *s, library_data_t u_type,
void *u, std::int64_t ldu, library_data_t vt_type, void *vt,
std::int64_t ldvt, void *device_ws,
std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto s_data = dpct::detail::get_memory(
reinterpret_cast<typename ElementType<T>::value_tpye *>(s));
auto u_data = dpct::detail::get_memory(reinterpret_cast<T *>(u));
auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::gesvd(q, jobu, jobvt, m, n, a_data, lda, s_data,
u_data, ldu, vt_data, ldvt, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct gesvd_conj_impl : public gesvd_impl<T> {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t s_type, void *s, library_data_t u_type,
void *u, std::int64_t ldu, library_data_t vt_type, void *vt,
std::int64_t ldvt, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using base = gesvd_impl<T>;
base::operator()(q, jobu, jobvt, m, n, a_type, a, lda, s_type, s, u_type, u,
ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info);
auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt));
oneapi::mkl::blas::row_major::imatcopy(q, oneapi::mkl::transpose::conjtrans,
n, n, T(1.0f), vt_data, ldvt, ldvt);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T> struct potrf_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::potrf_scratchpad_size<T>(q, uplo, n, lda);
}
};
template <typename T> struct potrf_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
void *device_ws, std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::potrf(q, uplo, n, a_data, lda, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct potrs_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, library_data_t b_type, void *b,
std::int64_t ldb, int *info) {
std::int64_t device_ws_size = oneapi::mkl::lapack::potrs_scratchpad_size<T>(
q, uplo, n, nrhs, lda, ldb);
working_memory<T> device_ws(device_ws_size, q);
auto device_ws_data = device_ws.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
oneapi::mkl::lapack::potrs(q, uplo, n, nrhs, a_data, lda, b_data, ldb,
device_ws_data, device_ws_size);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
device_ws.set_event(e);
}
};
template <typename T> struct value_type_trait {
using value_type = T;
};
template <typename T> struct value_type_trait<std::complex<T>> {
using value_type = T;
};
template <typename T> auto lamch_s() {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
if constexpr (std::is_same_v<T, float>) {
return slamch("S");
} else if constexpr (std::is_same_v<T, double>) {
return dlamch("S");
}
throw std::runtime_error("the type is unsupported");
#endif
}
#define DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(FUNC, ...) \
do { \
if constexpr (std::is_floating_point_v<T>) { \
device_ws_size = oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \
} else { \
device_ws_size = oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \
} \
} while (0)
#define DISPATCH_FLOAT_FOR_CALCULATION(FUNC, ...) \
do { \
if constexpr (std::is_floating_point_v<T>) { \
oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \
} else { \
oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \
} \
} while (0)
template <typename T> struct syheevx_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, std::int64_t lda, void *vl, void *vu,
std::int64_t il, std::int64_t iu,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evx_scratchpad_size<T>, q, jobz, range,
uplo, n, lda, vl_value, vu_value, il, iu,
abstol, lda);
#endif
}
};
template <typename T> constexpr library_data_t get_library_data_t_from_type() {
if constexpr (std::is_same_v<T, float>) {
return library_data_t::real_float;
} else if constexpr (std::is_same_v<T, double>) {
return library_data_t::real_double;
} else if constexpr (std::is_same_v<T, sycl::float2> ||
std::is_same_v<T, std::complex<float>>) {
return library_data_t::complex_float;
} else if constexpr (std::is_same_v<T, sycl::double2> ||
std::is_same_v<T, std::complex<double>>) {
return library_data_t::complex_double;
}
throw std::runtime_error("the type is unsupported");
}
template <typename T> struct syheevx_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, library_data_t w_type,
void *w, void *device_ws, std::size_t device_ws_size,
int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
working_memory<T> z(n * lda, q);
working_memory<std::int64_t> m_device(1, q);
auto z_data = z.get_memory();
auto m_device_data = m_device.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_CALCULATION(evx, q, jobz, range, uplo, n, a_data, lda,
vl_value, vu_value, il, iu, abstol,
m_device_data, w_data, z_data, lda,
device_ws_data, device_ws_size);
dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T),
memcpy_direction::device_to_device, q);
dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t),
memcpy_direction::device_to_host, q);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
z.set_event(e);
m_device.set_event(e);
#endif
}
};
template <typename T> struct syhegvx_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, std::int64_t lda, std::int64_t ldb, void *vl,
void *vu, std::int64_t il, std::int64_t iu,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvx_scratchpad_size<T>, q, itype, jobz,
range, uplo, n, lda, ldb, vl_value,
vu_value, il, iu, abstol, lda);
#endif
}
};
template <typename T> struct syhegvx_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, void *a, std::int64_t lda, void *b,
std::int64_t ldb, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
working_memory<T> z(n * lda, q);
working_memory<std::int64_t> m_device(1, q);
auto z_data = z.get_memory();
auto m_device_data = m_device.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_CALCULATION(gvx, q, itype, jobz, range, uplo, n, a_data,
lda, b_data, ldb, vl_value, vu_value, il, iu,
abstol, m_device_data, w_data, z_data, lda,
device_ws_data, device_ws_size);
dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T),
memcpy_direction::device_to_device, q);
dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t),
memcpy_direction::device_to_host, q);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
z.set_event(e);
m_device.set_event(e);
#endif
}
};
template <typename T> struct syhegvd_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda,
std::int64_t ldb, std::size_t &device_ws_size) {
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvd_scratchpad_size<T>, q, itype, jobz,
uplo, n, lda, ldb);
}
};
template <typename T> struct syhegvd_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, void *a,
std::int64_t lda, void *b, std::int64_t ldb, void *w,
void *device_ws, std::size_t device_ws_size,
int *info) {
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(gvd, q, itype, jobz, uplo, n, a_data, lda,
b_data, ldb, w_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
inline oneapi::mkl::compz job2compz(const oneapi::mkl::job &job) {
oneapi::mkl::compz ret;
if (job == oneapi::mkl::job::novec) {
ret = oneapi::mkl::compz::novectors;
} else if (job == oneapi::mkl::job::vec) {
ret = oneapi::mkl::compz::vectors;
} else {
throw std::runtime_error("the job type is unsupported");
}
return ret;
}
template <typename T> struct syheev_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(ev_scratchpad_size<T>, q, jobz, uplo, n,
lda);
#endif
}
};
template <typename T> struct syheev_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::uplo uplo, std::int64_t n, void *a,
std::int64_t lda, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(ev, q, jobz, uplo, n, a_data, lda, w_data,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T> struct syheevd_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evd_scratchpad_size<T>, q, jobz, uplo, n,
lda);
}
};
template <typename T> struct syheevd_impl {
void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(evd, q, jobz, uplo, n, a_data, lda, w_data,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
#undef DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE
#undef DISPATCH_FLOAT_FOR_CALCULATION
template <typename T> struct trtri_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type,
std::int64_t lda, std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
device_ws_size =
oneapi::mkl::lapack::trtri_scratchpad_size<T>(q, uplo, diag, n, lda);
#endif
}
};
template <typename T> struct trtri_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type,
void *a, std::int64_t lda, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::trtri(q, uplo, diag, n, a_data, lda, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
} // namespace detail
/// Computes the size of workspace memory of getrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int getrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::getrf_scratchpad_size_impl>(
q, a_type, nullptr, "getrf_scratchpad_size", q, m, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the LU factorization of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by L and U. The unit
/// diagonal elements of L are not stored.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] ipiv The pivot indices. If \p ipiv is nullptr, non-pivoting
/// LU factorization is computed.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int getrf(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
if (ipiv == nullptr) {
return detail::lapack_shim<detail::getrfnp_impl>(
q, a_type, info, "getrfnp_batch", q, m, n, a_type, a, lda, ipiv,
device_ws, device_ws_size_in_element_number, info);
}
return detail::lapack_shim<detail::getrf_impl>(
q, a_type, info, "getrf", q, m, n, a_type, a, lda, ipiv, device_ws,
device_ws_size_in_element_number, info);
#endif
}
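// Example usage (minimal sketch): LU-factorize a float matrix stored in device
// USM memory. The names q (an in-order sycl::queue), m, n, a, lda and ipiv (a
// device array of std::int64_t pivots) are assumptions of this sketch and must
// be provided by the caller.
//
//   std::size_t ws_bytes = 0;
//   dpct::lapack::getrf_scratchpad_size(
//       q, m, n, dpct::library_data_t::real_float, lda, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   int *info = sycl::malloc_shared<int>(1, q);
//   dpct::lapack::getrf(q, m, n, dpct::library_data_t::real_float, a, lda,
//                       ipiv, ws, ws_bytes, info);
//   q.wait();
//   sycl::free(ws, q);
//   sycl::free(info, q);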
/// Solves a system of linear equations with a LU-factored square coefficient
/// matrix, with multiple right-hand sides.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] trans Indicates the form of the linear equation.
/// \param [in] n The order of the matrix A and the number of rows in matrix B.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] a The input matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ipiv The pivot indices.
/// \param [in] b_type The data type of the matrix B.
/// \param [in, out] b The matrix B, whose columns are the right-hand sides
/// for the systems of equations.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int getrs(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, std::int64_t *ipiv, library_data_t b_type,
void *b, std::int64_t ldb, int *info) {
return detail::lapack_shim<detail::getrs_impl>(
q, a_type, info, "getrs_scratchpad_size/getrs", q, trans, n, nrhs, a_type,
a, lda, ipiv, b_type, b, ldb, info);
}
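// Example usage (minimal sketch): solve A * X = B with the LU factors and
// pivots produced by getrf above. The names q, n, nrhs, a, lda, ipiv, b and
// ldb are assumptions of this sketch.
//
//   int *info = sycl::malloc_shared<int>(1, q);
//   dpct::lapack::getrs(q, oneapi::mkl::transpose::nontrans, n, nrhs,
//                       dpct::library_data_t::real_float, a, lda, ipiv,
//                       dpct::library_data_t::real_float, b, ldb, info);
//   q.wait();
//   sycl::free(info, q);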
/// Computes the size of workspace memory of geqrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int geqrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::geqrf_scratchpad_size_impl>(
q, a_type, nullptr, "geqrf_scratchpad_size", q, m, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the QR factorization of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the factorization data.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] tau_type The data type of the array tau.
/// \param [out] tau The output array of scalars that define the elementary
/// reflectors for the matrix Q in its decomposition as a product of elementary
/// reflectors.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int geqrf(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t tau_type, void *tau, void *device_ws,
std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::geqrf_impl>(
q, a_type, info, "geqrf", q, m, n, a_type, a, lda, tau_type, tau,
device_ws, device_ws_size_in_element_number, info);
}
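// Example usage (minimal sketch): QR-factorize a float matrix. The names q, m,
// n, a, lda and tau (a device array of at least min(m, n) floats) are
// assumptions of this sketch.
//
//   std::size_t ws_bytes = 0;
//   dpct::lapack::geqrf_scratchpad_size(
//       q, m, n, dpct::library_data_t::real_float, lda, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   int *info = sycl::malloc_shared<int>(1, q);
//   dpct::lapack::geqrf(q, m, n, dpct::library_data_t::real_float, a, lda,
//                       dpct::library_data_t::real_float, tau, ws, ws_bytes,
//                       info);
//   q.wait();
//   sycl::free(ws, q);
//   sycl::free(info, q);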
/// Computes the size of workspace memory of gesvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] u_type The data type of the matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int gesvd_scratchpad_size(sycl::queue &q, signed char jobu,
signed char jobvt, std::int64_t m,
std::int64_t n, library_data_t a_type,
std::int64_t lda, library_data_t u_type,
std::int64_t ldu, library_data_t vt_type,
std::int64_t ldvt, std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu);
oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt);
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>(
q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu_enum, jobvt_enum, m,
n, a_type, lda, u_type, ldu, vt_type, ldvt, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the size of workspace memory of gesvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::vec or job::novec.
/// \param [in] all_vec Only has an effect when \p jobz is job::vec. If the
/// value is zero, all m columns of U are returned in the matrix U, otherwise
/// the first min(\p m, \p n) columns of U (the left singular vectors)
/// are returned in the matrix U.
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] u_type The data type of the matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \p a_type.
/// \param [out] host_ws_size The host workspace size as a number of elements
/// of type \p a_type. Currently the value is always zero.
inline int gesvd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
std::int64_t all_vec, std::int64_t m,
std::int64_t n, library_data_t a_type,
std::int64_t lda, library_data_t u_type,
std::int64_t ldu, library_data_t vt_type,
std::int64_t ldvt, int *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
oneapi::mkl::jobsvd jobu;
oneapi::mkl::jobsvd jobvt;
if (jobz == oneapi::mkl::job::vec) {
if (all_vec) {
jobu = jobvt = oneapi::mkl::jobsvd::somevec;
} else {
jobu = jobvt = oneapi::mkl::jobsvd::vectors;
}
} else if (jobz == oneapi::mkl::job::novec) {
jobu = jobvt = oneapi::mkl::jobsvd::novec;
} else {
throw std::runtime_error("the job type is unsupported");
}
std::size_t device_ws_size_64;
int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>(
q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu, jobvt, m, n, a_type,
lda, u_type, ldu, vt_type, ldvt, device_ws_size_64);
*device_ws_size = device_ws_size_64;
return ret;
}
/// Computes the singular value decomposition of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. It is overwritten according to
/// \p jobu and \p jobvt.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] s_type The data type of the matrix S.
/// \param [out] s The output matrix S.
/// \param [in] u_type The data type of the matrix U.
/// \param [out] u The output matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [out] vt The output matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int gesvd(sycl::queue &q, signed char jobu, signed char jobvt,
std::int64_t m, std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, library_data_t s_type, void *s,
library_data_t u_type, void *u, std::int64_t ldu,
library_data_t vt_type, void *vt, std::int64_t ldvt,
void *device_ws, std::size_t device_ws_size, int *info) {
oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu);
oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt);
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::gesvd_impl>(
q, a_type, info, "gesvd", q, jobu_enum, jobvt_enum, m, n, a_type, a, lda,
s_type, s, u_type, u, ldu, vt_type, vt, ldvt, device_ws,
device_ws_size_in_element_number, info);
}
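// Example usage (minimal sketch): full SVD of a float matrix through the
// character-job interface. The names q, m, n, a, lda, s, u, ldu, vt and ldvt
// are assumptions of this sketch; all arrays live in device USM memory.
//
//   std::size_t ws_bytes = 0;
//   dpct::lapack::gesvd_scratchpad_size(
//       q, 'A', 'A', m, n, dpct::library_data_t::real_float, lda,
//       dpct::library_data_t::real_float, ldu,
//       dpct::library_data_t::real_float, ldvt, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   int *info = sycl::malloc_shared<int>(1, q);
//   dpct::lapack::gesvd(q, 'A', 'A', m, n, dpct::library_data_t::real_float,
//                       a, lda, dpct::library_data_t::real_float, s,
//                       dpct::library_data_t::real_float, u, ldu,
//                       dpct::library_data_t::real_float, vt, ldvt, ws,
//                       ws_bytes, info);
//   q.wait();
//   sycl::free(ws, q);
//   sycl::free(info, q);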
/// Computes the singular value decomposition of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::vec or job::novec.
/// \param [in] all_vec Only has an effect when \p jobz is job::vec. If the
/// value is zero, all m columns of U are returned in the matrix U, otherwise
/// the first min(\p m, \p n) columns of U (the left singular vectors)
/// are returned in the matrix U.
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. It is overwritten according to
/// \p jobz and \p all_vec.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] s_type The data type of the matrix S.
/// \param [out] s The output matrix S.
/// \param [in] u_type The data type of the matrix U.
/// \param [out] u The output matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [out] vt The output matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \p a_type.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int gesvd(sycl::queue &q, oneapi::mkl::job jobz, std::int64_t all_vec,
std::int64_t m, std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, library_data_t s_type, void *s,
library_data_t u_type, void *u, std::int64_t ldu,
library_data_t vt_type, void *vt, std::int64_t ldvt,
void *device_ws, std::size_t device_ws_size, int *info) {
oneapi::mkl::jobsvd jobu;
oneapi::mkl::jobsvd jobvt;
if (jobz == oneapi::mkl::job::vec) {
if (all_vec) {
jobu = jobvt = oneapi::mkl::jobsvd::somevec;
} else {
jobu = jobvt = oneapi::mkl::jobsvd::vectors;
}
} else if (jobz == oneapi::mkl::job::novec) {
jobu = jobvt = oneapi::mkl::jobsvd::novec;
} else {
throw std::runtime_error("the job type is unsupported");
}
  int ret = detail::lapack_shim<detail::gesvd_conj_impl>(
      q, a_type, info, "gesvd", q, jobu, jobvt, m, n, a_type, a, lda, s_type, s,
      u_type, u, ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info);
  return ret;
}
/// Computes the size of workspace memory of potrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int potrf_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type,
std::int64_t lda, std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::potrf_scratchpad_size_impl>(
q, a_type, nullptr, "potrf_scratchpad_size", q, uplo, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the Cholesky factorization of a symmetric (Hermitian)
/// positive-definite matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the Cholesky factor U
/// or L, as specified by \p uplo.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int potrf(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
void *device_ws, std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::potrf_impl>(
q, a_type, info, "potrf", q, uplo, n, a_type, a, lda, device_ws,
device_ws_size_in_element_number, info);
}
/// Solves a system of linear equations with a Cholesky-factored symmetric
/// (Hermitian) positive-definite coefficient matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A and the number of rows in matrix B.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the Cholesky factor U
/// or L, as specified by \p uplo.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] b_type The data type of the matrix B.
/// \param [in, out] b The matrix B, whose columns are the right-hand sides
/// for the systems of equations.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int potrs(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, library_data_t b_type, void *b,
std::int64_t ldb, int *info) {
return detail::lapack_shim<detail::potrs_impl>(
q, a_type, info, "potrs_scratchpad_size/potrs", q, uplo, n, nrhs, a_type,
a, lda, b_type, b, ldb, info);
}
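// Example usage (minimal sketch): Cholesky-factorize a symmetric
// positive-definite float matrix and solve A * X = B. The names q, n, nrhs, a,
// lda, b and ldb are assumptions of this sketch.
//
//   std::size_t ws_bytes = 0;
//   dpct::lapack::potrf_scratchpad_size(
//       q, oneapi::mkl::uplo::lower, n, dpct::library_data_t::real_float, lda,
//       &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   int *info = sycl::malloc_shared<int>(1, q);
//   dpct::lapack::potrf(q, oneapi::mkl::uplo::lower, n,
//                       dpct::library_data_t::real_float, a, lda, ws, ws_bytes,
//                       info);
//   dpct::lapack::potrs(q, oneapi::mkl::uplo::lower, n, nrhs,
//                       dpct::library_data_t::real_float, a, lda,
//                       dpct::library_data_t::real_float, b, ldb, info);
//   q.wait();
//   sycl::free(ws, q);
//   sycl::free(info, q);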
/// Computes the size of workspace memory of syevx/heevx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
void *vl, void *vu, std::int64_t il,
std::int64_t iu, library_data_t w_type,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>(
q, a_type, nullptr, "syevx_scratchpad_size/heevx_scratchpad_size", q,
compz_jobz, range, uplo, n, lda, vl, vu, il, iu,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a
/// symmetric/Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is
/// overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, library_data_t w_type,
void *w, void *device_ws, std::size_t device_ws_size,
int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
int ret = detail::lapack_shim<detail::syheevx_impl>(
q, a_type, info, "syevx/heevx", q, compz_jobz, range, uplo, n, a_type, a,
lda, vl, vu, il, iu, m, w_type, w, device_ws,
device_ws_size_in_element_number, info);
q.wait();
return ret;
}
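// Example usage (minimal sketch, requires Intel oneMKL): compute the
// eigenvalues with indices il..iu of a symmetric float matrix. The names q, n,
// a, lda, il, iu and w (a device array of at least n floats) are assumptions
// of this sketch; vl and vu must still point to valid values even when
// range == rangev::indices.
//
//   float vl = 0.f, vu = 0.f;
//   std::int64_t m_found = 0;
//   std::size_t ws_bytes = 0;
//   dpct::lapack::syheevx_scratchpad_size(
//       q, oneapi::mkl::job::vec, oneapi::mkl::rangev::indices,
//       oneapi::mkl::uplo::upper, n, dpct::library_data_t::real_float, lda,
//       &vl, &vu, il, iu, dpct::library_data_t::real_float, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   int *info = sycl::malloc_shared<int>(1, q);
//   dpct::lapack::syheevx(q, oneapi::mkl::job::vec,
//                         oneapi::mkl::rangev::indices,
//                         oneapi::mkl::uplo::upper, n,
//                         dpct::library_data_t::real_float, a, lda, &vl, &vu,
//                         il, iu, &m_found, dpct::library_data_t::real_float,
//                         w, ws, ws_bytes, info);
//   sycl::free(ws, q);
//   sycl::free(info, q);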
/// Computes the size of workspace memory of syevx/heevx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T, typename ValueT>
inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range,
oneapi::mkl::uplo uplo, int n, int lda,
ValueT vl, ValueT vu, int il, int iu,
int *device_ws_size) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syevx_scratchpad_size/heevx_scratchpad_size", q, compz_jobz, range, uplo,
n, lda, &vl, &vu, il, iu, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a
/// symmetric/Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is
/// overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n,
T *a, int lda, ValueT vl, ValueT vu, int il, int iu, int *m,
ValueT *w, T *device_ws, int device_ws_size, int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::int64_t m64;
int ret = detail::lapack_shim<detail::syheevx_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syevx/heevx", q,
compz_jobz, range, uplo, n, detail::get_library_data_t_from_type<T>(), a,
lda, &vl, &vu, il, iu, &m64,
detail::get_library_data_t_from_type<ValueT>(), w, device_ws,
device_ws_size, info);
q.wait();
*m = (int)m64;
return ret;
}
/// Computes the size of workspace memory of sygvx/hegvx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T, typename ValueT>
inline int
syhegvx_scratchpad_size(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
int n, int lda, int ldb, ValueT vl, ValueT vu, int il,
int iu, int *device_ws_size) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syhegvx_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"sygvx_scratchpad_size/hegvx_scratchpad_size", q, itype, compz_jobz,
range, uplo, n, lda, ldb, &vl, &vu, il, iu, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a real
/// generalized symmetric/Hermitian definite eigenproblem.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is
/// overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in, out] b The input matrix B.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syhegvx(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n,
T *a, int lda, T *b, int ldb, ValueT vl, ValueT vu, int il,
int iu, int *m, ValueT *w, T *device_ws, int device_ws_size,
int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::int64_t m64;
int ret = detail::lapack_shim<detail::syhegvx_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "sygvx/hegvx", q,
itype, compz_jobz, range, uplo, n, a, lda, b, ldb, &vl, &vu, il, iu, &m64,
w, device_ws, device_ws_size, info);
q.wait();
*m = (int)m64;
return ret;
}
/// Computes the size of workspace memory of sygvd/hegvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syhegvd_scratchpad_size(sycl::queue &q, int itype,
oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, int lda,
int ldb, int *device_ws_size) {
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syhegvd_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"sygvd_scratchpad_size/hegvd_scratchpad_size", q, itype, jobz, uplo, n,
lda, ldb, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, eigenvectors of a real generalized
/// symmetric/Hermitian definite eigenproblem using a divide and conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in, out] b The input matrix B.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syhegvd(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
ValueT *w, T *device_ws, int device_ws_size, int *info) {
return detail::lapack_shim<detail::syhegvd_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "sygvd/hegvd", q,
itype, jobz, uplo, n, a, lda, b, ldb, w, device_ws, device_ws_size, info);
}
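// Example usage (minimal sketch): solve the generalized eigenproblem
// A * x = lambda * B * x for double matrices through the typed interface. The
// names q, n, a, lda, b, ldb and w (a device array of at least n doubles) are
// assumptions of this sketch.
//
//   int ws_elems = 0;
//   dpct::lapack::syhegvd_scratchpad_size<double>(
//       q, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, ldb,
//       &ws_elems);
//   double *ws = sycl::malloc_device<double>(ws_elems, q);
//   int *info = sycl::malloc_shared<int>(1, q);
//   dpct::lapack::syhegvd(q, 1, oneapi::mkl::job::vec,
//                         oneapi::mkl::uplo::upper, n, a, lda, b, ldb, w, ws,
//                         ws_elems, info);
//   q.wait();
//   sycl::free(ws, q);
//   sycl::free(info, q);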
/// Computes the size of workspace memory of syev/heev function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syheev_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, int lda,
int *device_ws_size) {
std::size_t device_ws_size_tmp;
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
int ret = detail::lapack_shim<detail::syheev_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syev_scratchpad_size/heev_scratchpad_size", q, compz_jobz, uplo, n, lda,
device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, eigenvectors of a real symmetric
/// or Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheev(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
int n, T *a, int lda, ValueT *w, T *device_ws,
int device_ws_size, int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
return detail::lapack_shim<detail::syheev_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syev/heev", q,
compz_jobz, uplo, n, a, lda, w, device_ws, device_ws_size, info);
}
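// Example usage (minimal sketch, requires Intel oneMKL): compute all
// eigenvalues and eigenvectors of a symmetric float matrix through the typed
// interface. The names q, n, a, lda and w (a device array of at least n
// floats) are assumptions of this sketch.
//
//   int ws_elems = 0;
//   dpct::lapack::syheev_scratchpad_size<float>(
//       q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, &ws_elems);
//   float *ws = sycl::malloc_device<float>(ws_elems, q);
//   int *info = sycl::malloc_shared<int>(1, q);
//   dpct::lapack::syheev(q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper,
//                        n, a, lda, w, ws, ws_elems, info);
//   q.wait();
//   sycl::free(ws, q);
//   sycl::free(info, q);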
/// Computes the size of workspace memory of syevd/heevd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
library_data_t w_type,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>(
q, a_type, nullptr, "syevd_scratchpad_size/heevd_scratchpad_size", q,
jobz, uplo, n, a_type, lda, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes all eigenvalues and, optionally, all eigenvectors of a real
/// symmetric or Hermitian matrix using divide and conquer algorithm.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t w_type, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::syheevd_impl>(
q, a_type, info, "syevd/heevd", q, jobz, uplo, n, a_type, a, lda, w,
device_ws, device_ws_size_in_element_number, info);
}
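// Example usage (minimal sketch): divide-and-conquer eigendecomposition of a
// Hermitian complex-double matrix through the type-erased interface. The names
// q, n, a, lda and w (a device array of at least n doubles) are assumptions of
// this sketch.
//
//   std::size_t ws_bytes = 0;
//   dpct::lapack::syheevd_scratchpad_size(
//       q, oneapi::mkl::job::vec, oneapi::mkl::uplo::lower, n,
//       dpct::library_data_t::complex_double, lda,
//       dpct::library_data_t::real_double, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   int *info = sycl::malloc_shared<int>(1, q);
//   dpct::lapack::syheevd(q, oneapi::mkl::job::vec, oneapi::mkl::uplo::lower,
//                         n, dpct::library_data_t::complex_double, a, lda,
//                         dpct::library_data_t::real_double, w, ws, ws_bytes,
//                         info);
//   q.wait();
//   sycl::free(ws, q);
//   sycl::free(info, q);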
/// Computes the size of workspace memory of syevd/heevd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t lda, int *device_ws_size) {
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syevd_scratchpad_size/heevd_scratchpad_size", q, jobz, uplo, n,
detail::get_library_data_t_from_type<T>(), lda, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, all eigenvectors of a real
/// symmetric or Hermitian matrix using divide and conquer algorithm.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, T *a,
std::int64_t lda, ValueT *w, T *device_ws,
int device_ws_size, int *info) {
return detail::lapack_shim<detail::syheevd_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syevd/heevd", q,
jobz, uplo, n, detail::get_library_data_t_from_type<T>(), a, lda, w,
device_ws, device_ws_size, info);
}
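// Illustrative usage sketch (not part of the original header): query the
// workspace size with syheevd_scratchpad_size and then run the typed syheevd
// overload. `q` is assumed to be an in-order sycl::queue; `n`, `lda`, `a` and
// `w` are hypothetical caller-side variables (device USM pointers).
//
//   int ws_size = 0, info = 0;
//   dpct::lapack::syheevd_scratchpad_size<float>(
//       q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, &ws_size);
//   float *ws = sycl::malloc_device<float>(ws_size, q);
//   dpct::lapack::syheevd<float, float>(q, oneapi::mkl::job::vec,
//                                       oneapi::mkl::uplo::upper, n, a, lda,
//                                       w, ws, ws_size, &info);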
/// Computes the size of the workspace memory of the trtri function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] diag Must be diag::nonunit or diag::unit.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int trtri_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::trtri_scratchpad_size_impl>(
q, a_type, nullptr, "trtri_scratchpad_size", q, uplo, diag, n, a_type,
lda, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the inverse of a triangular matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] diag Must be diag::nonunit or diag::unit.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// the inverse matrix of A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If a LAPACK synchronous exception is caught, the value
/// returned from the info() method of the exception is set to \p info.
inline int trtri(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::diag diag,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *device_ws, std::size_t device_ws_size,
int *info) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::trtri_impl>(
q, a_type, info, "trtri", q, uplo, diag, n, a_type, a, lda, device_ws,
device_ws_size_in_element_number, info);
#endif
}
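// Illustrative usage sketch (not part of the original header): query the
// workspace size in bytes with trtri_scratchpad_size and invert a triangular
// matrix with trtri. `q`, `n`, `lda` and `a` are hypothetical caller-side
// variables; note trtri throws when DPCT_USM_LEVEL_NONE is defined.
//
//   std::size_t ws_bytes = 0;
//   int info = 0;
//   dpct::lapack::trtri_scratchpad_size(q, oneapi::mkl::uplo::upper,
//                                       oneapi::mkl::diag::nonunit, n,
//                                       dpct::library_data_t::real_double,
//                                       lda, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   dpct::lapack::trtri(q, oneapi::mkl::uplo::upper,
//                       oneapi::mkl::diag::nonunit, n,
//                       dpct::library_data_t::real_double, a, lda, ws,
//                       ws_bytes, &info);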
} // namespace lapack
} // namespace dpct
#endif // __DPCT_LAPACK_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/fft_utils.hpp | //==---- fft_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_FFT_UTILS_HPP__
#define __DPCT_FFT_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include <optional>
#include <utility>
#include "lib_common_utils.hpp"
namespace dpct {
namespace fft {
/// An enumeration type to describe whether the FFT direction is forward or
/// backward.
enum fft_direction : int {
forward = 0,
backward
};
/// An enumeration type to describe the types of FFT input and output data.
enum fft_type : int {
real_float_to_complex_float = 0,
complex_float_to_real_float,
real_double_to_complex_double,
complex_double_to_real_double,
complex_float_to_complex_float,
complex_double_to_complex_double,
};
/// A class to perform FFT calculation.
class fft_engine {
public:
/// Default constructor.
fft_engine() {}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] input_type Input data type.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] output_type Output data type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement need
/// to be specified explicitly to get correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, long long *n,
long long *inembed, long long istride, long long idist,
library_data_t input_type, long long *onembed, long long ostride,
long long odist, library_data_t output_type, long long batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
init<long long>(dim, n, inembed, istride, idist, input_type, onembed,
ostride, odist, output_type, batch,
direction_and_placement);
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] input_type Input data type.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] output_type Output data type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement need
/// to be specified explicitly to get correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed,
int istride, int idist, library_data_t input_type, int *onembed,
int ostride, int odist, library_data_t output_type, int batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
init<int>(dim, n, inembed, istride, idist, input_type, onembed, ostride,
odist, output_type, batch, direction_and_placement);
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement need
/// to be specified explicitly to get correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, long long *n,
long long *inembed, long long istride, long long idist,
long long *onembed, long long ostride, long long odist,
fft_type type, long long batch, size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
commit(exec_queue, dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch, scratchpad_size,
direction_and_placement);
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement need
/// to be specified explicitly to get correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed,
int istride, int idist, int *onembed, int ostride, int odist,
fft_type type, int batch, size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
commit(exec_queue, dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch, scratchpad_size,
direction_and_placement);
}
/// Commit the configuration to calculate 1-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement need
/// to be specified explicitly to get correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int n1, fft_type type, int batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(1);
_n[0] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 1;
_batch = batch;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
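  // Illustrative usage sketch (not part of the original header): commit a
  // 1-D single-precision complex-to-complex plan on a hypothetical in-order
  // queue `q`, querying the workspace size and fixing direction/placement up
  // front so the reported size matches the later compute call.
  //
  //   dpct::fft::fft_engine plan;
  //   size_t ws_bytes = 0;
  //   plan.commit(&q, /*n1=*/1024,
  //               dpct::fft::fft_type::complex_float_to_complex_float,
  //               /*batch=*/8, &ws_bytes,
  //               std::make_pair(dpct::fft::fft_direction::forward,
  //                              /*is_inplace=*/false));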
/// Commit the configuration to calculate 2-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement need
/// to be specified explicitly to get correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int n2, int n1, fft_type type,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(2);
_n[0] = n2;
_n[1] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 2;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate 3-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement need
/// to be specified explicitly to get correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(3);
_n[0] = n3;
_n[1] = n2;
_n[2] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 3;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Create the class to calculate 1-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
static fft_engine *
create(sycl::queue *exec_queue, int n1, fft_type type, int batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n1, type, batch, nullptr,
direction_and_placement);
return engine;
}
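  // Illustrative usage sketch (not part of the original header): typical
  // lifecycle of a 1-D complex-to-complex plan. `q`, `d_in` and `d_out` are
  // hypothetical (a SYCL queue and device pointers to sycl::float2 data).
  //
  //   dpct::fft::fft_engine *plan = dpct::fft::fft_engine::create(
  //       &q, /*n1=*/1024,
  //       dpct::fft::fft_type::complex_float_to_complex_float, /*batch=*/1);
  //   plan->compute(d_in, d_out, dpct::fft::fft_direction::forward);
  //   dpct::fft::fft_engine::destroy(plan);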
/// Create the class to calculate 2-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
static fft_engine *
create(sycl::queue *exec_queue, int n2, int n1, fft_type type,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n2, n1, type, nullptr, direction_and_placement);
return engine;
}
/// Create the class to calculate 3-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
static fft_engine *
create(sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n3, n2, n1, type, nullptr,
direction_and_placement);
return engine;
}
/// Create the class to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
static fft_engine *
create(sycl::queue *exec_queue, int dim, int *n, int *inembed, int istride,
int idist, int *onembed, int ostride, int odist, fft_type type,
int batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, dim, n, inembed, istride, idist, onembed,
ostride, odist, type, batch, nullptr,
direction_and_placement);
return engine;
}
/// Create the class to calculate FFT without committing any configuration.
static fft_engine *create() {
fft_engine *engine = new fft_engine();
return engine;
}
/// Destroy the class used to calculate FFT.
/// \param [in] engine Pointer returned from fft_engine::create.
static void destroy(fft_engine *engine) { delete engine; }
#ifdef __INTEL_MKL__
/// Estimates the workspace size for calculating n-D FFT.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement need to be specified explicitly to get correct
/// result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction(if
/// current FFT is complex-to-complex) and out-of-place (false) are set by default.
static void
estimate_size(int dim, long long *n, long long *inembed, long long istride,
long long idist, long long *onembed, long long ostride,
long long odist, fft_type type, long long batch,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating n-D FFT.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement need to be specified explicitly to get correct
/// result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction(if
/// current FFT is complex-to-complex) and out-of-place (false) are set by default.
static void
estimate_size(int dim, int *n, int *inembed, int istride, int idist,
int *onembed, int ostride, int odist, fft_type type, int batch,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 1-D FFT.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement need to be specified explicitly to get correct
/// result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If it is not set, forward direction(if current FFT is
/// complex-to-complex) and out-of-place (false) are set by default.
static void
estimate_size(int n1, fft_type type, int batch,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n1, type, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 2-D FFT.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement need to be specified explicitly to get correct
/// result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction(if
/// current FFT is complex-to-complex) and out-of-place (false) are set by default.
static void
estimate_size(int n2, int n1, fft_type type,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n2, n1, type,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 3-D FFT.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement need to be specified explicitly to get correct
/// result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction(if
/// current FFT is complex-to-complex) and out-of-place (false) are set by default.
static void
estimate_size(int n3, int n2, int n1, fft_type type,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n3, n2, n1, type,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
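  // Illustrative usage sketch (not part of the original header): estimate
  // the workspace for a 2-D real-to-complex transform before creating a plan.
  // `NY` and `NX` are hypothetical dimensions; direction and placement are
  // given explicitly so the estimate matches the intended compute call.
  //
  //   size_t est_bytes = 0;
  //   dpct::fft::fft_engine::estimate_size(
  //       NY, NX, dpct::fft::fft_type::real_float_to_complex_float,
  //       &est_bytes,
  //       std::make_pair(dpct::fft::fft_direction::forward,
  //                      /*is_inplace=*/false));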
#endif
/// Execute the FFT calculation.
/// \param [in] input Pointer to the input data.
/// \param [out] output Pointer to the output data.
/// \param [in] direction The FFT direction.
template <typename input_t, typename output_t>
void compute(input_t *input, output_t *output, fft_direction direction) {
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
compute_complex<float, oneapi::mkl::dft::precision::SINGLE>(
(float *)input, (float *)output, direction);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output, direction);
} else if (_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
} else if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
} else if (_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output);
}
}
template <>
void compute(float *input, sycl::float2 *output, fft_direction direction) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
}
template <>
void compute(sycl::float2 *input, float *output, fft_direction direction) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
}
template <>
void compute(double *input, sycl::double2 *output, fft_direction direction) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input,
(double *)output);
}
template <>
void compute(sycl::double2 *input, double *output, fft_direction direction) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input,
(double *)output);
}
template <>
void compute(sycl::float2 *input, sycl::float2 *output,
fft_direction direction) {
compute_complex<float, oneapi::mkl::dft::precision::SINGLE>(
(float *)input, (float *)output, direction);
}
template <>
void compute(sycl::double2 *input, sycl::double2 *output,
fft_direction direction) {
compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output, direction);
}
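  // Illustrative usage sketch (not part of the original header): for a
  // real-to-complex plan the (float *, sycl::float2 *) specialization above
  // is selected; the direction argument is recorded before the real-domain
  // compute runs. `r2c_plan`, `d_real` and `d_cplx` are hypothetical.
  //
  //   r2c_plan->compute(d_real, d_cplx, dpct::fft::fft_direction::forward);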
/// Set the user's SYCL queue for the calculation.
/// \param [in] q Pointer to the SYCL queue.
void set_queue(sycl::queue *q) { _q = q; }
#ifdef __INTEL_MKL__
/// Set whether to use an external or the internal workspace.
/// \param [in] flag True means using the internal workspace. False means
/// using an external workspace.
void use_internal_workspace(bool flag = true) {
_use_external_workspace = !flag;
}
/// Specify the external workspace.
/// \param [in] ptr Pointer to the workspace.
void set_workspace(void *ptr) {
if (!_use_external_workspace) {
return;
}
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr));
_desc_sc->set_workspace(data);
}
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr));
_desc_dc->set_workspace(data);
}
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr));
_desc_sr->set_workspace(data);
}
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr));
_desc_dr->set_workspace(data);
}
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
}
#endif
/// Get the workspace size.
/// \param [out] scratchpad_size Workspace size in bytes.
void get_workspace_size(size_t *scratchpad_size) {
if (scratchpad_size) {
*scratchpad_size = _workspace_bytes;
}
}
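  // Illustrative usage sketch (not part of the original header): supplying an
  // external workspace. Disable the internal workspace before commit, then
  // query the committed size and hand over a user-allocated buffer. `engine`
  // and `q` are hypothetical; set_workspace requires __INTEL_MKL__.
  //
  //   engine->use_internal_workspace(false); // must precede commit(...)
  //   // ... engine->commit(...);
  //   size_t ws_bytes = 0;
  //   engine->get_workspace_size(&ws_bytes);
  //   void *ws = sycl::malloc_device(ws_bytes, q);
  //   engine->set_workspace(ws);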
private:
static std::pair<library_data_t, library_data_t>
fft_type_to_data_type(fft_type type) {
switch (type) {
case fft_type::real_float_to_complex_float: {
return std::make_pair(library_data_t::real_float,
library_data_t::complex_float);
}
case fft_type::complex_float_to_real_float: {
return std::make_pair(library_data_t::complex_float,
library_data_t::real_float);
}
case fft_type::real_double_to_complex_double: {
return std::make_pair(library_data_t::real_double,
library_data_t::complex_double);
}
case fft_type::complex_double_to_real_double: {
return std::make_pair(library_data_t::complex_double,
library_data_t::real_double);
}
case fft_type::complex_float_to_complex_float: {
return std::make_pair(library_data_t::complex_float,
library_data_t::complex_float);
}
case fft_type::complex_double_to_complex_double: {
return std::make_pair(library_data_t::complex_double,
library_data_t::complex_double);
}
}
}
void config_and_commit_basic() {
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
_desc_sc = std::make_shared<
oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::SINGLE,
oneapi::mkl::dft::domain::COMPLEX>>(_n);
std::int64_t distance = 1;
for (auto i : _n)
distance = distance * i;
_fwd_dist = distance;
_bwd_dist = distance;
_desc_sc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
distance);
_desc_sc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
distance);
_desc_sc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
else
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_sc->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_sc->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_sc->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_sc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
else
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
_desc_sc->commit(*_q);
#endif
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
_desc_dc = std::make_shared<
oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::DOUBLE,
oneapi::mkl::dft::domain::COMPLEX>>(_n);
std::int64_t distance = 1;
for (auto i : _n)
distance = distance * i;
_fwd_dist = distance;
_bwd_dist = distance;
_desc_dc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
distance);
_desc_dc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
distance);
_desc_dc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
else
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_dc->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_dc->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_dc->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_dc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
else
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
_desc_dc->commit(*_q);
#endif
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
_desc_sr = std::make_shared<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>>(
_n);
if (_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float)
_direction = fft_direction::forward;
else
_direction = fft_direction::backward;
_desc_sr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
set_stride_and_distance_basic<true>(_desc_sr);
} else {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_sr);
}
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_sr->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_sr->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_sr->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_sr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
set_stride_and_distance_basic<true>(_desc_sr);
} else {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_sr);
}
_desc_sr->commit(*_q);
#endif
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
_desc_dr = std::make_shared<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>>(
_n);
if (_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double)
_direction = fft_direction::forward;
else
_direction = fft_direction::backward;
_desc_dr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
set_stride_and_distance_basic<true>(_desc_dr);
} else {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_dr);
}
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_dr->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_dr->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_dr->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_dr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
set_stride_and_distance_basic<true>(_desc_dr);
} else {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_dr);
}
_desc_dr->commit(*_q);
#endif
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
}
void config_and_commit_advanced() {
#ifdef __INTEL_MKL__
#define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE) \
{ \
DESC = std::make_shared<oneapi::mkl::dft::descriptor< \
oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>( \
_n); \
set_stride_advanced(DESC); \
DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, \
_batch); \
if (_is_user_specified_dir_and_placement && _is_inplace) \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
DFTI_CONFIG_VALUE::DFTI_INPLACE); \
else \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); \
if (_use_external_workspace) { \
DESC->set_value(oneapi::mkl::dft::config_param::WORKSPACE, \
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL); \
} \
if (_is_estimate_call) { \
if (_q->get_device().is_gpu()) { \
DESC->get_value( \
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES, \
&_workspace_estimate_bytes); \
} \
} else { \
DESC->commit(*_q); \
    if (_q->get_device().is_gpu()) {                                          \
DESC->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES, \
&_workspace_bytes); \
} \
} \
}
#else
#define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE) \
{ \
DESC = std::make_shared<oneapi::mkl::dft::descriptor< \
oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>( \
_n); \
set_stride_advanced(DESC); \
DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, \
_batch); \
if (_is_user_specified_dir_and_placement && _is_inplace) \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
oneapi::mkl::dft::config_value::INPLACE); \
else \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
oneapi::mkl::dft::config_value::NOT_INPLACE); \
DESC->commit(*_q); \
}
#endif
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
CONFIG_AND_COMMIT(_desc_sc, SINGLE, COMPLEX, float);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
CONFIG_AND_COMMIT(_desc_dc, DOUBLE, COMPLEX, double);
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
CONFIG_AND_COMMIT(_desc_sr, SINGLE, REAL, float);
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
CONFIG_AND_COMMIT(_desc_dr, DOUBLE, REAL, double);
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
#undef CONFIG_AND_COMMIT
}
template <typename T>
void init(int dim, T *n, T *inembed, T istride, T idist,
library_data_t input_type, T *onembed, T ostride, T odist,
library_data_t output_type, T batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement) {
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
_n.resize(dim);
_inembed.resize(dim);
_onembed.resize(dim);
_input_type = input_type;
_output_type = output_type;
for (int i = 0; i < dim; i++) {
_n[i] = n[i];
}
if (inembed && onembed) {
for (int i = 0; i < dim; i++) {
_inembed[i] = inembed[i];
_onembed[i] = onembed[i];
}
_istride = istride;
_ostride = ostride;
if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double)) {
_fwd_dist = idist;
_bwd_dist = odist;
} else if ((_output_type == library_data_t::real_float &&
_input_type == library_data_t::complex_float) ||
(_output_type == library_data_t::real_double &&
_input_type == library_data_t::complex_double)) {
_fwd_dist = odist;
_bwd_dist = idist;
} else {
if (_is_user_specified_dir_and_placement &&
(_direction == fft_direction::backward)) {
_fwd_dist = odist;
_bwd_dist = idist;
} else {
_fwd_dist = idist;
_bwd_dist = odist;
}
}
} else {
_is_basic = true;
}
_batch = batch;
_dim = dim;
if (_is_basic)
config_and_commit_basic();
else
config_and_commit_advanced();
}
template <class Desc_t>
void set_stride_advanced(std::shared_ptr<Desc_t> desc) {
if (_dim == 1) {
std::int64_t input_stride[2] = {0, _istride};
std::int64_t output_stride[2] = {0, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
} else if (_dim == 2) {
std::int64_t input_stride[3] = {0, _inembed[1] * _istride, _istride};
std::int64_t output_stride[3] = {0, _onembed[1] * _ostride, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
} else if (_dim == 3) {
std::int64_t input_stride[4] = {0, _inembed[2] * _inembed[1] * _istride,
_inembed[2] * _istride, _istride};
std::int64_t output_stride[4] = {0, _onembed[2] * _onembed[1] * _ostride,
_onembed[2] * _ostride, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
}
}
template <class Desc_t> void swap_distance(std::shared_ptr<Desc_t> desc) {
desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _bwd_dist);
desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _fwd_dist);
std::int64_t temp = _bwd_dist;
_bwd_dist = _fwd_dist;
_fwd_dist = temp;
}
template <bool Is_inplace, class Desc_t>
void set_stride_and_distance_basic(std::shared_ptr<Desc_t> desc) {
std::int64_t forward_distance = 0;
std::int64_t backward_distance = 0;
#define SET_STRIDE \
{ \
if (_direction == fft_direction::forward) { \
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \
real_stride); \
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \
complex_stride); \
} else { \
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \
complex_stride); \
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \
real_stride); \
} \
}
if (_dim == 1) {
if constexpr (Is_inplace) {
std::int64_t real_stride[2] = {0, 1};
std::int64_t complex_stride[2] = {0, 1};
SET_STRIDE;
forward_distance = 2 * (_n[0] / 2 + 1);
backward_distance = _n[0] / 2 + 1;
} else {
std::int64_t real_stride[2] = {0, 1};
std::int64_t complex_stride[2] = {0, 1};
SET_STRIDE;
forward_distance = _n[0];
backward_distance = _n[0] / 2 + 1;
}
} else if (_dim == 2) {
if constexpr (Is_inplace) {
std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1};
std::int64_t real_stride[3] = {0, 2 * (_n[1] / 2 + 1), 1};
SET_STRIDE;
forward_distance = _n[0] * 2 * (_n[1] / 2 + 1);
backward_distance = _n[0] * (_n[1] / 2 + 1);
} else {
std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1};
std::int64_t real_stride[3] = {0, _n[1], 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1];
backward_distance = _n[0] * (_n[1] / 2 + 1);
}
} else if (_dim == 3) {
if constexpr (Is_inplace) {
std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1),
_n[2] / 2 + 1, 1};
std::int64_t real_stride[4] = {0, _n[1] * 2 * (_n[2] / 2 + 1),
2 * (_n[2] / 2 + 1), 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1] * 2 * (_n[2] / 2 + 1);
backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1);
} else {
std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1),
_n[2] / 2 + 1, 1};
std::int64_t real_stride[4] = {0, _n[1] * _n[2], _n[2], 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1] * _n[2];
backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1);
}
}
#undef SET_STRIDE
desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
forward_distance);
desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
backward_distance);
}
#define COMPUTE(DESC) \
{ \
if (_is_inplace) { \
auto data_input = \
dpct::detail::get_memory(reinterpret_cast<T *>(input)); \
if (_direction == fft_direction::forward) { \
oneapi::mkl::dft::compute_forward(*DESC, data_input); \
} else { \
oneapi::mkl::dft::compute_backward(*DESC, data_input); \
} \
} else { \
auto data_input = \
dpct::detail::get_memory(reinterpret_cast<T *>(input)); \
auto data_output = \
dpct::detail::get_memory(reinterpret_cast<T *>(output)); \
if (_direction == fft_direction::forward) { \
oneapi::mkl::dft::compute_forward(*DESC, data_input, data_output); \
} else { \
oneapi::mkl::dft::compute_backward(*DESC, data_input, data_output); \
} \
} \
}
template <class T, oneapi::mkl::dft::precision Precision>
void compute_complex(T *input, T *output, fft_direction direction) {
bool is_this_compute_inplace = input == output;
if (!_is_user_specified_dir_and_placement) {
      // The complex domain descriptor needs different config values if the
      // FFT direction or placement is different.
// Here we check the conditions, and new config values are set and
// re-committed if needed.
if (direction != _direction || is_this_compute_inplace != _is_inplace) {
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
if (direction != _direction) {
swap_distance(_desc_sc);
_direction = direction;
}
if (is_this_compute_inplace != _is_inplace) {
_is_inplace = is_this_compute_inplace;
#ifdef __INTEL_MKL__
if (_is_inplace) {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
} else {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
}
#else
if (_is_inplace) {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
} else {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
}
#endif
}
_desc_sc->commit(*_q);
} else {
if (direction != _direction) {
swap_distance(_desc_dc);
_direction = direction;
}
if (is_this_compute_inplace != _is_inplace) {
_is_inplace = is_this_compute_inplace;
#ifdef __INTEL_MKL__
if (_is_inplace) {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
} else {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
}
#else
if (_is_inplace) {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
} else {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
}
#endif
}
_desc_dc->commit(*_q);
}
}
}
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
COMPUTE(_desc_sc);
} else {
COMPUTE(_desc_dc);
}
}
template <class T, oneapi::mkl::dft::precision Precision>
void compute_real(T *input, T *output) {
bool is_this_compute_inplace = input == output;
if (!_is_user_specified_dir_and_placement) {
      // The real domain descriptor needs different config values if the
      // FFT placement is different.
// Here we check the condition, and new config values are set and
// re-committed if needed.
if (is_this_compute_inplace != _is_inplace) {
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
_is_inplace = is_this_compute_inplace;
if (_is_inplace) {
#ifdef __INTEL_MKL__
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
#else
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<true>(_desc_sr);
} else {
#ifdef __INTEL_MKL__
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
#else
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<false>(_desc_sr);
}
_desc_sr->commit(*_q);
} else {
_is_inplace = is_this_compute_inplace;
if (_is_inplace) {
#ifdef __INTEL_MKL__
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
#else
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<true>(_desc_dr);
} else {
#ifdef __INTEL_MKL__
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
#else
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<false>(_desc_dr);
}
_desc_dr->commit(*_q);
}
}
}
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
COMPUTE(_desc_sr);
} else {
COMPUTE(_desc_dr);
}
}
#undef COMPUTE
private:
sycl::queue *_q = nullptr;
int _dim;
std::vector<std::int64_t> _n;
std::vector<std::int64_t> _inembed;
std::int64_t _istride;
std::int64_t _fwd_dist;
library_data_t _input_type;
std::vector<std::int64_t> _onembed;
std::int64_t _ostride;
std::int64_t _bwd_dist;
library_data_t _output_type;
std::int64_t _batch = 1;
bool _is_basic = false;
bool _is_inplace = false;
fft_direction _direction = fft_direction::forward;
bool _is_user_specified_dir_and_placement = false;
bool _use_external_workspace = false;
void *_external_workspace_ptr = nullptr;
size_t _workspace_bytes = 0;
bool _is_estimate_call = false;
size_t _workspace_estimate_bytes = 0;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>>
_desc_sr;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>>
_desc_dr;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::COMPLEX>>
_desc_sc;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::COMPLEX>>
_desc_dc;
};
using fft_engine_ptr = fft_engine *;
} // namespace fft
} // namespace dpct
#endif // __DPCT_FFT_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/lib_common_utils.hpp | //==---- lib_common_utils.hpp ---------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_LIB_COMMON_UTILS_HPP__
#define __DPCT_LIB_COMMON_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
namespace detail {
template <typename T> inline auto get_memory(T *x) {
#ifdef DPCT_USM_LEVEL_NONE
return dpct::get_buffer<std::remove_cv_t<T>>(x);
#else
return x;
#endif
}
template <typename T>
inline typename DataType<T>::T2 get_value(const T *s, sycl::queue &q) {
using Ty = typename DataType<T>::T2;
Ty s_h;
detail::dpct_memcpy(q, (void *)&s_h, (void *)s, sizeof(T), automatic).wait();
return s_h;
}
} // namespace detail
enum class version_field : int {
major,
minor,
update,
patch
};
/// Returns the requested field of the Intel(R) oneAPI Math Kernel Library version.
/// \param field The version information field (major, minor, update or patch).
/// \param result The result value.
inline void mkl_get_version(version_field field, int *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
MKLVersion version;
mkl_get_version(&version);
if (version_field::major == field) {
*result = version.MajorVersion;
} else if (version_field::minor == field) {
*result = version.MinorVersion;
} else if (version_field::update == field) {
*result = version.UpdateVersion;
} else if (version_field::patch == field) {
*result = 0;
} else {
throw std::runtime_error("unknown field");
}
#endif
}
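/// Example (illustrative sketch, not part of the original header): querying
/// the oneMKL major version with this helper. It only succeeds when the code
/// is built against Intel oneMKL (__INTEL_MKL__ defined); otherwise the call
/// throws as documented above.
///
///   int mkl_major = 0;
///   dpct::mkl_get_version(dpct::version_field::major, &mkl_major);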
enum class library_data_t : unsigned char {
real_float = 0,
complex_float,
real_double,
complex_double,
real_half,
complex_half,
real_bfloat16,
complex_bfloat16,
real_int4,
complex_int4,
real_uint4,
complex_uint4,
real_int8,
complex_int8,
real_uint8,
complex_uint8,
real_int16,
complex_int16,
real_uint16,
complex_uint16,
real_int32,
complex_int32,
real_uint32,
complex_uint32,
real_int64,
complex_int64,
real_uint64,
complex_uint64,
real_int8_4,
real_int8_32,
real_uint8_4,
library_data_t_size
};
namespace detail {
template <typename ArgT>
inline constexpr std::uint64_t get_type_combination_id(ArgT Val) {
static_assert((unsigned char)library_data_t::library_data_t_size <=
std::numeric_limits<unsigned char>::max() &&
"library_data_t size exceeds limit.");
static_assert(std::is_same_v<ArgT, library_data_t>, "Unsupported ArgT");
return (std::uint64_t)Val;
}
template <typename FirstT, typename... RestT>
inline constexpr std::uint64_t get_type_combination_id(FirstT FirstVal,
RestT... RestVal) {
static_assert((std::uint8_t)library_data_t::library_data_t_size <=
std::numeric_limits<unsigned char>::max() &&
"library_data_t size exceeds limit.");
static_assert(sizeof...(RestT) <= 8 && "Too many parameters");
static_assert(std::is_same_v<FirstT, library_data_t>, "Unsupported FirstT");
return get_type_combination_id(RestVal...) << 8 | ((std::uint64_t)FirstVal);
}
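// Illustrative note (not part of the original header): each library_data_t
// value occupies one byte of the returned id, with the first argument placed
// in the lowest byte. For example,
//   get_type_combination_id(library_data_t::real_int32,   // byte 0
//                           library_data_t::real_int32,   // byte 1
//                           library_data_t::real_float)   // byte 2
// produces the key that helpers built on this header (e.g. dpct::sparse)
// switch on when dispatching to typed implementations.
// The library_data_size table below stores element sizes in bits.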
inline constexpr std::size_t library_data_size[] = {
8 * sizeof(float), // real_float
8 * sizeof(std::complex<float>), // complex_float
8 * sizeof(double), // real_double
8 * sizeof(std::complex<double>), // complex_double
8 * sizeof(sycl::half), // real_half
8 * sizeof(std::complex<sycl::half>), // complex_half
16, // real_bfloat16
16 * 2, // complex_bfloat16
4, // real_int4
4 * 2, // complex_int4
4, // real_uint4
4 * 2, // complex_uint4
8, // real_int8
8 * 2, // complex_int8
8, // real_uint8
8 * 2, // complex_uint8
16, // real_int16
16 * 2, // complex_int16
16, // real_uint16
16 * 2, // complex_uint16
32, // real_int32
32 * 2, // complex_int32
32, // real_uint32
32 * 2, // complex_uint32
64, // real_int64
64 * 2, // complex_int64
64, // real_uint64
64 * 2, // complex_uint64
8, // real_int8_4
8, // real_int8_32
8 // real_uint8_4
};
} // namespace detail
} // namespace dpct
#endif // __DPCT_LIB_COMMON_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/sparse_utils.hpp | //==---- sparse_utils.hpp -------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_SPARSE_UTILS_HPP__
#define __DPCT_SPARSE_UTILS_HPP__
#include "lib_common_utils.hpp"
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
namespace dpct {
namespace sparse {
/// Describes properties of a sparse matrix.
/// The properties are matrix type, diag, uplo and index base.
class matrix_info {
public:
/// Matrix types are:
/// ge: General matrix
/// sy: Symmetric matrix
/// he: Hermitian matrix
/// tr: Triangular matrix
enum class matrix_type : int { ge = 0, sy, he, tr };
auto get_matrix_type() const { return _matrix_type; }
auto get_diag() const { return _diag; }
auto get_uplo() const { return _uplo; }
auto get_index_base() const { return _index_base; }
void set_matrix_type(matrix_type mt) { _matrix_type = mt; }
void set_diag(oneapi::mkl::diag d) { _diag = d; }
void set_uplo(oneapi::mkl::uplo u) { _uplo = u; }
void set_index_base(oneapi::mkl::index_base ib) { _index_base = ib; }
private:
matrix_type _matrix_type = matrix_type::ge;
oneapi::mkl::diag _diag = oneapi::mkl::diag::nonunit;
oneapi::mkl::uplo _uplo = oneapi::mkl::uplo::upper;
oneapi::mkl::index_base _index_base = oneapi::mkl::index_base::zero;
};
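/// Example (illustrative sketch, not part of the original header): describing
/// a lower-triangular, unit-diagonal matrix with zero-based indexing before
/// calling the csrmv/csrmm routines below.
///
///   auto info = std::make_shared<dpct::sparse::matrix_info>();
///   info->set_matrix_type(dpct::sparse::matrix_info::matrix_type::tr);
///   info->set_uplo(oneapi::mkl::uplo::lower);
///   info->set_diag(oneapi::mkl::diag::unit);
///   info->set_index_base(oneapi::mkl::index_base::zero);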
/// Computes a CSR format sparse matrix-dense vector product.
/// y = alpha * op(A) * x + beta * y
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the matrix A.
/// \param [in] num_rows Number of rows of the matrix A.
/// \param [in] num_cols Number of columns of the matrix A.
/// \param [in] alpha Scaling factor for the matrix A.
/// \param [in] info Matrix info of the matrix A.
/// \param [in] val An array containing the non-zero elements of the matrix A.
/// \param [in] row_ptr An array of length \p num_rows + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] x Data of the vector x.
/// \param [in] beta Scaling factor for the vector y.
/// \param [in, out] y Data of the vector y.
template <typename T>
void csrmv(sycl::queue &queue, oneapi::mkl::transpose trans, int num_rows,
int num_cols, const T *alpha,
const std::shared_ptr<matrix_info> info, const T *val,
const int *row_ptr, const int *col_ind, const T *x, const T *beta,
T *y) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename dpct::DataType<T>::T2;
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle =
new oneapi::mkl::sparse::matrix_handle_t;
oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle);
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, num_rows,
num_cols, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
auto data_x =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(x)));
auto data_y = dpct::detail::get_memory(reinterpret_cast<Ty *>(y));
switch (info->get_matrix_type()) {
case matrix_info::matrix_type::ge: {
oneapi::mkl::sparse::optimize_gemv(queue, trans, *sparse_matrix_handle);
oneapi::mkl::sparse::gemv(queue, trans, alpha_value, *sparse_matrix_handle,
data_x, beta_value, data_y);
break;
}
case matrix_info::matrix_type::sy: {
oneapi::mkl::sparse::symv(queue, info->get_uplo(), alpha_value,
*sparse_matrix_handle, data_x, beta_value,
data_y);
break;
}
case matrix_info::matrix_type::tr: {
oneapi::mkl::sparse::optimize_trmv(queue, info->get_uplo(), trans,
info->get_diag(), *sparse_matrix_handle);
oneapi::mkl::sparse::trmv(queue, info->get_uplo(), trans, info->get_diag(),
alpha_value, *sparse_matrix_handle, data_x,
beta_value, data_y);
break;
}
default:
    throw std::runtime_error(
        "the csrmv does not support matrix_info::matrix_type::he");
}
sycl::event e =
oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle);
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete sparse_matrix_handle; });
});
#endif
}
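/// Example (illustrative sketch; the d_* pointers and the sizes are
/// hypothetical device-accessible CSR arrays): a general (ge) single-precision
/// y = alpha * A * x + beta * y with host-resident alpha/beta.
///
///   float alpha = 1.0f, beta = 0.0f;
///   auto info = std::make_shared<dpct::sparse::matrix_info>();
///   dpct::sparse::csrmv(q, oneapi::mkl::transpose::nontrans, rows, cols,
///                       &alpha, info, d_val, d_row_ptr, d_col_ind, d_x,
///                       &beta, d_y);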
/// Computes a CSR format sparse matrix-dense matrix product.
/// C = alpha * op(A) * B + beta * C
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the matrix A.
/// \param [in] sparse_rows Number of rows of the matrix A.
/// \param [in] dense_cols Number of columns of the matrix B or C.
/// \param [in] sparse_cols Number of columns of the matrix A.
/// \param [in] alpha Scaling factor for the matrix A.
/// \param [in] info Matrix info of the matrix A.
/// \param [in] val An array containing the non-zero elements of the matrix A.
/// \param [in] row_ptr An array of length \p sparse_rows + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] b Data of the matrix B.
/// \param [in] ldb Leading dimension of the matrix B.
/// \param [in] beta Scaling factor for the matrix C.
/// \param [in, out] c Data of the matrix C.
/// \param [in] ldc Leading dimension of the matrix C.
template <typename T>
void csrmm(sycl::queue &queue, oneapi::mkl::transpose trans, int sparse_rows,
int dense_cols, int sparse_cols, const T *alpha,
const std::shared_ptr<matrix_info> info, const T *val,
const int *row_ptr, const int *col_ind, const T *b, int ldb,
const T *beta, T *c, int ldc) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename dpct::DataType<T>::T2;
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle =
new oneapi::mkl::sparse::matrix_handle_t;
oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle);
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, sparse_rows,
sparse_cols, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
auto data_b =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(b)));
auto data_c = dpct::detail::get_memory(reinterpret_cast<Ty *>(c));
switch (info->get_matrix_type()) {
case matrix_info::matrix_type::ge: {
oneapi::mkl::sparse::gemm(queue, oneapi::mkl::layout::row_major, trans,
oneapi::mkl::transpose::nontrans, alpha_value,
*sparse_matrix_handle, data_b, dense_cols, ldb,
beta_value, data_c, ldc);
break;
}
default:
throw std::runtime_error(
"the csrmm does not support matrix_info::matrix_type::sy, "
"matrix_info::matrix_type::tr and matrix_info::matrix_type::he");
}
sycl::event e =
oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle);
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete sparse_matrix_handle; });
});
#endif
}
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Stores the optimization information for solving a system of linear
/// equations.
class optimize_info {
public:
/// Constructor
optimize_info() { oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle); }
/// Destructor
~optimize_info() {
oneapi::mkl::sparse::release_matrix_handle(get_default_queue(),
&_matrix_handle, _deps)
.wait();
}
/// Add dependency for the destructor.
/// \param [in] e The event which the destructor depends on.
void add_dependency(sycl::event e) { _deps.push_back(e); }
/// Get the internal saved matrix handle.
/// \return Returns the matrix handle.
oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept {
return _matrix_handle;
}
private:
oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr;
std::vector<sycl::event> _deps;
};
#endif
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Performs internal optimizations for solving a system of linear equations for
/// a CSR format sparse matrix.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the sparse matrix.
/// \param [in] row_col Number of rows of the sparse matrix.
/// \param [in] info Matrix info of the sparse matrix.
/// \param [in] val An array containing the non-zero elements of the sparse matrix.
/// \param [in] row_ptr An array of length \p row_col + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [out] optimize_info The result of the optimizations.
template <typename T>
void optimize_csrsv(sycl::queue &queue, oneapi::mkl::transpose trans,
int row_col, const std::shared_ptr<matrix_info> info,
const T *val, const int *row_ptr, const int *col_ind,
std::shared_ptr<optimize_info> optimize_info) {
using Ty = typename dpct::DataType<T>::T2;
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, optimize_info->get_matrix_handle(),
row_col, row_col, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
if (info->get_matrix_type() != matrix_info::matrix_type::tr)
return;
#ifndef DPCT_USM_LEVEL_NONE
sycl::event e;
e =
#endif
oneapi::mkl::sparse::optimize_trsv(queue, info->get_uplo(), trans,
info->get_diag(),
optimize_info->get_matrix_handle());
#ifndef DPCT_USM_LEVEL_NONE
optimize_info->add_dependency(e);
#endif
}
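/// Example (illustrative sketch; names are hypothetical): preparing the
/// optimization info for an n x n triangular CSR matrix before a solve.
///
///   auto info = std::make_shared<dpct::sparse::matrix_info>();
///   info->set_matrix_type(dpct::sparse::matrix_info::matrix_type::tr);
///   auto opt = std::make_shared<dpct::sparse::optimize_info>();
///   dpct::sparse::optimize_csrsv(q, oneapi::mkl::transpose::nontrans, n,
///                                info, d_val, d_row_ptr, d_col_ind, opt);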
#endif
class sparse_matrix_desc;
using sparse_matrix_desc_t = std::shared_ptr<sparse_matrix_desc>;
/// Structure describing a dense vector
class dense_vector_desc {
public:
dense_vector_desc(std::int64_t ele_num, void *value,
library_data_t value_type)
: _ele_num(ele_num), _value(value), _value_type(value_type) {}
void get_desc(std::int64_t *ele_num, const void **value,
library_data_t *value_type) const noexcept {
*ele_num = _ele_num;
*value = _value;
*value_type = _value_type;
}
void get_desc(std::int64_t *ele_num, void **value,
library_data_t *value_type) const noexcept {
get_desc(ele_num, const_cast<const void **>(value), value_type);
}
void *get_value() const noexcept { return _value; }
void set_value(void *value) { _value = value; }
private:
std::int64_t _ele_num;
void *_value;
library_data_t _value_type;
};
/// Structure describing a dense matrix
class dense_matrix_desc {
public:
dense_matrix_desc(std::int64_t row_num, std::int64_t col_num,
std::int64_t leading_dim, void *value,
library_data_t value_type, oneapi::mkl::layout layout)
: _row_num(row_num), _col_num(col_num), _leading_dim(leading_dim),
_value(value), _value_type(value_type), _layout(layout) {}
void get_desc(std::int64_t *row_num, std::int64_t *col_num,
std::int64_t *leading_dim, void **value,
library_data_t *value_type,
oneapi::mkl::layout *layout) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*leading_dim = _leading_dim;
*value = _value;
*value_type = _value_type;
*layout = _layout;
}
void *get_value() const noexcept { return _value; }
void set_value(void *value) { _value = value; }
std::int64_t get_col_num() const noexcept { return _col_num; }
std::int64_t get_leading_dim() const noexcept { return _leading_dim; }
oneapi::mkl::layout get_layout() const noexcept { return _layout; }
private:
std::int64_t _row_num;
std::int64_t _col_num;
std::int64_t _leading_dim;
void *_value;
library_data_t _value_type;
oneapi::mkl::layout _layout;
};
/// Sparse matrix data format
enum matrix_format : int {
csr = 1,
};
/// Sparse matrix attribute
enum matrix_attribute : int { uplo = 0, diag };
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Structure describing a sparse matrix
class sparse_matrix_desc {
public:
/// Constructor
/// \param [in] row_num Number of rows of the sparse matrix.
/// \param [in] col_num Number of columns of the sparse matrix.
/// \param [in] nnz Non-zero elements in the sparse matrix.
/// \param [in] row_ptr An array of length \p row_num + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] value An array containing the non-zero elements of the sparse matrix.
/// \param [in] row_ptr_type Data type of the \p row_ptr .
/// \param [in] col_ind_type Data type of the \p col_ind .
/// \param [in] base Indicates how input arrays are indexed.
/// \param [in] value_type Data type of the \p value .
/// \param [in] data_format The matrix data format.
sparse_matrix_desc(std::int64_t row_num, std::int64_t col_num,
std::int64_t nnz, void *row_ptr, void *col_ind,
void *value, library_data_t row_ptr_type,
library_data_t col_ind_type, oneapi::mkl::index_base base,
library_data_t value_type, matrix_format data_format)
: _row_num(row_num), _col_num(col_num), _nnz(nnz), _row_ptr(row_ptr),
_col_ind(col_ind), _value(value), _row_ptr_type(row_ptr_type),
_col_ind_type(col_ind_type), _base(base), _value_type(value_type),
_data_format(data_format) {
if (_data_format != matrix_format::csr) {
throw std::runtime_error("the sparse matrix data format is unsupported");
}
oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle);
construct();
}
/// Destructor
~sparse_matrix_desc() {
oneapi::mkl::sparse::release_matrix_handle(get_default_queue(),
&_matrix_handle, _deps)
.wait();
}
/// Add dependency for the destroy method.
/// \param [in] e The event which the destroy method depends on.
void add_dependency(sycl::event e) { _deps.push_back(e); }
/// Get the internal saved matrix handle.
/// \return Returns the matrix handle.
oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept {
return _matrix_handle;
}
/// Get the values saved in the descriptor
/// \param [out] row_num Number of rows of the sparse matrix.
/// \param [out] col_num Number of columns of the sparse matrix.
/// \param [out] nnz Non-zero elements in the sparse matrix.
/// \param [out] row_ptr An array of length \p row_num + 1.
/// \param [out] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [out] value An array containing the non-zero elements of the sparse matrix.
/// \param [out] row_ptr_type Data type of the \p row_ptr .
/// \param [out] col_ind_type Data type of the \p col_ind .
/// \param [out] base Indicates how input arrays are indexed.
/// \param [out] value_type Data type of the \p value .
void get_desc(int64_t *row_num, int64_t *col_num, int64_t *nnz,
void **row_ptr, void **col_ind, void **value,
library_data_t *row_ptr_type, library_data_t *col_ind_type,
oneapi::mkl::index_base *base,
library_data_t *value_type) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*nnz = _nnz;
*row_ptr = _row_ptr;
*col_ind = _col_ind;
*value = _value;
*row_ptr_type = _row_ptr_type;
*col_ind_type = _col_ind_type;
*base = _base;
*value_type = _value_type;
}
/// Get the sparse matrix data format of this descriptor
/// \param [out] data_format The matrix data format result
void get_format(matrix_format *data_format) const noexcept {
*data_format = _data_format;
}
/// Get the index base of this descriptor
/// \param [out] base The index base result
void get_base(oneapi::mkl::index_base *base) const noexcept { *base = _base; }
/// Get the value pointer of this descriptor
/// \param [out] value The value pointer result
void get_value(void **value) const noexcept { *value = _value; }
/// Set the value pointer of this descriptor
/// \param [in] value The input value pointer
void set_value(void *value) {
// Assume the new data is different from the old data
_value = value;
construct();
}
/// Get the size of the sparse matrix
/// \param [out] row_num Number of rows of the sparse matrix.
/// \param [out] col_num Number of columns of the sparse matrix.
/// \param [out] nnz Non-zero elements in the sparse matrix.
void get_size(int64_t *row_num, int64_t *col_num,
int64_t *nnz) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*nnz = _nnz;
}
/// Set the sparse matrix attribute
/// \param [in] attribute The attribute type
/// \param [in] data The attribute value
/// \param [in] data_size The data size of the attribute value
void set_attribute(matrix_attribute attribute, const void *data,
size_t data_size) {
if (attribute == matrix_attribute::diag) {
const oneapi::mkl::diag *diag_ptr =
reinterpret_cast<const oneapi::mkl::diag *>(data);
if (*diag_ptr == oneapi::mkl::diag::unit) {
_diag = oneapi::mkl::diag::unit;
} else if (*diag_ptr == oneapi::mkl::diag::nonunit) {
_diag = oneapi::mkl::diag::nonunit;
} else {
throw std::runtime_error("unsupported diag value");
}
} else if (attribute == matrix_attribute::uplo) {
const oneapi::mkl::uplo *uplo_ptr =
reinterpret_cast<const oneapi::mkl::uplo *>(data);
if (*uplo_ptr == oneapi::mkl::uplo::upper) {
_uplo = oneapi::mkl::uplo::upper;
} else if (*uplo_ptr == oneapi::mkl::uplo::lower) {
_uplo = oneapi::mkl::uplo::lower;
} else {
throw std::runtime_error("unsupported uplo value");
}
} else {
throw std::runtime_error("unsupported attribute");
}
}
/// Get the sparse matrix attribute
/// \param [in] attribute The attribute type
/// \param [out] data The attribute value
/// \param [in] data_size The data size of the attribute value
void get_attribute(matrix_attribute attribute, void *data,
size_t data_size) const {
if (attribute == matrix_attribute::diag) {
oneapi::mkl::diag *diag_ptr = reinterpret_cast<oneapi::mkl::diag *>(data);
if (_diag.has_value()) {
*diag_ptr = _diag.value();
} else {
*diag_ptr = oneapi::mkl::diag::nonunit;
}
} else if (attribute == matrix_attribute::uplo) {
oneapi::mkl::uplo *uplo_ptr = reinterpret_cast<oneapi::mkl::uplo *>(data);
if (_uplo.has_value()) {
*uplo_ptr = _uplo.value();
} else {
*uplo_ptr = oneapi::mkl::uplo::lower;
}
} else {
throw std::runtime_error("unsupported attribute");
}
}
/// Set the pointers for describing the sparse matrix
/// \param [in] row_ptr An array of length \p row_num + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] value An array containing the non-zero elements of the sparse matrix.
void set_pointers(void *row_ptr, void *col_ind, void *value) {
// Assume the new data is different from the old data
_row_ptr = row_ptr;
_col_ind = col_ind;
_value = value;
construct();
}
/// Get the diag attribute
/// \return diag value
std::optional<oneapi::mkl::diag> get_diag() const noexcept { return _diag; }
/// Get the uplo attribute
/// \return uplo value
std::optional<oneapi::mkl::uplo> get_uplo() const noexcept { return _uplo; }
private:
template <typename index_t, typename value_t> void set_data() {
auto data_row_ptr =
dpct::detail::get_memory(reinterpret_cast<index_t *>(_row_ptr));
auto data_col_ind =
dpct::detail::get_memory(reinterpret_cast<index_t *>(_col_ind));
auto data_value =
dpct::detail::get_memory(reinterpret_cast<value_t *>(_value));
oneapi::mkl::sparse::set_csr_data(get_default_queue(), _matrix_handle,
_row_num, _col_num, _base, data_row_ptr,
data_col_ind, data_value);
get_default_queue().wait();
}
void construct() {
std::uint64_t key = dpct::detail::get_type_combination_id(
_row_ptr_type, _col_ind_type, _value_type);
switch (key) {
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::real_float): {
set_data<std::int32_t, float>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::real_double): {
set_data<std::int32_t, double>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::complex_float): {
set_data<std::int32_t, std::complex<float>>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int32, library_data_t::real_int32,
library_data_t::complex_double): {
set_data<std::int32_t, std::complex<double>>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::real_float): {
set_data<std::int64_t, float>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::real_double): {
set_data<std::int64_t, double>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::complex_float): {
set_data<std::int64_t, std::complex<float>>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int64, library_data_t::real_int64,
library_data_t::complex_double): {
set_data<std::int64_t, std::complex<double>>();
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
std::int64_t _row_num;
std::int64_t _col_num;
std::int64_t _nnz;
void *_row_ptr;
void *_col_ind;
void *_value;
library_data_t _row_ptr_type;
library_data_t _col_ind_type;
oneapi::mkl::index_base _base;
library_data_t _value_type;
oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr;
std::vector<sycl::event> _deps;
matrix_format _data_format;
std::optional<oneapi::mkl::uplo> _uplo;
std::optional<oneapi::mkl::diag> _diag;
};
namespace detail {
#ifdef DPCT_USM_LEVEL_NONE
#define SPARSE_CALL(X) \
do { \
X; \
} while (0)
#else
#define SPARSE_CALL(X) \
do { \
sycl::event e = X; \
a->add_dependency(e); \
} while (0)
#endif
template <typename Ty>
inline void spmv_impl(sycl::queue queue, oneapi::mkl::transpose trans,
const void *alpha, sparse_matrix_desc_t a,
std::shared_ptr<dense_vector_desc> x, const void *beta,
std::shared_ptr<dense_vector_desc> y) {
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
auto data_x =
dpct::detail::get_memory(reinterpret_cast<Ty *>(x->get_value()));
auto data_y =
dpct::detail::get_memory(reinterpret_cast<Ty *>(y->get_value()));
if (a->get_diag().has_value() && a->get_uplo().has_value()) {
oneapi::mkl::sparse::optimize_trmv(queue, a->get_uplo().value(), trans,
a->get_diag().value(),
a->get_matrix_handle());
SPARSE_CALL(oneapi::mkl::sparse::trmv(
queue, a->get_uplo().value(), trans, a->get_diag().value(), alpha_value,
a->get_matrix_handle(), data_x, beta_value, data_y));
} else {
oneapi::mkl::sparse::optimize_gemv(queue, trans, a->get_matrix_handle());
SPARSE_CALL(oneapi::mkl::sparse::gemv(queue, trans, alpha_value,
a->get_matrix_handle(), data_x,
beta_value, data_y));
}
}
template <typename Ty>
inline void spmm_impl(sycl::queue queue, oneapi::mkl::transpose trans_a,
oneapi::mkl::transpose trans_b, const void *alpha,
sparse_matrix_desc_t a,
std::shared_ptr<dense_matrix_desc> b, const void *beta,
std::shared_ptr<dense_matrix_desc> c) {
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
auto data_b =
dpct::detail::get_memory(reinterpret_cast<Ty *>(b->get_value()));
auto data_c =
dpct::detail::get_memory(reinterpret_cast<Ty *>(c->get_value()));
SPARSE_CALL(oneapi::mkl::sparse::gemm(
queue, b->get_layout(), trans_a, trans_b, alpha_value,
a->get_matrix_handle(), data_b, b->get_col_num(), b->get_leading_dim(),
beta_value, data_c, c->get_leading_dim()));
}
#undef SPARSE_CALL
} // namespace detail
/// Computes a sparse matrix-dense vector product: y = alpha * op(a) * x + beta * y.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans Specifies operation on input matrix.
/// \param [in] alpha Specifies the scalar alpha.
/// \param [in] a Specifies the sparse matrix a.
/// \param [in] x Specifies the dense vector x.
/// \param [in] beta Specifies the scalar beta.
/// \param [in, out] y Specifies the dense vector y.
/// \param [in] data_type Specifies the data type of \p a, \p x and \p y.
inline void spmv(sycl::queue queue, oneapi::mkl::transpose trans,
const void *alpha, sparse_matrix_desc_t a,
std::shared_ptr<dense_vector_desc> x, const void *beta,
std::shared_ptr<dense_vector_desc> y,
library_data_t data_type) {
switch (data_type) {
case library_data_t::real_float: {
detail::spmv_impl<float>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::real_double: {
detail::spmv_impl<double>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::complex_float: {
detail::spmv_impl<std::complex<float>>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::complex_double: {
detail::spmv_impl<std::complex<double>>(queue, trans, alpha, a, x, beta, y);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
/// Computes a sparse matrix-dense matrix product: c = alpha * op(a) * op(b) + beta * c.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans_a Specifies operation on input matrix a.
/// \param [in] trans_b Specifies operation on input matrix b.
/// \param [in] alpha Specifies the scalar alpha.
/// \param [in] a Specifies the sparse matrix a.
/// \param [in] b Specifies the dense matrix b.
/// \param [in] beta Specifies the scalar beta.
/// \param [in, out] c Specifies the dense matrix c.
/// \param [in] data_type Specifies the data type of \p a, \p b and \p c.
inline void spmm(sycl::queue queue, oneapi::mkl::transpose trans_a,
oneapi::mkl::transpose trans_b, const void *alpha,
sparse_matrix_desc_t a, std::shared_ptr<dense_matrix_desc> b,
const void *beta, std::shared_ptr<dense_matrix_desc> c,
library_data_t data_type) {
if (b->get_layout() != c->get_layout())
throw std::runtime_error("the layout of b and c are different");
switch (data_type) {
case library_data_t::real_float: {
detail::spmm_impl<float>(queue, trans_a, trans_b, alpha, a, b, beta, c);
break;
}
case library_data_t::real_double: {
detail::spmm_impl<double>(queue, trans_a, trans_b, alpha, a, b, beta, c);
break;
}
case library_data_t::complex_float: {
detail::spmm_impl<std::complex<float>>(queue, trans_a, trans_b, alpha, a, b,
beta, c);
break;
}
case library_data_t::complex_double: {
detail::spmm_impl<std::complex<double>>(queue, trans_a, trans_b, alpha, a,
b, beta, c);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
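/// Example (illustrative sketch; the d_* pointers are hypothetical device
/// allocations and q is an in-order queue such as dpct::get_in_order_queue()):
/// a real-float spmv using the generic descriptors defined above, with int32
/// row_ptr/col_ind arrays.
///
///   auto A = std::make_shared<dpct::sparse::sparse_matrix_desc>(
///       rows, cols, nnz, d_row_ptr, d_col_ind, d_val,
///       dpct::library_data_t::real_int32, dpct::library_data_t::real_int32,
///       oneapi::mkl::index_base::zero, dpct::library_data_t::real_float,
///       dpct::sparse::matrix_format::csr);
///   auto x = std::make_shared<dpct::sparse::dense_vector_desc>(
///       cols, d_x, dpct::library_data_t::real_float);
///   auto y = std::make_shared<dpct::sparse::dense_vector_desc>(
///       rows, d_y, dpct::library_data_t::real_float);
///   float alpha = 1.0f, beta = 0.0f;
///   dpct::sparse::spmv(q, oneapi::mkl::transpose::nontrans, &alpha, A, x,
///                      &beta, y, dpct::library_data_t::real_float);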
#endif
} // namespace sparse
} // namespace dpct
#endif // __DPCT_SPARSE_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/device.hpp | //==---- device.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DEVICE_HPP__
#define __DPCT_DEVICE_HPP__
#include <sycl/sycl.hpp>
#include <algorithm>
#include <array>
#include <cstring>
#include <iostream>
#include <mutex>
#include <set>
#include <sstream>
#include <map>
#include <vector>
#include <thread>
#if defined(__linux__)
#include <unistd.h>
#include <sys/syscall.h>
#endif
#if defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#endif
namespace dpct {
/// SYCL default exception handler
inline auto exception_handler = [](sycl::exception_list exceptions) {
for (std::exception_ptr const &e : exceptions) {
try {
std::rethrow_exception(e);
} catch (sycl::exception const &e) {
std::cerr << "Caught asynchronous SYCL exception:" << std::endl
<< e.what() << std::endl
<< "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
}
}
};
typedef sycl::event *event_ptr;
typedef sycl::queue *queue_ptr;
typedef char *device_ptr;
/// Destroy \p event pointed memory.
///
/// \param event Pointer to the sycl::event address.
static void destroy_event(event_ptr event) {
delete event;
}
class device_info {
public:
// get interface
const char *get_name() const { return _name; }
char *get_name() { return _name; }
template <typename WorkItemSizesTy = sycl::id<3>,
std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> ||
std::is_same_v<WorkItemSizesTy, int *>,
int> = 0>
auto get_max_work_item_sizes() const {
if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>)
return _max_work_item_sizes;
else
return _max_work_item_sizes_i;
}
template <typename WorkItemSizesTy = sycl::id<3>,
std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> ||
std::is_same_v<WorkItemSizesTy, int *>,
int> = 0>
auto get_max_work_item_sizes() {
if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>)
return _max_work_item_sizes;
else
return _max_work_item_sizes_i;
}
bool get_host_unified_memory() const { return _host_unified_memory; }
int get_major_version() const { return _major; }
int get_minor_version() const { return _minor; }
int get_integrated() const { return _integrated; }
int get_max_clock_frequency() const { return _frequency; }
int get_max_compute_units() const { return _max_compute_units; }
int get_max_work_group_size() const { return _max_work_group_size; }
int get_max_sub_group_size() const { return _max_sub_group_size; }
int get_max_work_items_per_compute_unit() const {
return _max_work_items_per_compute_unit;
}
int get_max_register_size_per_work_group() const {
return _max_register_size_per_work_group;
}
template <typename NDRangeSizeTy = size_t *,
std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> ||
std::is_same_v<NDRangeSizeTy, int *>,
int> = 0>
auto get_max_nd_range_size() const {
if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>)
return _max_nd_range_size;
else
return _max_nd_range_size_i;
}
template <typename NDRangeSizeTy = size_t *,
std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> ||
std::is_same_v<NDRangeSizeTy, int *>,
int> = 0>
auto get_max_nd_range_size() {
if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>)
return _max_nd_range_size;
else
return _max_nd_range_size_i;
}
size_t get_global_mem_size() const { return _global_mem_size; }
size_t get_local_mem_size() const { return _local_mem_size; }
/// Returns the maximum clock rate of the device's global memory in kHz. If the
/// compiler does not support this API, the default value of 3200000 kHz is
/// returned.
unsigned int get_memory_clock_rate() const { return _memory_clock_rate; }
/// Returns the maximum bus width between the device and memory in bits. If the
/// compiler does not support this API, the default value of 64 bits is
/// returned.
unsigned int get_memory_bus_width() const { return _memory_bus_width; }
uint32_t get_device_id() const { return _device_id; }
std::array<unsigned char, 16> get_uuid() const { return _uuid; }
// set interface
void set_name(const char* name) {
size_t length = strlen(name);
if (length < 256) {
std::memcpy(_name, name, length + 1);
} else {
std::memcpy(_name, name, 255);
_name[255] = '\0';
}
}
void set_max_work_item_sizes(const sycl::id<3> max_work_item_sizes) {
_max_work_item_sizes = max_work_item_sizes;
for (int i = 0; i < 3; ++i)
_max_work_item_sizes_i[i] = max_work_item_sizes[i];
}
void set_host_unified_memory(bool host_unified_memory) {
_host_unified_memory = host_unified_memory;
}
void set_major_version(int major) { _major = major; }
void set_minor_version(int minor) { _minor = minor; }
void set_integrated(int integrated) { _integrated = integrated; }
void set_max_clock_frequency(int frequency) { _frequency = frequency; }
void set_max_compute_units(int max_compute_units) {
_max_compute_units = max_compute_units;
}
void set_global_mem_size(size_t global_mem_size) {
_global_mem_size = global_mem_size;
}
void set_local_mem_size(size_t local_mem_size) {
_local_mem_size = local_mem_size;
}
void set_max_work_group_size(int max_work_group_size) {
_max_work_group_size = max_work_group_size;
}
void set_max_sub_group_size(int max_sub_group_size) {
_max_sub_group_size = max_sub_group_size;
}
void
set_max_work_items_per_compute_unit(int max_work_items_per_compute_unit) {
_max_work_items_per_compute_unit = max_work_items_per_compute_unit;
}
void set_max_nd_range_size(int max_nd_range_size[]) {
for (int i = 0; i < 3; i++) {
_max_nd_range_size[i] = max_nd_range_size[i];
_max_nd_range_size_i[i] = max_nd_range_size[i];
}
}
void set_memory_clock_rate(unsigned int memory_clock_rate) {
_memory_clock_rate = memory_clock_rate;
}
void set_memory_bus_width(unsigned int memory_bus_width) {
_memory_bus_width = memory_bus_width;
}
void
set_max_register_size_per_work_group(int max_register_size_per_work_group) {
_max_register_size_per_work_group = max_register_size_per_work_group;
}
void set_device_id(uint32_t device_id) {
_device_id = device_id;
}
void set_uuid(std::array<unsigned char, 16> uuid) {
_uuid = std::move(uuid);
}
private:
char _name[256];
sycl::id<3> _max_work_item_sizes;
int _max_work_item_sizes_i[3];
bool _host_unified_memory = false;
int _major;
int _minor;
int _integrated = 0;
int _frequency;
// Set estimated value 3200000 kHz as default value.
unsigned int _memory_clock_rate = 3200000;
// Set estimated value 64 bits as default value.
unsigned int _memory_bus_width = 64;
int _max_compute_units;
int _max_work_group_size;
int _max_sub_group_size;
int _max_work_items_per_compute_unit;
int _max_register_size_per_work_group;
size_t _global_mem_size;
size_t _local_mem_size;
size_t _max_nd_range_size[3];
int _max_nd_range_size_i[3];
uint32_t _device_id;
std::array<unsigned char, 16> _uuid;
};
/// dpct device extension
class device_ext : public sycl::device {
typedef std::mutex mutex_type;
public:
device_ext() : sycl::device(), _ctx(*this) {}
~device_ext() {
std::lock_guard<mutex_type> lock(m_mutex);
clear_queues();
}
device_ext(const sycl::device &base) : sycl::device(base), _ctx(*this) {
std::lock_guard<mutex_type> lock(m_mutex);
init_queues();
}
int is_native_atomic_supported() { return 0; }
int get_major_version() const {
int major, minor;
get_version(major, minor);
return major;
}
int get_minor_version() const {
int major, minor;
get_version(major, minor);
return minor;
}
int get_max_compute_units() const {
return get_device_info().get_max_compute_units();
}
/// Returns the maximum clock frequency of this device in kHz.
int get_max_clock_frequency() const {
return get_device_info().get_max_clock_frequency();
}
int get_integrated() const { return get_device_info().get_integrated(); }
int get_max_sub_group_size() const {
return get_device_info().get_max_sub_group_size();
}
int get_max_register_size_per_work_group() const {
return get_device_info().get_max_register_size_per_work_group();
}
int get_max_work_group_size() const {
return get_device_info().get_max_work_group_size();
}
int get_mem_base_addr_align() const {
return get_info<sycl::info::device::mem_base_addr_align>();
}
size_t get_global_mem_size() const {
return get_device_info().get_global_mem_size();
}
/// Get the number of bytes of free and total memory on the SYCL device.
/// \param [out] free_memory The number of bytes of free memory on the SYCL device.
/// \param [out] total_memory The number of bytes of total memory on the SYCL device.
void get_memory_info(size_t &free_memory, size_t &total_memory) {
#if (defined(__SYCL_COMPILER_VERSION) && __SYCL_COMPILER_VERSION >= 20221105)
if (!has(sycl::aspect::ext_intel_free_memory)) {
std::cerr << "get_memory_info: ext_intel_free_memory is not supported." << std::endl;
free_memory = 0;
} else {
free_memory = get_info<sycl::ext::intel::info::device::free_memory>();
}
#else
std::cerr << "get_memory_info: ext_intel_free_memory is not supported." << std::endl;
free_memory = 0;
#if defined(_MSC_VER) && !defined(__clang__)
#pragma message("Querying the number of bytes of free memory is not supported")
#else
#warning "Querying the number of bytes of free memory is not supported"
#endif
#endif
total_memory = get_device_info().get_global_mem_size();
}
void get_device_info(device_info &out) const {
device_info prop;
prop.set_name(get_info<sycl::info::device::name>().c_str());
int major, minor;
get_version(major, minor);
prop.set_major_version(major);
prop.set_minor_version(minor);
prop.set_max_work_item_sizes(
#if (__SYCL_COMPILER_VERSION && __SYCL_COMPILER_VERSION<20220902)
// oneAPI DPC++ compiler older than 2022/09/02, where max_work_item_sizes is an enum class element
get_info<sycl::info::device::max_work_item_sizes>());
#else
// SYCL 2020-conformant code, max_work_item_sizes is a struct templated by an int
get_info<sycl::info::device::max_work_item_sizes<3>>());
#endif
prop.set_host_unified_memory(
this->has(sycl::aspect::usm_host_allocations));
prop.set_max_clock_frequency(
get_info<sycl::info::device::max_clock_frequency>() * 1000);
prop.set_max_compute_units(
get_info<sycl::info::device::max_compute_units>());
prop.set_max_work_group_size(
get_info<sycl::info::device::max_work_group_size>());
prop.set_global_mem_size(
get_info<sycl::info::device::global_mem_size>());
prop.set_local_mem_size(get_info<sycl::info::device::local_mem_size>());
#if (defined(SYCL_EXT_INTEL_DEVICE_INFO) && SYCL_EXT_INTEL_DEVICE_INFO >= 6)
if (this->has(sycl::aspect::ext_intel_memory_clock_rate)) {
unsigned int tmp =
this->get_info<sycl::ext::intel::info::device::memory_clock_rate>();
if (tmp != 0)
prop.set_memory_clock_rate(1000 * tmp);
}
if (this->has(sycl::aspect::ext_intel_memory_bus_width)) {
prop.set_memory_bus_width(
this->get_info<sycl::ext::intel::info::device::memory_bus_width>());
}
if (this->has(sycl::aspect::ext_intel_device_id)) {
prop.set_device_id(
this->get_info<sycl::ext::intel::info::device::device_id>());
}
if (this->has(sycl::aspect::ext_intel_device_info_uuid)) {
prop.set_uuid(
this->get_info<sycl::ext::intel::info::device::uuid>());
}
#elif defined(_MSC_VER) && !defined(__clang__)
#pragma message("get_device_info: querying memory_clock_rate and \
memory_bus_width are not supported by the compiler used. \
Use 3200000 kHz as memory_clock_rate default value. \
Use 64 bits as memory_bus_width default value.")
#else
#warning "get_device_info: querying memory_clock_rate and \
memory_bus_width are not supported by the compiler used. \
Use 3200000 kHz as memory_clock_rate default value. \
Use 64 bits as memory_bus_width default value."
#endif
size_t max_sub_group_size = 1;
std::vector<size_t> sub_group_sizes =
get_info<sycl::info::device::sub_group_sizes>();
for (const auto &sub_group_size : sub_group_sizes) {
if (max_sub_group_size < sub_group_size)
max_sub_group_size = sub_group_size;
}
prop.set_max_sub_group_size(max_sub_group_size);
prop.set_max_work_items_per_compute_unit(
get_info<sycl::info::device::max_work_group_size>());
int max_nd_range_size[] = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
prop.set_max_nd_range_size(max_nd_range_size);
    // Estimated max register size per work group; feel free to update the
    // value according to device properties.
prop.set_max_register_size_per_work_group(65536);
out = prop;
}
device_info get_device_info() const {
device_info prop;
get_device_info(prop);
return prop;
}
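  /// Example (illustrative sketch, not part of the original header):
  /// inspecting the current device's properties through the dpct device
  /// manager defined later in this header.
  ///
  ///   dpct::device_info props;
  ///   dpct::get_current_device().get_device_info(props);
  ///   std::cout << props.get_name() << ", "
  ///             << props.get_global_mem_size() / (1024 * 1024) << " MB"
  ///             << std::endl;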
void reset() {
std::lock_guard<mutex_type> lock(m_mutex);
clear_queues();
init_queues();
}
sycl::queue &in_order_queue() { return *_q_in_order; }
sycl::queue &out_of_order_queue() { return *_q_out_of_order; }
sycl::queue &default_queue() {
#ifdef DPCT_USM_LEVEL_NONE
return out_of_order_queue();
#else
return in_order_queue();
#endif // DPCT_USM_LEVEL_NONE
}
void queues_wait_and_throw() {
std::unique_lock<mutex_type> lock(m_mutex);
std::vector<std::shared_ptr<sycl::queue>> current_queues(
_queues);
lock.unlock();
for (const auto &q : current_queues) {
q->wait_and_throw();
}
// Guard the destruct of current_queues to make sure the ref count is safe.
lock.lock();
}
sycl::queue *create_queue(bool enable_exception_handler = false) {
#ifdef DPCT_USM_LEVEL_NONE
return create_out_of_order_queue(enable_exception_handler);
#else
return create_in_order_queue(enable_exception_handler);
#endif // DPCT_USM_LEVEL_NONE
}
sycl::queue *create_in_order_queue(bool enable_exception_handler = false) {
std::lock_guard<mutex_type> lock(m_mutex);
return create_queue_impl(enable_exception_handler,
sycl::property::queue::in_order());
}
sycl::queue *create_out_of_order_queue(bool enable_exception_handler = false) {
std::lock_guard<mutex_type> lock(m_mutex);
return create_queue_impl(enable_exception_handler);
}
void destroy_queue(sycl::queue *&queue) {
std::lock_guard<mutex_type> lock(m_mutex);
_queues.erase(std::remove_if(_queues.begin(), _queues.end(),
[=](const std::shared_ptr<sycl::queue> &q) -> bool {
return q.get() == queue;
}),
_queues.end());
queue = nullptr;
}
void set_saved_queue(sycl::queue* q) {
std::lock_guard<mutex_type> lock(m_mutex);
_saved_queue = q;
}
sycl::queue *get_saved_queue() const {
std::lock_guard<mutex_type> lock(m_mutex);
return _saved_queue;
}
sycl::context get_context() const { return _ctx; }
private:
void clear_queues() {
_queues.clear();
_q_in_order = _q_out_of_order = _saved_queue = nullptr;
}
void init_queues() {
_q_in_order = create_queue_impl(true, sycl::property::queue::in_order());
_q_out_of_order = create_queue_impl(true);
_saved_queue = &default_queue();
}
/// Caller should acquire resource \p m_mutex before calling this function.
template <class... Properties>
sycl::queue *create_queue_impl(bool enable_exception_handler,
Properties... properties) {
sycl::async_handler eh = {};
if (enable_exception_handler) {
eh = exception_handler;
}
_queues.push_back(std::make_shared<sycl::queue>(
_ctx, *this, eh,
sycl::property_list(
#ifdef DPCT_PROFILING_ENABLED
sycl::property::queue::enable_profiling(),
#endif
properties...)));
return _queues.back().get();
}
void get_version(int &major, int &minor) const {
// Version string has the following format:
// a. OpenCL<space><major.minor><space><vendor-specific-information>
// b. <major.minor>
std::string ver;
ver = get_info<sycl::info::device::version>();
std::string::size_type i = 0;
while (i < ver.size()) {
if (isdigit(ver[i]))
break;
i++;
}
major = std::stoi(&(ver[i]));
while (i < ver.size()) {
if (ver[i] == '.')
break;
i++;
}
i++;
minor = std::stoi(&(ver[i]));
}
sycl::queue *_q_in_order, *_q_out_of_order;
sycl::queue *_saved_queue;
sycl::context _ctx;
std::vector<std::shared_ptr<sycl::queue>> _queues;
mutable mutex_type m_mutex;
};
static inline unsigned int get_tid() {
#if defined(__linux__)
return syscall(SYS_gettid);
#elif defined(_WIN64)
return GetCurrentThreadId();
#else
#error "Only support Windows and Linux."
#endif
}
/// device manager
class dev_mgr {
public:
device_ext ¤t_device() {
unsigned int dev_id=current_device_id();
check_id(dev_id);
return *_devs[dev_id];
}
device_ext &cpu_device() const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
if (_cpu_device == -1) {
throw std::runtime_error("no valid cpu device");
} else {
return *_devs[_cpu_device];
}
}
device_ext &get_device(unsigned int id) const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
check_id(id);
return *_devs[id];
}
unsigned int current_device_id() const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
auto it=_thread2dev_map.find(get_tid());
if(it != _thread2dev_map.end())
return it->second;
return DEFAULT_DEVICE_ID;
}
/// Select device with a device ID.
/// \param [in] id The id of the device which can
/// be obtained through get_device_id(const sycl::device).
void select_device(unsigned int id) {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
check_id(id);
_thread2dev_map[get_tid()]=id;
}
unsigned int device_count() { return _devs.size(); }
unsigned int get_device_id(const sycl::device &dev) {
unsigned int id = 0;
for(auto dev_item : _devs) {
if (*dev_item == dev) {
break;
}
id++;
}
return id;
}
template <class DeviceSelector>
std::enable_if_t<
std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>>
select_device(const DeviceSelector &selector = sycl::gpu_selector_v) {
sycl::device selected_device = sycl::device(selector);
unsigned int selected_device_id = get_device_id(selected_device);
select_device(selected_device_id);
}
/// Returns the instance of device manager singleton.
static dev_mgr &instance() {
static dev_mgr d_m;
return d_m;
}
dev_mgr(const dev_mgr &) = delete;
dev_mgr &operator=(const dev_mgr &) = delete;
dev_mgr(dev_mgr &&) = delete;
dev_mgr &operator=(dev_mgr &&) = delete;
private:
mutable std::recursive_mutex m_mutex;
dev_mgr() {
sycl::device default_device =
sycl::device(sycl::default_selector_v);
_devs.push_back(std::make_shared<device_ext>(default_device));
std::vector<sycl::device> sycl_all_devs =
sycl::device::get_devices(sycl::info::device_type::all);
// Collect other devices except for the default device.
if (default_device.is_cpu())
_cpu_device = 0;
for (auto &dev : sycl_all_devs) {
if (dev == default_device) {
continue;
}
_devs.push_back(std::make_shared<device_ext>(dev));
if (_cpu_device == -1 && dev.is_cpu()) {
_cpu_device = _devs.size() - 1;
}
}
}
void check_id(unsigned int id) const {
if (id >= _devs.size()) {
throw std::runtime_error("invalid device id");
}
}
std::vector<std::shared_ptr<device_ext>> _devs;
/// DEFAULT_DEVICE_ID is used if current_device_id() cannot find the current
/// thread id in _thread2dev_map, which means the default device should be used
/// for the current thread.
const unsigned int DEFAULT_DEVICE_ID = 0;
/// thread-id to device-id map.
std::map<unsigned int, unsigned int> _thread2dev_map;
int _cpu_device = -1;
};
/// Util function to get the default queue of the currently selected device,
/// which depends on the USM config. Returns the default out-of-order queue
/// when USM-none is enabled, otherwise returns the default in-order queue.
static inline sycl::queue &get_default_queue() {
return dev_mgr::instance().current_device().default_queue();
}
/// Util function to get the default in-order queue of the current device in
/// the dpct device manager.
static inline sycl::queue &get_in_order_queue() {
return dev_mgr::instance().current_device().in_order_queue();
}
/// Util function to get the default out-of-order queue of the current device
/// in the dpct device manager.
static inline sycl::queue &get_out_of_order_queue() {
return dev_mgr::instance().current_device().out_of_order_queue();
}
/// Util function to get the id of the current device in
/// the dpct device manager.
static inline unsigned int get_current_device_id() {
return dev_mgr::instance().current_device_id();
}
/// Util function to get the current device.
static inline device_ext &get_current_device() {
return dev_mgr::instance().current_device();
}
/// Util function to get a device by id.
static inline device_ext &get_device(unsigned int id) {
return dev_mgr::instance().get_device(id);
}
/// Util function to get the context of the default queue of current
/// device in dpct device manager.
static inline sycl::context get_default_context() {
return dpct::get_current_device().get_context();
}
/// Util function to get a CPU device.
static inline device_ext &cpu_device() {
return dev_mgr::instance().cpu_device();
}
static inline unsigned int select_device(unsigned int id) {
dev_mgr::instance().select_device(id);
return id;
}
template <class DeviceSelector>
static inline std::enable_if_t<
std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>>
select_device(const DeviceSelector &selector = sycl::gpu_selector_v) {
dev_mgr::instance().select_device(selector);
}
static inline unsigned int get_device_id(const sycl::device &dev){
return dev_mgr::instance().get_device_id(dev);
}
/// Util function to check whether a device supports the given sycl::aspect
/// values; throws a std::runtime_error for the first missing aspect.
inline void
has_capability_or_fail(const sycl::device &dev,
const std::initializer_list<sycl::aspect> &props) {
for (const auto &it : props) {
if (dev.has(it))
continue;
switch (it) {
case sycl::aspect::fp64:
throw std::runtime_error("'double' is not supported in '" +
dev.get_info<sycl::info::device::name>() +
"' device");
break;
case sycl::aspect::fp16:
throw std::runtime_error("'half' is not supported in '" +
dev.get_info<sycl::info::device::name>() +
"' device");
break;
default:
#define __SYCL_ASPECT(ASPECT, ID) \
case sycl::aspect::ASPECT: \
return #ASPECT;
#define __SYCL_ASPECT_DEPRECATED(ASPECT, ID, MESSAGE) __SYCL_ASPECT(ASPECT, ID)
#define __SYCL_ASPECT_DEPRECATED_ALIAS(ASPECT, ID, MESSAGE)
auto getAspectNameStr = [](sycl::aspect AspectNum) -> std::string {
switch (AspectNum) {
#include <sycl/info/aspects.def>
#include <sycl/info/aspects_deprecated.def>
default:
return "unknown aspect";
}
};
#undef __SYCL_ASPECT_DEPRECATED_ALIAS
#undef __SYCL_ASPECT_DEPRECATED
#undef __SYCL_ASPECT
throw std::runtime_error(
"'" + getAspectNameStr(it) + "' is not supported in '" +
dev.get_info<sycl::info::device::name>() + "' device");
}
break;
}
}
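/// Example (illustrative sketch, not part of the original header): checking
/// fp64 support on the device behind the default dpct queue before launching
/// double-precision kernels.
///
///   sycl::queue &q = dpct::get_default_queue();
///   dpct::has_capability_or_fail(q.get_device(), {sycl::aspect::fp64});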
} // namespace dpct
#endif // __DPCT_DEVICE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/memory.hpp | //==---- memory.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_HPP__
#define __DPCT_MEMORY_HPP__
#include "device.hpp"
#include <sycl/sycl.hpp>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <unordered_map>
#include <map>
#include <utility>
#include <thread>
#include <type_traits>
#if defined(__linux__)
#include <sys/mman.h>
#elif defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#else
#error "Only support Windows and Linux."
#endif
namespace dpct {
enum memcpy_direction {
host_to_host,
host_to_device,
device_to_host,
device_to_device,
automatic
};
enum memory_region {
global = 0, // device global memory
constant, // device constant memory
local, // device local memory
shared, // memory which can be accessed by host and device
};
typedef uint8_t byte_t;
/// Buffer type to be used in Memory Management runtime.
typedef sycl::buffer<byte_t> buffer_t;
/// Pitched 2D/3D memory data.
class pitched_data {
public:
pitched_data() : pitched_data(nullptr, 0, 0, 0) {}
pitched_data(void *data, size_t pitch, size_t x, size_t y)
: _data(data), _pitch(pitch), _x(x), _y(y) {}
void *get_data_ptr() { return _data; }
void set_data_ptr(void *data) { _data = data; }
size_t get_pitch() { return _pitch; }
void set_pitch(size_t pitch) { _pitch = pitch; }
size_t get_x() { return _x; }
void set_x(size_t x) { _x = x; };
size_t get_y() { return _y; }
void set_y(size_t y) { _y = y; }
private:
void *_data;
size_t _pitch, _x, _y;
};
namespace detail {
class mem_mgr {
mem_mgr() {
// Reserved address space, no real memory allocation happens here.
#if defined(__linux__)
mapped_address_space =
(byte_t *)mmap(nullptr, mapped_region_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#elif defined(_WIN64)
mapped_address_space = (byte_t *)VirtualAlloc(
NULL, // NULL specified as the base address parameter
mapped_region_size, // Size of allocation
MEM_RESERVE, // Allocate reserved pages
PAGE_NOACCESS); // Protection = no access
#else
#error "Only support Windows and Linux."
#endif
next_free = mapped_address_space;
};
public:
using buffer_id_t = int;
struct allocation {
buffer_t buffer;
byte_t *alloc_ptr;
size_t size;
};
~mem_mgr() {
#if defined(__linux__)
munmap(mapped_address_space, mapped_region_size);
#elif defined(_WIN64)
VirtualFree(mapped_address_space, 0, MEM_RELEASE);
#else
#error "Only support Windows and Linux."
#endif
};
mem_mgr(const mem_mgr &) = delete;
mem_mgr &operator=(const mem_mgr &) = delete;
mem_mgr(mem_mgr &&) = delete;
mem_mgr &operator=(mem_mgr &&) = delete;
/// Allocate
void *mem_alloc(size_t size) {
if (!size)
return nullptr;
std::lock_guard<std::mutex> lock(m_mutex);
if (next_free + size > mapped_address_space + mapped_region_size) {
throw std::runtime_error("dpct_malloc: out of memory for virtual memory pool");
}
// Allocation
sycl::range<1> r(size);
buffer_t buf(r);
allocation A{buf, next_free, size};
// Map allocation to device pointer
void *result = next_free;
m_map.emplace(next_free + size, A);
// Update pointer to the next free space.
next_free += (size + extra_padding + alignment - 1) & ~(alignment - 1);
return result;
}
/// Deallocate
void mem_free(const void *ptr) {
if (!ptr)
return;
std::lock_guard<std::mutex> lock(m_mutex);
auto it = get_map_iterator(ptr);
m_map.erase(it);
}
/// map: device pointer -> allocation(buffer, alloc_ptr, size)
allocation translate_ptr(const void *ptr) {
std::lock_guard<std::mutex> lock(m_mutex);
auto it = get_map_iterator(ptr);
return it->second;
}
/// Check whether the pointer is a device pointer managed by this memory manager.
bool is_device_ptr(const void *ptr) const {
std::lock_guard<std::mutex> lock(m_mutex);
return (mapped_address_space <= ptr) &&
(ptr < mapped_address_space + mapped_region_size);
}
/// Returns the instance of memory manager singleton.
static mem_mgr &instance() {
static mem_mgr m;
return m;
}
private:
std::map<byte_t *, allocation> m_map;
mutable std::mutex m_mutex;
byte_t *mapped_address_space;
byte_t *next_free;
const size_t mapped_region_size = 128ull * 1024 * 1024 * 1024;
const size_t alignment = 256;
/// This padding may be defined to some positive value to debug
/// out of bound accesses.
const size_t extra_padding = 0;
std::map<byte_t *, allocation>::iterator get_map_iterator(const void *ptr) {
auto it = m_map.upper_bound((byte_t *)ptr);
if (it == m_map.end()) {
// Not a virtual pointer.
throw std::runtime_error("can not get buffer from non-virtual pointer");
}
const allocation &alloc = it->second;
if (ptr < alloc.alloc_ptr) {
// Out of bound.
// This may happen if there's a gap between allocations due to alignment
// or extra padding and pointer points to this gap.
throw std::runtime_error("invalid virtual pointer");
}
return it;
}
};
template <class T, memory_region Memory, size_t Dimension> class accessor;
template <memory_region Memory, class T = byte_t> class memory_traits {
public:
static constexpr sycl::access::target target =
sycl::access::target::device;
static constexpr sycl::access_mode mode =
(Memory == constant) ? sycl::access_mode::read
: sycl::access_mode::read_write;
static constexpr size_t type_size = sizeof(T);
using element_t =
typename std::conditional<Memory == constant, const T, T>::type;
using value_t = typename std::remove_cv<T>::type;
template <size_t Dimension = 1>
using accessor_t = typename std::conditional<
Memory == local, sycl::local_accessor<value_t, Dimension>,
sycl::accessor<T, Dimension, mode, target>>::type;
using pointer_t = T *;
};
static inline void *dpct_malloc(size_t size, sycl::queue &q) {
#ifdef DPCT_USM_LEVEL_NONE
return mem_mgr::instance().mem_alloc(size * sizeof(byte_t));
#else
return sycl::malloc_device(size, q.get_device(), q.get_context());
#endif // DPCT_USM_LEVEL_NONE
}
#define PITCH_DEFAULT_ALIGN(x) (((x) + 31) & ~(0x1F))
static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y, size_t z,
sycl::queue &q) {
pitch = PITCH_DEFAULT_ALIGN(x);
return dpct_malloc(pitch * y * z, q);
}
/// Set \p value to the first \p size bytes starting from \p dev_ptr in \p q.
///
/// \param q The queue in which the operation is done.
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \returns An event representing the memset operation.
static inline sycl::event dpct_memset(sycl::queue &q, void *dev_ptr,
int value, size_t size) {
#ifdef DPCT_USM_LEVEL_NONE
auto &mm = mem_mgr::instance();
assert(mm.is_device_ptr(dev_ptr));
auto alloc = mm.translate_ptr(dev_ptr);
size_t offset = (byte_t *)dev_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.fill(acc, (byte_t)value);
});
#else
return q.memset(dev_ptr, value, size);
#endif // DPCT_USM_LEVEL_NONE
}
/// Set \p value to the 3D memory region pointed by \p data in \p q. \p size
/// specifies the 3D memory size to set.
///
/// \param q The queue in which the operation is done.
/// \param data Pointer to the device memory region.
/// \param value Value to be set.
/// \param size Memory region size.
/// \returns An event list representing the memset operations.
static inline std::vector<sycl::event>
dpct_memset(sycl::queue &q, pitched_data data, int value,
sycl::range<3> size) {
std::vector<sycl::event> event_list;
size_t slice = data.get_pitch() * data.get_y();
unsigned char *data_surface = (unsigned char *)data.get_data_ptr();
for (size_t z = 0; z < size.get(2); ++z) {
unsigned char *data_ptr = data_surface;
for (size_t y = 0; y < size.get(1); ++y) {
event_list.push_back(dpct_memset(q, data_ptr, value, size.get(0)));
data_ptr += data.get_pitch();
}
data_surface += slice;
}
return event_list;
}
/// memset 2D matrix with pitch.
static inline std::vector<sycl::event>
dpct_memset(sycl::queue &q, void *ptr, size_t pitch, int val, size_t x,
size_t y) {
return dpct_memset(q, pitched_data(ptr, pitch, x, 1), val,
sycl::range<3>(x, y, 1));
}
enum class pointer_access_attribute {
host_only = 0,
device_only,
host_device,
end
};
static pointer_access_attribute get_pointer_attribute(sycl::queue &q,
const void *ptr) {
#ifdef DPCT_USM_LEVEL_NONE
return mem_mgr::instance().is_device_ptr(ptr)
? pointer_access_attribute::device_only
: pointer_access_attribute::host_only;
#else
switch (sycl::get_pointer_type(ptr, q.get_context())) {
case sycl::usm::alloc::unknown:
return pointer_access_attribute::host_only;
case sycl::usm::alloc::device:
return pointer_access_attribute::device_only;
case sycl::usm::alloc::shared:
case sycl::usm::alloc::host:
return pointer_access_attribute::host_device;
}
#endif
}
static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr,
const void *from_ptr,
memcpy_direction dir) {
switch (dir) {
case memcpy_direction::host_to_host:
case memcpy_direction::host_to_device:
case memcpy_direction::device_to_host:
case memcpy_direction::device_to_device:
return dir;
case memcpy_direction::automatic: {
// table[to_attribute][from_attribute]
static const memcpy_direction
direction_table[static_cast<unsigned>(pointer_access_attribute::end)]
[static_cast<unsigned>(pointer_access_attribute::end)] =
{{memcpy_direction::host_to_host,
memcpy_direction::device_to_host,
memcpy_direction::host_to_host},
{memcpy_direction::host_to_device,
memcpy_direction::device_to_device,
memcpy_direction::device_to_device},
{memcpy_direction::host_to_host,
memcpy_direction::device_to_device,
memcpy_direction::device_to_device}};
return direction_table[static_cast<unsigned>(get_pointer_attribute(
q, to_ptr))][static_cast<unsigned>(get_pointer_attribute(q, from_ptr))];
}
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
}
static sycl::event
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction,
const std::vector<sycl::event> &dep_events = {}) {
if (!size)
return sycl::event{};
#ifdef DPCT_USM_LEVEL_NONE
auto &mm = mem_mgr::instance();
auto real_direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
switch (real_direction) {
case host_to_host:
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
cgh.host_task([=] { std::memcpy(to_ptr, from_ptr, size); });
});
case host_to_device: {
auto alloc = mm.translate_ptr(to_ptr);
size_t offset = (byte_t *)to_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.copy(from_ptr, acc);
});
}
case device_to_host: {
auto alloc = mm.translate_ptr(from_ptr);
size_t offset = (byte_t *)from_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.copy(acc, to_ptr);
});
}
case device_to_device: {
auto to_alloc = mm.translate_ptr(to_ptr);
auto from_alloc = mm.translate_ptr(from_ptr);
size_t to_offset = (byte_t *)to_ptr - to_alloc.alloc_ptr;
size_t from_offset = (byte_t *)from_ptr - from_alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto to_o = sycl::id<1>(to_offset);
auto from_o = sycl::id<1>(from_offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
to_acc(to_alloc.buffer, cgh, r, to_o);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
from_acc(from_alloc.buffer, cgh, r, from_o);
cgh.copy(from_acc, to_acc);
});
}
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
#else
return q.memcpy(to_ptr, from_ptr, size, dep_events);
#endif // DPCT_USM_LEVEL_NONE
}
// Compute the linear extent covered by a 3D copy so that the accessor range
// does not exceed the underlying allocation.
static inline size_t get_copy_range(sycl::range<3> size, size_t slice,
size_t pitch) {
return slice * (size.get(2) - 1) + pitch * (size.get(1) - 1) + size.get(0);
}
static inline size_t get_offset(sycl::id<3> id, size_t slice,
size_t pitch) {
return slice * id.get(2) + pitch * id.get(1) + id.get(0);
}
/// Copy the 3D region specified by \p size from the 3D matrix specified by
/// \p from_ptr and \p from_range to the one specified by \p to_ptr and \p to_range.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
sycl::range<3> to_range, sycl::range<3> from_range,
sycl::id<3> to_id, sycl::id<3> from_id,
sycl::range<3> size, memcpy_direction direction,
const std::vector<sycl::event> &dep_events = {}) {
// RAII for host pointer
class host_buffer {
void *_buf;
size_t _size;
sycl::queue &_q;
const std::vector<sycl::event> &_deps; // events the deferred free depends on
public:
host_buffer(size_t size, sycl::queue &q,
const std::vector<sycl::event> &deps)
: _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {}
void *get_ptr() const { return _buf; }
size_t get_size() const { return _size; }
~host_buffer() {
if (_buf) {
_q.submit([&](sycl::handler &cgh) {
cgh.depends_on(_deps);
cgh.host_task([buf = _buf] { std::free(buf); });
});
}
}
};
std::vector<sycl::event> event_list;
size_t to_slice = to_range.get(1) * to_range.get(0),
from_slice = from_range.get(1) * from_range.get(0);
unsigned char *to_surface =
(unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0));
const unsigned char *from_surface =
(const unsigned char *)from_ptr +
get_offset(from_id, from_slice, from_range.get(0));
if (to_slice == from_slice && to_slice == size.get(1) * size.get(0)) {
return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2),
direction, dep_events)};
}
direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
size_t size_slice = size.get(1) * size.get(0);
switch (direction) {
case host_to_host:
for (size_t z = 0; z < size.get(2); ++z) {
unsigned char *to_ptr = to_surface;
const unsigned char *from_ptr = from_surface;
if (to_range.get(0) == from_range.get(0) &&
to_range.get(0) == size.get(0)) {
event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice,
direction, dep_events));
} else {
for (size_t y = 0; y < size.get(1); ++y) {
event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0),
direction, dep_events));
to_ptr += to_range.get(0);
from_ptr += from_range.get(0);
}
}
to_surface += to_slice;
from_surface += from_slice;
}
break;
case host_to_device: {
host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q,
event_list);
std::vector<sycl::event> host_events;
if (to_slice == size_slice) {
// Copy host data to a temp host buffer with the shape of target.
host_events =
dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size,
host_to_host, dep_events);
} else {
// Copy host data to a temp host buffer with the shape of target.
host_events = dpct_memcpy(
q, buf.get_ptr(), from_surface, to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
// The destination may contain padding bytes whose contents must be
// preserved, so prefill the temp buffer with the current device data.
std::vector<sycl::event>{
dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(),
device_to_host, dep_events)});
}
// Copy from temp host buffer to device with only one submit.
event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(),
buf.get_size(), host_to_device,
host_events));
break;
}
case device_to_host: {
host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q,
event_list);
// Copy from host temp buffer to host target with reshaping.
event_list = dpct_memcpy(
q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0),
sycl::id<3>(0, 0, 0), size, host_to_host,
// Copy from device to temp host buffer with only one submit.
std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface,
buf.get_size(),
device_to_host, dep_events)});
break;
}
case device_to_device:
#ifdef DPCT_USM_LEVEL_NONE
{
auto &mm = mem_mgr::instance();
auto to_alloc = mm.translate_ptr(to_surface);
auto from_alloc = mm.translate_ptr(from_surface);
size_t to_offset = (byte_t *)to_surface - to_alloc.alloc_ptr;
size_t from_offset = (byte_t *)from_surface - from_alloc.alloc_ptr;
event_list.push_back(q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto to_o = sycl::id<1>(to_offset);
auto from_o = sycl::id<1>(from_offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
to_acc(to_alloc.buffer, cgh,
get_copy_range(size, to_slice, to_range.get(0)), to_o);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
from_acc(from_alloc.buffer, cgh,
get_copy_range(size, from_slice, from_range.get(0)), from_o);
cgh.parallel_for<class dpct_memcpy_3d_detail_usmnone>(
size,
[=](sycl::id<3> id) {
to_acc[get_offset(id, to_slice, to_range.get(0))] =
from_acc[get_offset(id, from_slice, from_range.get(0))];
});
}));
}
#else
event_list.push_back(q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
cgh.parallel_for<class dpct_memcpy_3d_detail>(
size,
[=](sycl::id<3> id) {
to_surface[get_offset(id, to_slice, to_range.get(0))] =
from_surface[get_offset(id, from_slice, from_range.get(0))];
});
}));
#endif
break;
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
return event_list;
}
/// memcpy 2D/3D matrix specified by pitched_data.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id,
pitched_data from, sycl::id<3> from_id, sycl::range<3> size,
memcpy_direction direction = automatic) {
return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(),
sycl::range<3>(to.get_pitch(), to.get_y(), 1),
sycl::range<3>(from.get_pitch(), from.get_y(), 1), to_id, from_id,
size, direction);
}
/// memcpy 2D matrix with pitch.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
size_t to_pitch, size_t from_pitch, size_t x, size_t y,
memcpy_direction direction = automatic) {
return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1),
sycl::range<3>(from_pitch, y, 1),
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0),
sycl::range<3>(x, y, 1), direction);
}
namespace deprecated {
template <typename T, sycl::usm::alloc AllocKind>
class usm_allocator {
private:
using Alloc = sycl::usm_allocator<T, AllocKind>;
Alloc _impl;
public:
using value_type = typename std::allocator_traits<Alloc>::value_type;
using pointer = typename std::allocator_traits<Alloc>::pointer;
using const_pointer = typename std::allocator_traits<Alloc>::const_pointer;
using void_pointer = typename std::allocator_traits<Alloc>::void_pointer;
using const_void_pointer =
typename std::allocator_traits<Alloc>::const_void_pointer;
using reference = typename std::allocator_traits<Alloc>::value_type &;
using const_reference =
const typename std::allocator_traits<Alloc>::value_type &;
using difference_type =
typename std::allocator_traits<Alloc>::difference_type;
using size_type = typename std::allocator_traits<Alloc>::size_type;
using propagate_on_container_copy_assignment = typename std::allocator_traits<
Alloc>::propagate_on_container_copy_assignment;
using propagate_on_container_move_assignment = typename std::allocator_traits<
Alloc>::propagate_on_container_move_assignment;
using propagate_on_container_swap =
typename std::allocator_traits<Alloc>::propagate_on_container_swap;
using is_always_equal =
typename std::allocator_traits<Alloc>::is_always_equal;
template <typename U> struct rebind {
typedef usm_allocator<U, AllocKind> other;
};
usm_allocator() : _impl(dpct::get_default_queue()) {}
~usm_allocator() {}
usm_allocator(const usm_allocator &other) : _impl(other._impl) {}
usm_allocator(usm_allocator &&other) : _impl(std::move(other._impl)) {}
pointer address(reference r) { return &r; }
const_pointer address(const_reference r) { return &r; }
pointer allocate(size_type cnt, const_void_pointer hint = nullptr) {
return std::allocator_traits<Alloc>::allocate(_impl, cnt, hint);
}
void deallocate(pointer p, size_type cnt) {
std::allocator_traits<Alloc>::deallocate(_impl, p, cnt);
}
size_type max_size() const {
return std::allocator_traits<Alloc>::max_size(_impl);
}
bool operator==(const usm_allocator &other) const { return _impl == other._impl; }
bool operator!=(const usm_allocator &other) const { return _impl != other._impl; }
};
} // namespace deprecated
inline void dpct_free(void *ptr,
const sycl::queue &q) {
if (ptr) {
#ifdef DPCT_USM_LEVEL_NONE
detail::mem_mgr::instance().mem_free(ptr);
#else
sycl::free(ptr, q.get_context());
#endif // DPCT_USM_LEVEL_NONE
}
}
} // namespace detail
#ifdef DPCT_USM_LEVEL_NONE
/// Check whether the pointer \p ptr is a device pointer.
///
/// \param ptr The pointer to be checked.
/// \returns true if \p ptr is a device pointer.
template<class T>
static inline bool is_device_ptr(T ptr) {
if constexpr (std::is_pointer<T>::value) {
return detail::mem_mgr::instance().is_device_ptr(ptr);
}
return false;
}
#endif
/// Get the buffer and the offset of a piece of memory pointed to by \p ptr.
///
/// \param ptr Pointer to a piece of memory.
/// If NULL is passed as an argument, an exception will be thrown.
/// \returns a pair containing both the buffer and the offset.
static std::pair<buffer_t, size_t> get_buffer_and_offset(const void *ptr) {
if (ptr) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
size_t offset = (byte_t *)ptr - alloc.alloc_ptr;
return std::make_pair(alloc.buffer, offset);
} else {
throw std::runtime_error(
"NULL pointer argument in get_buffer_and_offset function is invalid");
}
}
/// Get the data pointed to by \p ptr as a 1D buffer reinterpreted as type T.
template <typename T> static sycl::buffer<T> get_buffer(const void *ptr) {
if (!ptr)
return sycl::buffer<T>(sycl::range<1>(0));
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
return alloc.buffer.reinterpret<T>(
sycl::range<1>(alloc.size / sizeof(T)));
}
/// Get the buffer of a piece of memory pointed to by \p ptr.
///
/// \param ptr Pointer to a piece of memory.
/// \returns the buffer.
static buffer_t get_buffer(const void *ptr) {
return detail::mem_mgr::instance().translate_ptr(ptr).buffer;
}
/// A wrapper class contains an accessor and an offset.
template <typename dataT,
sycl::access_mode accessMode = sycl::access_mode::read_write>
class access_wrapper {
sycl::accessor<byte_t, 1, accessMode> accessor;
size_t offset;
public:
/// Construct the accessor wrapper for memory pointed by \p ptr.
///
/// \param ptr Pointer to memory.
/// \param cgh The command group handler.
access_wrapper(const void *ptr, sycl::handler &cgh)
: accessor(get_buffer(ptr).get_access<accessMode>(cgh)), offset(0) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
offset = (byte_t *)ptr - alloc.alloc_ptr;
}
/// Get the device pointer.
///
/// \returns a device pointer with offset.
dataT get_raw_pointer() const { return (dataT)(&accessor[0] + offset); }
};
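// Illustrative sketch (hypothetical helper, assuming \p ptr was allocated via
// dpct_malloc with DPCT_USM_LEVEL_NONE, i.e. it is a virtual pointer backed by
// a buffer): shows the intended use of access_wrapper inside a command group
// to obtain a typed device pointer in the kernel.
inline sycl::event example_fill_with_access_wrapper(sycl::queue &q, void *ptr,
                                                    size_t n) {
  return q.submit([&](sycl::handler &cgh) {
    access_wrapper<float *> acc(ptr, cgh);
    cgh.parallel_for(sycl::range<1>(n), [=](sycl::id<1> i) {
      float *data = acc.get_raw_pointer();
      data[i[0]] = 0.f;
    });
  });
}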
/// Get the accessor for memory pointed by \p ptr.
///
/// \param ptr Pointer to memory.
/// If NULL is passed as an argument, an exception will be thrown.
/// \param cgh The command group handler.
/// \returns an accessor.
template <sycl::access_mode accessMode = sycl::access_mode::read_write>
static sycl::accessor<byte_t, 1, accessMode>
get_access(const void *ptr, sycl::handler &cgh) {
if (ptr) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
return alloc.buffer.get_access<accessMode>(cgh);
} else {
throw std::runtime_error(
"NULL pointer argument in get_access function is invalid");
}
}
/// Allocate memory block on the device.
/// \param num_bytes Number of bytes to allocate.
/// \param q Queue to execute the allocate task.
/// \returns A pointer to the newly allocated memory.
template <typename T>
static inline void *dpct_malloc(T num_bytes,
sycl::queue &q = get_default_queue()) {
return detail::dpct_malloc(static_cast<size_t>(num_bytes), q);
}
/// Get the host pointer from the buffer that the virtual pointer \p ptr maps to.
/// \param ptr Virtual pointer mapped to a device buffer.
/// \returns A host pointer.
template <typename T> static inline T *get_host_ptr(const void *ptr) {
auto BufferOffset = get_buffer_and_offset(ptr);
auto host_ptr =
BufferOffset.first.get_host_access()
.get_pointer();
return (T *)(host_ptr + BufferOffset.second);
}
/// Allocate memory block for 3D array on the device.
/// \param size Size of the memory block, in bytes.
/// \param q Queue to execute the allocate task.
/// \returns A pitched_data object which stores the memory info.
static inline pitched_data
dpct_malloc(sycl::range<3> size, sycl::queue &q = get_default_queue()) {
pitched_data pitch(nullptr, 0, size.get(0), size.get(1));
size_t pitch_size;
pitch.set_data_ptr(detail::dpct_malloc(pitch_size, size.get(0), size.get(1),
size.get(2), q));
pitch.set_pitch(pitch_size);
return pitch;
}
/// Allocate memory block for 2D array on the device.
/// \param [out] pitch Aligned size of x in bytes.
/// \param x Range in dim x.
/// \param y Range in dim y.
/// \param q Queue to execute the allocate task.
/// \returns A pointer to the newly allocated memory.
static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y,
sycl::queue &q = get_default_queue()) {
return detail::dpct_malloc(pitch, x, y, 1, q);
}
/// Free the device memory pointed to by \p ptr.
/// \param ptr Pointer to free.
/// \param q Queue to execute the free task.
/// \returns no return value.
static inline void dpct_free(void *ptr,
sycl::queue &q = get_default_queue()) {
detail::dpct_free(ptr, q);
}
/// Free the device memory pointed to by the pointers in \p pointers, which are
/// associated with \p q, after \p events have completed.
///
/// \param pointers The pointers to the device memory requested to be freed.
/// \param events The events to wait for before freeing.
/// \param q The sycl::queue the memory relates to.
inline void async_dpct_free(const std::vector<void *> &pointers,
const std::vector<sycl::event> &events,
sycl::queue &q = get_default_queue()) {
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(events);
cgh.host_task([=] {
for (auto p : pointers)
if (p) {
detail::dpct_free(p, q);
}
});
});
}
/// Synchronously copies \p size bytes from the address specified by \p from_ptr
/// to the address specified by \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device,
/// \a device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction).wait();
}
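// Illustrative sketch (hypothetical helper, not part of the original dpct
// API): a host-to-device-to-host round trip using the synchronous wrappers
// above, the typical shape of migrated cudaMalloc/cudaMemcpy/cudaFree call
// sites.
inline void example_memcpy_round_trip(const float *host_src, float *host_dst,
                                      size_t count,
                                      sycl::queue &q = get_default_queue()) {
  size_t bytes = count * sizeof(float);
  void *dev = dpct_malloc(bytes, q);
  dpct_memcpy(dev, host_src, bytes, host_to_device, q);
  dpct_memcpy(host_dst, dev, bytes, device_to_host, q);
  dpct_free(dev, q);
}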
/// Asynchronously copies \p size bytes from the address specified by \p
/// from_ptr to the address specified by \p to_ptr. The value of \p direction is
/// used to set the copy direction, it can be \a host_to_host, \a
/// host_to_device, \a device_to_host, \a device_to_device or \a automatic. The
/// return of the function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void async_dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction);
}
/// Synchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while \p
/// from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(void *to_ptr, size_t to_pitch,
const void *from_ptr, size_t from_pitch,
size_t x, size_t y,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
sycl::event::wait(detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch,
from_pitch, x, y, direction));
}
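// Illustrative sketch (hypothetical helper): upload a tightly packed w x h
// float matrix from host memory into a pitched device allocation, combining
// the 2D dpct_malloc and dpct_memcpy overloads above.
inline void *example_upload_matrix_2d(const float *host, size_t w, size_t h,
                                      size_t &dev_pitch,
                                      sycl::queue &q = get_default_queue()) {
  void *dev = dpct_malloc(dev_pitch, w * sizeof(float), h, q);
  dpct_memcpy(dev, dev_pitch, host, w * sizeof(float), w * sizeof(float), h,
              host_to_device, q);
  return dev;
}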
/// Asynchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while
/// \p from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The return of the
/// function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void
async_dpct_memcpy(void *to_ptr, size_t to_pitch, const void *from_ptr,
size_t from_pitch, size_t x, size_t y,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch, from_pitch, x, y,
direction);
}
/// Synchronously copies a subset of a 3D matrix specified by \p to to another
/// 3D matrix specified by \p from. The from and to position info are specified
/// by \p from_pos and \p to_pos. The copied matrix size is specified by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The function will return after the copy is completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of source.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(pitched_data to, sycl::id<3> to_pos,
pitched_data from, sycl::id<3> from_pos,
sycl::range<3> size,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
sycl::event::wait(
detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction));
}
/// Asynchronously copies a subset of a 3D matrix specified by \p to to another
/// 3D matrix specified by \p from. The from and to position info are specified
/// by \p from_pos and \p to_pos. The copied matrix size is specified by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The return of the function does NOT guarantee the copy is
/// completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of source.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void
async_dpct_memcpy(pitched_data to, sycl::id<3> to_pos, pitched_data from,
sycl::id<3> from_pos, sycl::range<3> size,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction);
}
/// Synchronously sets \p value to the first \p size bytes starting from \p
/// dev_ptr. The function will return after the memset operation is completed.
///
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static void dpct_memset(void *dev_ptr, int value, size_t size,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, dev_ptr, value, size).wait();
}
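// Illustrative sketch (hypothetical helper): allocate device memory and zero
// it, the usual shape of a migrated cudaMalloc followed by cudaMemset.
inline void *example_alloc_zeroed(size_t bytes,
                                  sycl::queue &q = get_default_queue()) {
  void *dev = dpct_malloc(bytes, q);
  dpct_memset(dev, 0, bytes, q);
  return dev;
}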
/// Asynchronously sets \p value to the first \p size bytes starting from \p
/// dev_ptr. The return of the function does NOT guarantee the memset operation
/// is completed.
///
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \returns no return value.
static void async_dpct_memset(void *dev_ptr, int value, size_t size,
sycl::queue &q = dpct::get_default_queue()) {
detail::dpct_memset(q, dev_ptr, value, size);
}
/// Sets \p val to the 2D memory region pointed to by \p ptr in \p q. \p x and
/// \p y specify the 2D memory region size to set. \p pitch is the number of
/// bytes in the linear dimension, including padding bytes. The function will
/// return after the memset operation is completed.
///
/// \param ptr Pointer to the device memory region.
/// \param pitch Bytes in the linear dimension, including padding bytes.
/// \param val Value to be set.
/// \param x The memory size to set in the linear dimension.
/// \param y The memory size to set in the second dimension.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void dpct_memset(void *ptr, size_t pitch, int val, size_t x,
size_t y,
sycl::queue &q = get_default_queue()) {
sycl::event::wait(detail::dpct_memset(q, ptr, pitch, val, x, y));
}
/// Sets \p val to the 2D memory region pointed to by \p ptr in \p q. \p x and
/// \p y specify the 2D memory region size to set. \p pitch is the number of
/// bytes in the linear dimension, including padding bytes. The return of the
/// function does NOT guarantee the memset operation is completed.
///
/// \param ptr Pointer to the device memory region.
/// \param pitch Bytes in the linear dimension, including padding bytes.
/// \param val Value to be set.
/// \param x The memory size to set in the linear dimension.
/// \param y The memory size to set in the second dimension.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(void *ptr, size_t pitch, int val, size_t x,
size_t y,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, ptr, pitch, val, x, y);
}
/// Sets \p val to the 3D memory region specified by \p pitch in \p q. \p size
/// specifies the 3D memory region size to set. The function will return after
/// the memset operation is completed.
///
/// \param pitch Specifies the 3D memory region.
/// \param val Value to be set.
/// \param size The 3D memory region size to set.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void dpct_memset(pitched_data pitch, int val,
sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
sycl::event::wait(detail::dpct_memset(q, pitch, val, size));
}
/// Sets \p val to the 3D memory region specified by \p pitch in \p q. \p size
/// specifies the 3D memory region size to set. The return of the function does
/// NOT guarantee the memset operation is completed.
///
/// \param pitch Specifies the 3D memory region.
/// \param val Value to be set.
/// \param size The 3D memory region size to set.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(pitched_data pitch, int val,
sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, pitch, val, size);
}
/// dpct accessor used as device function parameter.
template <class T, memory_region Memory, size_t Dimension> class accessor;
template <class T, memory_region Memory> class accessor<T, Memory, 3> {
public:
using memory_t = detail::memory_traits<Memory, T>;
using element_t = typename memory_t::element_t;
using pointer_t = typename memory_t::pointer_t;
using accessor_t = typename memory_t::template accessor_t<3>;
accessor(pointer_t data, const sycl::range<3> &in_range)
: _data(data), _range(in_range) {}
template <memory_region M = Memory>
accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
: accessor(acc, acc.get_range()) {}
accessor(const accessor_t &acc, const sycl::range<3> &in_range)
: accessor(acc.get_pointer(), in_range) {}
accessor<T, Memory, 2> operator[](size_t index) const {
sycl::range<2> sub(_range.get(1), _range.get(2));
return accessor<T, Memory, 2>(_data + index * sub.size(), sub);
}
pointer_t get_ptr() const { return _data; }
private:
pointer_t _data;
sycl::range<3> _range;
};
template <class T, memory_region Memory> class accessor<T, Memory, 2> {
public:
using memory_t = detail::memory_traits<Memory, T>;
using element_t = typename memory_t::element_t;
using pointer_t = typename memory_t::pointer_t;
using accessor_t = typename memory_t::template accessor_t<2>;
accessor(pointer_t data, const sycl::range<2> &in_range)
: _data(data), _range(in_range) {}
template <memory_region M = Memory>
accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
: accessor(acc, acc.get_range()) {}
accessor(const accessor_t &acc, const sycl::range<2> &in_range)
: accessor(acc.get_pointer(), in_range) {}
pointer_t operator[](size_t index) const {
return _data + _range.get(1) * index;
}
pointer_t get_ptr() const { return _data; }
private:
pointer_t _data;
sycl::range<2> _range;
};
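// Illustrative sketch (hypothetical helper): view a flat, host-accessible
// pointer (for example a shared USM allocation) as a rows x cols matrix
// through the 2D dpct accessor above and read one element. The accessor only
// stores the pointer and the range, so operator[] performs ordinary row-major
// index arithmetic.
inline float example_read_2d(float *flat, size_t rows, size_t cols, size_t r,
                             size_t c) {
  accessor<float, global, 2> view(flat, sycl::range<2>(rows, cols));
  return view[r][c]; // equivalent to flat[r * cols + c]
}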
namespace detail {
/// Device variable with address space of shared, global or constant.
template <class T, memory_region Memory, size_t Dimension>
class device_memory {
public:
using accessor_t =
typename detail::memory_traits<Memory, T>::template accessor_t<Dimension>;
using value_t = typename detail::memory_traits<Memory, T>::value_t;
using dpct_accessor_t = dpct::accessor<T, Memory, Dimension>;
device_memory() : device_memory(sycl::range<Dimension>(1)) {}
/// Constructor of 1-D array with initializer list
device_memory(
const sycl::range<Dimension> &in_range,
std::initializer_list<value_t> &&init_list)
: device_memory(in_range) {
assert(init_list.size() <= in_range.size());
_host_ptr = (value_t *)std::malloc(_size);
std::memset(_host_ptr, 0, _size);
std::memcpy(_host_ptr, init_list.begin(), init_list.size() * sizeof(T));
}
/// Constructor of 2-D array with initializer list
template <size_t D = Dimension>
device_memory(
const typename std::enable_if<D == 2, sycl::range<2>>::type &in_range,
std::initializer_list<std::initializer_list<value_t>> &&init_list)
: device_memory(in_range) {
assert(init_list.size() <= in_range[0]);
_host_ptr = (value_t *)std::malloc(_size);
std::memset(_host_ptr, 0, _size);
auto tmp_data = _host_ptr;
for (auto sub_list : init_list) {
assert(sub_list.size() <= in_range[1]);
std::memcpy(tmp_data, sub_list.begin(), sub_list.size() * sizeof(T));
tmp_data += in_range[1];
}
}
/// Constructor with range
device_memory(const sycl::range<Dimension> &range_in)
: _size(range_in.size() * sizeof(T)), _range(range_in), _reference(false),
_host_ptr(nullptr), _device_ptr(nullptr) {
static_assert(
(Memory == global) || (Memory == constant) || (Memory == shared),
"device memory region should be global, constant or shared");
// Make sure that singleton class mem_mgr and dev_mgr will destruct later
// than this.
detail::mem_mgr::instance();
dev_mgr::instance();
}
/// Constructor with range
template <class... Args>
device_memory(Args... Arguments)
: device_memory(sycl::range<Dimension>(Arguments...)) {}
~device_memory() {
if (_device_ptr && !_reference)
dpct::dpct_free(_device_ptr);
if (_host_ptr)
std::free(_host_ptr);
}
/// Allocate memory with the default queue, and initialize it if an initial value was provided.
void init() {
init(dpct::get_default_queue());
}
/// Allocate memory with the specified queue, and initialize it if an initial value was provided.
void init(sycl::queue &q) {
if (_device_ptr)
return;
if (!_size)
return;
allocate_device(q);
if (_host_ptr)
detail::dpct_memcpy(q, _device_ptr, _host_ptr, _size, host_to_device);
}
/// Re-bind the variable to an existing device pointer without taking ownership.
void assign(value_t *src, size_t size) {
this->~device_memory();
new (this) device_memory(src, size);
}
/// Get the memory pointer of the memory object: a virtual pointer when USM
/// is not used, and a device pointer when USM is used.
value_t *get_ptr() {
return get_ptr(get_default_queue());
}
/// Get the memory pointer of the memory object: a virtual pointer when USM
/// is not used, and a device pointer when USM is used.
value_t *get_ptr(sycl::queue &q) {
init(q);
return _device_ptr;
}
/// Get the device memory object size in bytes.
size_t get_size() { return _size; }
template <size_t D = Dimension>
typename std::enable_if<D == 1, T>::type &operator[](size_t index) {
init();
#ifdef DPCT_USM_LEVEL_NONE
return dpct::get_buffer<typename std::enable_if<D == 1, T>::type>(
_device_ptr)
.template get_access<sycl::access_mode::read_write>()[index];
#else
return _device_ptr[index];
#endif // DPCT_USM_LEVEL_NONE
}
#ifdef DPCT_USM_LEVEL_NONE
/// Get sycl::accessor for the device memory object when usm is not used.
accessor_t get_access(sycl::handler &cgh) {
return get_buffer(_device_ptr)
.template reinterpret<T, Dimension>(_range)
.template get_access<detail::memory_traits<Memory, T>::mode,
detail::memory_traits<Memory, T>::target>(cgh);
}
#else
/// Get dpct::accessor with dimension info for the device memory object
/// when usm is used and dimension is greater than 1.
template <size_t D = Dimension>
typename std::enable_if<D != 1, dpct_accessor_t>::type
get_access(sycl::handler &cgh) {
return dpct_accessor_t((T *)_device_ptr, _range);
}
#endif // DPCT_USM_LEVEL_NONE
private:
device_memory(value_t *memory_ptr, size_t size)
    : _size(size), _range(size / sizeof(T)), _reference(true),
      // _host_ptr must be null so the destructor does not free garbage.
      _host_ptr(nullptr), _device_ptr(memory_ptr) {}
void allocate_device(sycl::queue &q) {
#ifndef DPCT_USM_LEVEL_NONE
if (Memory == shared) {
_device_ptr = (value_t *)sycl::malloc_shared(
_size, q.get_device(), q.get_context());
return;
}
#endif
_device_ptr = (value_t *)detail::dpct_malloc(_size, q);
}
size_t _size;
sycl::range<Dimension> _range;
bool _reference;
value_t *_host_ptr;
value_t *_device_ptr;
};
template <class T, memory_region Memory>
class device_memory<T, Memory, 0> : public device_memory<T, Memory, 1> {
public:
using base = device_memory<T, Memory, 1>;
using value_t = typename base::value_t;
using accessor_t =
typename detail::memory_traits<Memory, T>::template accessor_t<0>;
/// Constructor with initial value.
device_memory(const value_t &val) : base(sycl::range<1>(1), {val}) {}
/// Default constructor
device_memory() : base(1) {}
#ifdef DPCT_USM_LEVEL_NONE
/// Get sycl::accessor for the device memory object when usm is not used.
accessor_t get_access(sycl::handler &cgh) {
auto buf = get_buffer(base::get_ptr())
.template reinterpret<T, 1>(sycl::range<1>(1));
return accessor_t(buf, cgh);
}
#endif // DPCT_USM_LEVEL_NONE
};
}
template <class T, size_t Dimension>
using global_memory = detail::device_memory<T, global, Dimension>;
template <class T, size_t Dimension>
using constant_memory = detail::device_memory<T, constant, Dimension>;
template <class T, size_t Dimension>
using shared_memory = detail::device_memory<T, shared, Dimension>;
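// Illustrative sketch (hypothetical name): declaring a device global array
// with the global_memory alias above, the counterpart of a CUDA __device__
// array. get_ptr() lazily allocates the storage on first use and returns a
// device pointer (or a virtual pointer when DPCT_USM_LEVEL_NONE is defined).
inline float *example_device_coefficients() {
  static global_memory<float, 1> coeffs(sycl::range<1>(8));
  return coeffs.get_ptr();
}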
// dpct::deprecated:: is for functionality that was introduced for compatibility
// purposes but relies on deprecated C++ features, which are either removed or
// will be removed in future standards.
// Direct use of deprecated functionality in this namespace should be avoided.
namespace deprecated {
template <typename T>
using usm_host_allocator = detail::deprecated::usm_allocator<T, sycl::usm::alloc::host>;
template <typename T>
using usm_device_allocator = detail::deprecated::usm_allocator<T, sycl::usm::alloc::shared>;
} // namespace deprecated
class pointer_attributes {
public:
void init(const void *ptr,
sycl::queue &q = dpct::get_default_queue()) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error(
"dpct::pointer_attributes: only works for USM pointer.");
#else
memory_type = sycl::get_pointer_type(ptr, q.get_context());
device_pointer = (memory_type !=
sycl::usm::alloc::unknown) ? ptr : nullptr;
host_pointer = (memory_type !=
sycl::usm::alloc::unknown) &&
(memory_type != sycl::usm::alloc::device) ? ptr : nullptr;
sycl::device device_obj = sycl::get_pointer_device(ptr, q.get_context());
device_id = dpct::dev_mgr::instance().get_device_id(device_obj);
#endif
}
sycl::usm::alloc get_memory_type() {
return memory_type;
}
const void *get_device_pointer() {
return device_pointer;
}
const void *get_host_pointer() {
return host_pointer;
}
bool is_memory_shared() {
return memory_type == sycl::usm::alloc::shared;
}
unsigned int get_device_id() {
return device_id;
}
private:
sycl::usm::alloc memory_type = sycl::usm::alloc::unknown;
const void *device_pointer = nullptr;
const void *host_pointer = nullptr;
unsigned int device_id = 0;
};
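// Illustrative sketch (hypothetical helper): query whether a pointer refers to
// a shared USM allocation. Only meaningful when USM is enabled; with
// DPCT_USM_LEVEL_NONE the init() call above throws.
inline bool example_is_shared_allocation(const void *ptr,
                                         sycl::queue &q = get_default_queue()) {
  pointer_attributes attributes;
  attributes.init(ptr, q);
  return attributes.is_memory_shared();
}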
} // namespace dpct
#endif // __DPCT_MEMORY_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/dpl_utils.hpp | //==---- dpl_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DPL_UTILS_HPP__
#define __DPCT_DPL_UTILS_HPP__
#define ONEDPL_USE_DPCPP_BACKEND 1
#define __USE_DPCT 1
#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/numeric>
#include "dpl_extras/memory.h"
#include "dpl_extras/algorithm.h"
#include "dpl_extras/numeric.h"
#include "dpl_extras/iterators.h"
#include "dpl_extras/vector.h"
#include "dpl_extras/dpcpp_extensions.h"
#endif // __DPCT_DPL_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/math.hpp | //==---- math.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MATH_HPP__
#define __DPCT_MATH_HPP__
#include <sycl/sycl.hpp>
namespace dpct {
namespace detail {
template <typename VecT, class BinaryOperation, class = void>
class vectorized_binary {
public:
inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) {
VecT v4;
for (size_t i = 0; i < v4.size(); ++i) {
v4[i] = binary_op(a[i], b[i]);
}
return v4;
}
};
template <typename VecT, class BinaryOperation>
class vectorized_binary<
VecT, BinaryOperation,
std::void_t<std::invoke_result_t<BinaryOperation, VecT, VecT>>> {
public:
inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) {
return binary_op(a, b).template as<VecT>();
}
};
template <typename T> bool isnan(const T a) { return sycl::isnan(a); }
// TODO: Add more specializations, such as a bfloat16 version.
} // namespace detail
/// Compute fast_length for variable-length array
/// \param [in] a The array
/// \param [in] len Length of the array
/// \returns The computed fast_length
inline float fast_length(const float *a, int len) {
switch (len) {
case 1:
return a[0];
case 2:
return sycl::fast_length(sycl::float2(a[0], a[1]));
case 3:
return sycl::fast_length(sycl::float3(a[0], a[1], a[2]));
case 4:
return sycl::fast_length(sycl::float4(a[0], a[1], a[2], a[3]));
case 0:
return 0;
default:
float f = 0;
for (int i = 0; i < len; ++i)
f += a[i] * a[i];
return sycl::sqrt(f);
}
}
/// Calculate the Euclidean length (square root of the sum of squares) of the
/// input array.
/// \param [in] a The array pointer
/// \param [in] len Length of the array
/// \returns The computed length
template <typename T> inline T length(const T *a, const int len) {
switch (len) {
case 1:
return a[0];
case 2:
return sycl::length(sycl::vec<T, 2>(a[0], a[1]));
case 3:
return sycl::length(sycl::vec<T, 3>(a[0], a[1], a[2]));
case 4:
return sycl::length(sycl::vec<T, 4>(a[0], a[1], a[2], a[3]));
default:
T ret = 0;
for (int i = 0; i < len; ++i)
ret += a[i] * a[i];
return sycl::sqrt(ret);
}
}
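// Illustrative sketch: Euclidean length of a small float array through the
// helper above; the 3-element case maps onto sycl::length of a float3.
inline float example_vec3_length() {
  const float v[3] = {3.f, 4.f, 12.f};
  return length(v, 3); // 13.f
}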
/// Performs comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool>
compare(const T a, const T b, const BinaryOperation binary_op) {
return binary_op(a, b);
}
template <typename T>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<std::not_equal_to<>, T, T>, bool>, bool>
compare(const T a, const T b, const std::not_equal_to<> binary_op) {
return !detail::isnan(a) && !detail::isnan(b) && binary_op(a, b);
}
/// Performs unordered comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool>
unordered_compare(const T a, const T b, const BinaryOperation binary_op) {
return detail::isnan(a) || detail::isnan(b) || binary_op(a, b);
}
/// Performs an element-wise comparison of two 2-element values and returns
/// true if both results are true.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, bool>
compare_both(const T a, const T b, const BinaryOperation binary_op) {
return compare(a[0], b[0], binary_op) && compare(a[1], b[1], binary_op);
}
/// Performs an element-wise unordered comparison of two 2-element values and
/// returns true if both results are true.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, bool>
unordered_compare_both(const T a, const T b, const BinaryOperation binary_op) {
return unordered_compare(a[0], b[0], binary_op) &&
unordered_compare(a[1], b[1], binary_op);
}
/// Performs an element-wise comparison of two 2-element values.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, T>
compare(const T a, const T b, const BinaryOperation binary_op) {
return {compare(a[0], b[0], binary_op), compare(a[1], b[1], binary_op)};
}
/// Performs an element-wise comparison of two 2-element values; the result for
/// each element is 0 (false) or 0xffff (true). Returns an unsigned int composed
/// of the two per-element results.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline unsigned compare_mask(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b,
const BinaryOperation binary_op) {
return sycl::vec<short, 2>(-compare(a[0], b[0], binary_op),
-compare(a[1], b[1], binary_op))
.as<sycl::vec<unsigned, 1>>();
}
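// Illustrative sketch (hypothetical helper): lane mask for an element-wise
// "less than" on a pair of half-precision values, the kind of pattern produced
// when migrating half2 comparison intrinsics that return lane masks. Each
// 16-bit lane of the result is 0x0000 or 0xffff.
inline unsigned example_half2_less_mask(sycl::half2 a, sycl::half2 b) {
  return compare_mask(a, b,
                      [](sycl::half x, sycl::half y) { return x < y; });
}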
/// Performs an element-wise unordered comparison of two 2-element values.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, T>
unordered_compare(const T a, const T b, const BinaryOperation binary_op) {
return {unordered_compare(a[0], b[0], binary_op),
unordered_compare(a[1], b[1], binary_op)};
}
/// Performs an element-wise unordered comparison of two 2-element values; the
/// result for each element is 0 (false) or 0xffff (true). Returns an unsigned
/// int composed of the two per-element results.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline unsigned unordered_compare_mask(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b,
const BinaryOperation binary_op) {
return sycl::vec<short, 2>(-unordered_compare(a[0], b[0], binary_op),
-unordered_compare(a[1], b[1], binary_op))
.as<sycl::vec<unsigned, 1>>();
}
/// Determines, element-wise, whether each component of a 2-element value is NaN.
/// \param [in] a The input value
/// \returns the comparison result
template <typename T>
inline std::enable_if_t<T::size() == 2, T> isnan(const T a) {
return {detail::isnan(a[0]), detail::isnan(a[1])};
}
// min function overloads.
// For floating-point types, `float` or `double` arguments are acceptable.
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or
// `std::int64_t` type arguments are acceptable.
inline double min(const double a, const float b) {
return sycl::fmin(a, static_cast<double>(b));
}
inline double min(const float a, const double b) {
return sycl::fmin(static_cast<double>(a), b);
}
inline float min(const float a, const float b) { return sycl::fmin(a, b); }
inline double min(const double a, const double b) { return sycl::fmin(a, b); }
inline std::uint32_t min(const std::uint32_t a, const std::int32_t b) {
return sycl::min(a, static_cast<std::uint32_t>(b));
}
inline std::uint32_t min(const std::int32_t a, const std::uint32_t b) {
return sycl::min(static_cast<std::uint32_t>(a), b);
}
inline std::int32_t min(const std::int32_t a, const std::int32_t b) {
return sycl::min(a, b);
}
inline std::uint32_t min(const std::uint32_t a, const std::uint32_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::int64_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::int64_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
inline std::int64_t min(const std::int64_t a, const std::int64_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::uint64_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::int32_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::int32_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
inline std::uint64_t min(const std::uint64_t a, const std::uint32_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::uint32_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
// max function overloads.
// For floating-point types, `float` or `double` arguments are acceptable.
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or
// `std::int64_t` type arguments are acceptable.
inline double max(const double a, const float b) {
return sycl::fmax(a, static_cast<double>(b));
}
inline double max(const float a, const double b) {
return sycl::fmax(static_cast<double>(a), b);
}
inline float max(const float a, const float b) { return sycl::fmax(a, b); }
inline double max(const double a, const double b) { return sycl::fmax(a, b); }
inline std::uint32_t max(const std::uint32_t a, const std::int32_t b) {
return sycl::max(a, static_cast<std::uint32_t>(b));
}
inline std::uint32_t max(const std::int32_t a, const std::uint32_t b) {
return sycl::max(static_cast<std::uint32_t>(a), b);
}
inline std::int32_t max(const std::int32_t a, const std::int32_t b) {
return sycl::max(a, b);
}
inline std::uint32_t max(const std::uint32_t a, const std::uint32_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::int64_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::int64_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
inline std::int64_t max(const std::int64_t a, const std::int64_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::uint64_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::int32_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::int32_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
inline std::uint64_t max(const std::uint64_t a, const std::uint32_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::uint32_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
/// Performs relu saturation.
/// \param [in] a The input value
/// \returns the relu saturation result
template <typename T> inline T relu(const T a) {
if (!detail::isnan(a) && a < 0.f)
return 0.f;
return a;
}
template <class T> inline sycl::vec<T, 2> relu(const sycl::vec<T, 2> a) {
return {relu(a[0]), relu(a[1])};
}
/// Performs complex number multiply addition.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] c The third value
/// \returns the operation result
template <typename T>
inline sycl::vec<T, 2> complex_mul_add(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b,
const sycl::vec<T, 2> c) {
return sycl::vec<T, 2>{a[0] * b[0] - a[1] * b[1] + c[0],
a[0] * b[1] + a[1] * b[0] + c[1]};
}
/// Compares two values and returns the larger one. If either input is NaN,
/// returns NaN.
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns the bigger value
template <typename T> inline T fmax_nan(const T a, const T b) {
if (detail::isnan(a) || detail::isnan(b))
return NAN;
return sycl::fmax(a, b);
}
template <typename T>
inline sycl::vec<T, 2> fmax_nan(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b) {
return {fmax_nan(a[0], b[0]), fmax_nan(a[1], b[1])};
}
/// Compares two values and returns the smaller one. If either input is NaN,
/// returns NaN.
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns the smaller value
template <typename T> inline T fmin_nan(const T a, const T b) {
if (detail::isnan(a) || detail::isnan(b))
return NAN;
return sycl::fmin(a, b);
}
template <typename T>
inline sycl::vec<T, 2> fmin_nan(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b) {
return {fmin_nan(a[0], b[0]), fmin_nan(a[1], b[1])};
}
/// A sycl::abs wrapper functor.
struct abs {
template <typename T> auto operator()(const T x) const {
return sycl::abs(x);
}
};
/// A sycl::abs_diff wrapper functor.
struct abs_diff {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::abs_diff(x, y);
}
};
/// A sycl::add_sat wrapper functor.
struct add_sat {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::add_sat(x, y);
}
};
/// A sycl::rhadd wrapper functor.
struct rhadd {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::rhadd(x, y);
}
};
/// A sycl::hadd wrapper functor.
struct hadd {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::hadd(x, y);
}
};
/// A sycl::max wrapper functor.
struct maximum {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::max(x, y);
}
};
/// A sycl::min wrapper functor.
struct minimum {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::min(x, y);
}
};
/// A sycl::sub_sat wrapper functor.
struct sub_sat {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::sub_sat(x, y);
}
};
/// Compute vectorized binary operation value for two values, with each value
/// treated as a vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \tparam [in] BinaryOperation The binary operation class
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized binary operation value of the two values
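/// Illustrative usage sketch (device-side code; uses the wrapper functors
/// defined above, e.g. \p add_sat; this mirrors the behaviour of byte-wise
/// SIMD intrinsics such as CUDA's __vaddus4):
/// \code
/// // Treat two 32-bit values as four unsigned bytes each, compute the
/// // per-byte saturated sum, and pack the result back into one unsigned.
/// unsigned r = dpct::vectorized_binary<sycl::uchar4>(a, b, dpct::add_sat());
/// \endcode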
template <typename VecT, class BinaryOperation>
inline unsigned vectorized_binary(unsigned a, unsigned b,
const BinaryOperation binary_op) {
sycl::vec<unsigned, 1> v0{a}, v1{b};
auto v2 = v0.as<VecT>();
auto v3 = v1.as<VecT>();
auto v4 =
detail::vectorized_binary<VecT, BinaryOperation>()(v2, v3, binary_op);
v0 = v4.template as<sycl::vec<unsigned, 1>>();
return v0;
}
/// Compute vectorized isgreater for two values, with each value treated as a
/// vector type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized greater-than comparison of the two values
template <typename S, typename T> inline T vectorized_isgreater(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = v2 > v3;
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized max for two values, with each value treated as a vector
/// type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized max of the two values
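/// For example (illustrative), vectorized_max<sycl::short2>(0x00010003u,
/// 0x00020001u) compares the two packed 16-bit lanes independently and
/// returns 0x00020003u.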
template <typename S, typename T> inline T vectorized_max(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = sycl::max(v2, v3);
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized min for two values, with each value treated as a vector
/// type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized min of the two values
template <typename S, typename T> inline T vectorized_min(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = sycl::min(v2, v3);
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized unary operation for a value, with the value treated as a
/// vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \tparam [in] UnaryOperation The unary operation class
/// \param [in] a The input value
/// \returns The vectorized unary operation value of the input value
template <typename VecT, class UnaryOperation>
inline unsigned vectorized_unary(unsigned a, const UnaryOperation unary_op) {
sycl::vec<unsigned, 1> v0{a};
auto v1 = v0.as<VecT>();
auto v2 = unary_op(v1);
v0 = v2.template as<sycl::vec<unsigned, 1>>();
return v0;
}
/// Compute the sum of the absolute differences of two values without modulo
/// overflow, with each value treated as a vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The sum of the elementwise absolute differences of the two values
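/// For example, with \p VecT = sycl::uchar4, a = 0x01020304 and b = 0x04030201,
/// the per-byte absolute differences are 3, 1, 1 and 3, so the returned sum is 8.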
template <typename VecT>
inline unsigned vectorized_sum_abs_diff(unsigned a, unsigned b) {
sycl::vec<unsigned, 1> v0{a}, v1{b};
auto v2 = v0.as<VecT>();
auto v3 = v1.as<VecT>();
auto v4 = sycl::abs_diff(v2, v3);
unsigned sum = 0;
for (size_t i = 0; i < v4.size(); ++i) {
sum += v4[i];
}
return sum;
}
} // namespace dpct
#endif // __DPCT_MATH_HPP__
//==---- blas_utils.hpp----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_BLAS_UTILS_HPP__
#define __DPCT_BLAS_UTILS_HPP__
#include "memory.hpp"
#include "util.hpp"
#include "lib_common_utils.hpp"
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include <utility>
#include <vector>
#include <thread>
namespace dpct {
/// Get the value of \p s.
/// Copy the data to host synchronously, then return the data.
/// \param [in] s The pointer pointing to the data.
/// \param [in] q The queue where the memory copy should be executed.
template <typename T>
inline auto get_value(const T *s, sycl::queue &q) {
return detail::get_value(s, q);
}
namespace detail {
inline void mem_free(sycl::queue *exec_queue,
std::vector<void *> pointers_array, sycl::event e) {
e.wait();
for (auto p : pointers_array)
sycl::free(p, *exec_queue);
}
inline int stride_for(int num_elems, int mem_align_in_elems) {
return ((num_elems - 1) / mem_align_in_elems + 1) * mem_align_in_elems;
}
#ifndef DPCT_USM_LEVEL_NONE
template<typename T>
class working_memory {
T *_input_ptr;
T *_temp_ptr;
bool _is_sycl_malloced = false;
bool _is_scalar_value = false;
sycl::queue _q;
sycl::event _e;
public:
working_memory(size_t size, sycl::queue q) : _q(q) {
_is_scalar_value = false;
_temp_ptr = (T *)sycl::malloc_device(size, q);
}
working_memory(T *result_ptr, sycl::queue q) : _input_ptr(result_ptr), _q(q) {
_is_scalar_value = true;
_is_sycl_malloced = sycl::get_pointer_type(_input_ptr, _q.get_context()) !=
sycl::usm::alloc::unknown;
if (!_is_sycl_malloced)
_temp_ptr = sycl::malloc_shared<T>(1, _q);
}
auto get_ptr() {
if (_is_scalar_value && _is_sycl_malloced)
return _input_ptr;
return _temp_ptr;
}
void set_event(sycl::event e) { _e = e; }
~working_memory() {
if (_is_scalar_value) {
if (!_is_sycl_malloced) {
_q.memcpy(_input_ptr, _temp_ptr, sizeof(T)).wait();
sycl::free(_temp_ptr, _q);
}
} else {
std::vector<void *> ptrs{_temp_ptr};
dpct::async_dpct_free(ptrs, {_e});
}
}
};
#endif
template <typename Tx, typename Tr>
inline void nrm2_impl(sycl::queue &q, int n, const void *x, int incx,
void *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
#ifdef DPCT_USM_LEVEL_NONE
auto x_buffer = dpct::get_buffer<Tx>(x);
auto r_buffer =
sycl::buffer<Tr, 1>(reinterpret_cast<Tr *>(result), sycl::range<1>(1));
if (dpct::is_device_ptr(result))
r_buffer = dpct::get_buffer<Tr>(result);
oneapi::mkl::blas::column_major::nrm2(q, n, x_buffer, incx, r_buffer);
#else
working_memory<Tr> res_mem(reinterpret_cast<Tr *>(result), q);
oneapi::mkl::blas::column_major::nrm2(q, n, reinterpret_cast<const Tx *>(x),
incx, res_mem.get_ptr());
#endif
#endif
}
template <bool is_conjugate, class Txy, class Tr>
inline void dotuc_impl(sycl::queue &q, int n, const Txy *x, int incx,
const Txy *y, int incy, Tr *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
#ifdef DPCT_USM_LEVEL_NONE
auto x_buffer = dpct::get_buffer<Txy>(x);
auto y_buffer = dpct::get_buffer<Txy>(y);
auto r_buffer = sycl::buffer<Tr, 1>((Tr *)result, sycl::range<1>(1));
if (dpct::is_device_ptr(result))
r_buffer = dpct::get_buffer<Tr>(result);
if constexpr (std::is_same_v<Txy, std::complex<float>> ||
std::is_same_v<Txy, std::complex<double>>) {
if constexpr (is_conjugate)
oneapi::mkl::blas::column_major::dotc(q, n, x_buffer, incx, y_buffer,
incy, r_buffer);
else
oneapi::mkl::blas::column_major::dotu(q, n, x_buffer, incx, y_buffer,
incy, r_buffer);
} else
oneapi::mkl::blas::column_major::dot(q, n, x_buffer, incx, y_buffer, incy,
r_buffer);
#else
working_memory<Tr> res_mem(result, q);
if constexpr (std::is_same_v<Txy, std::complex<float>> ||
std::is_same_v<Txy, std::complex<double>>) {
if constexpr (is_conjugate)
oneapi::mkl::blas::column_major::dotc(q, n, x, incx, y, incy, res_mem.get_ptr());
else
oneapi::mkl::blas::column_major::dotu(q, n, x, incx, y, incy, res_mem.get_ptr());
} else
oneapi::mkl::blas::column_major::dot(q, n, x, incx, y, incy, res_mem.get_ptr());
#endif
#endif
}
template <bool is_conjugate>
inline void dotuc(sycl::queue &q, int n, const void *x,
library_data_t x_type, int incx, const void *y,
library_data_t y_type, int incy, void *result,
library_data_t result_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, y_type, result_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const float *>(x), incx,
reinterpret_cast<const float *>(y), incy,
reinterpret_cast<float *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const double *>(x), incx,
reinterpret_cast<const double *>(y), incy,
reinterpret_cast<double *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float,
library_data_t::complex_float): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const std::complex<float> *>(x), incx,
reinterpret_cast<const std::complex<float> *>(y), incy,
reinterpret_cast<std::complex<float> *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double,
library_data_t::complex_double): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const std::complex<double> *>(x), incx,
reinterpret_cast<const std::complex<double> *>(y), incy,
reinterpret_cast<std::complex<double> *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const sycl::half *>(x), incx,
reinterpret_cast<const sycl::half *>(y), incy,
reinterpret_cast<sycl::half *>(result));
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
template <class Tx, class Te>
inline void scal_impl(sycl::queue &q, int n, const void *alpha, void *x,
int incx) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q);
auto data_x = get_memory(reinterpret_cast<Tx *>(x));
oneapi::mkl::blas::column_major::scal(q, n, alpha_val,
data_x, incx);
#endif
}
template <class Txy, class Te>
inline void axpy_impl(sycl::queue &q, int n, const void *alpha, const void *x,
int incx, void *y, int incy) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q);
auto data_x = get_memory(reinterpret_cast<const Txy *>(x));
auto data_y = get_memory(reinterpret_cast<Txy *>(y));
oneapi::mkl::blas::column_major::axpy(q, n, alpha_val,
data_x, incx,
data_y, incy);
#endif
}
template <class Txy, class Tc, class Ts>
inline void rot_impl(sycl::queue &q, int n, void *x, int incx, void *y,
int incy, const void *c, const void *s) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Tc c_value = dpct::get_value(reinterpret_cast<const Tc *>(c), q);
Ts s_value = dpct::get_value(reinterpret_cast<const Ts *>(s), q);
auto data_x = get_memory(reinterpret_cast<Txy *>(x));
auto data_y = get_memory(reinterpret_cast<Txy *>(y));
oneapi::mkl::blas::column_major::rot(q, n, data_x, incx,
data_y, incy, c_value,
s_value);
#endif
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, int lda, const void *b,
int ldb, const void *beta, void *c, int ldc) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
auto data_a = get_memory(reinterpret_cast<const Ta *>(a));
auto data_b = get_memory(reinterpret_cast<const Tb *>(b));
auto data_c = get_memory(reinterpret_cast<Tc *>(c));
oneapi::mkl::blas::column_major::gemm(
q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda,
data_b, ldb, beta_value, data_c, ldc);
#endif
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void **a, int lda,
const void **b, int ldb, const void *beta, void **c,
int ldc, int batch_size) {
struct matrix_info_t {
oneapi::mkl::transpose transpose_info[2];
Ts value_info[2];
std::int64_t size_info[3];
std::int64_t ld_info[3];
std::int64_t groupsize_info;
};
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->transpose_info[0] = a_trans;
matrix_info->transpose_info[1] = b_trans;
matrix_info->value_info[0] = alpha_value;
matrix_info->value_info[1] = beta_value;
matrix_info->size_info[0] = m;
matrix_info->size_info[1] = n;
matrix_info->size_info[2] = k;
matrix_info->ld_info[0] = lda;
matrix_info->ld_info[1] = ldb;
matrix_info->ld_info[2] = ldc;
matrix_info->groupsize_info = batch_size;
sycl::event e = oneapi::mkl::blas::column_major::gemm_batch(
q, matrix_info->transpose_info, matrix_info->transpose_info + 1,
matrix_info->size_info, matrix_info->size_info + 1,
matrix_info->size_info + 2, matrix_info->value_info,
reinterpret_cast<const Ta **>(a), matrix_info->ld_info,
reinterpret_cast<const Tb **>(b), matrix_info->ld_info + 1,
matrix_info->value_info + 1, reinterpret_cast<Tc **>(c),
matrix_info->ld_info + 2, 1, &(matrix_info->groupsize_info));
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { std::free(matrix_info); });
});
}
template <class Ta, class Tb, class Tc, class Ts>
inline void
gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n,
int k, const void *alpha, const void *a, int lda,
long long int stride_a, const void *b, int ldb,
long long int stride_b, const void *beta, void *c,
int ldc, long long int stride_c, int batch_size) {
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
auto data_a = get_memory(reinterpret_cast<const Ta *>(a));
auto data_b = get_memory(reinterpret_cast<const Tb *>(b));
auto data_c = get_memory(reinterpret_cast<Tc *>(c));
oneapi::mkl::blas::column_major::gemm_batch(
q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda,
stride_a, data_b, ldb, stride_b, beta_value,
data_c, ldc, stride_c, batch_size);
}
template <bool is_hermitian, class T, class Tbeta>
inline void rk_impl(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k,
const T *alpha, const T *a, int lda, const T *b,
int ldb, const Tbeta *beta, T *c, int ldc) {
  // For a symmetric matrix, this function performs: C = alpha*OP(A)*(OP(B))^T + beta*C
  // For a Hermitian matrix, this function performs: C = alpha*OP(A)*(OP(B))^H + beta*C
  // The gemmt() function performs: C = alpha*OPA(A)*OPB(B) + beta*C
  // So OPB needs to be adjusted before gemmt() is called.
using Ty = typename dpct::DataType<T>::T2;
using Ts = typename dpct::DataType<Tbeta>::T2;
Ty alpha_value = dpct::get_value(reinterpret_cast<const Ty *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
oneapi::mkl::transpose trans_A = trans, trans_B = trans;
int origin_b_rows = trans == oneapi::mkl::transpose::nontrans ? n : k;
int origin_b_cols = trans == oneapi::mkl::transpose::nontrans ? k : n;
if ((is_hermitian && trans == oneapi::mkl::transpose::trans) ||
(!is_hermitian && !std::is_floating_point_v<Ty> && trans == oneapi::mkl::transpose::conjtrans)) {
    // In this case OPB needs to be a pure conjugate operation, but only
    // nontrans, trans and conjtrans are available. So we first materialize
    // conjtrans(B) with omatcopy_batch and then apply trans in gemmt(), which
    // together yield conj(B).
trans_B = oneapi::mkl::transpose::trans;
auto data_a = get_memory(reinterpret_cast<const Ty *>(a));
auto data_c = get_memory(reinterpret_cast<Ty *>(c));
#ifdef DPCT_USM_LEVEL_NONE
auto new_B_buffer = sycl::buffer<Ty, 1>(sycl::range<1>(origin_b_rows * origin_b_cols));
auto from_buffer = dpct::get_buffer<Ty>(b);
oneapi::mkl::blas::column_major::omatcopy_batch(
q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols,
Ts(1.0), from_buffer, ldb, origin_b_rows * ldb, new_B_buffer,
origin_b_cols, origin_b_rows * origin_b_cols, 1);
oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value,
data_a, lda, new_B_buffer, origin_b_cols, beta_value, data_c, ldc);
#else
working_memory<T> new_B(origin_b_rows * origin_b_cols * sizeof(T), q);
oneapi::mkl::blas::column_major::omatcopy_batch(
q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols,
Ts(1.0), reinterpret_cast<const Ty *>(b), ldb, origin_b_rows * ldb,
reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols,
origin_b_rows * origin_b_cols, 1);
sycl::event e = oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value,
data_a, lda, reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols,
beta_value, data_c, ldc);
new_B.set_event(e);
#endif
} else {
if constexpr (is_hermitian) {
trans_B = trans == oneapi::mkl::transpose::nontrans
? oneapi::mkl::transpose::conjtrans
: oneapi::mkl::transpose::nontrans;
} else {
trans_B = trans == oneapi::mkl::transpose::nontrans
? oneapi::mkl::transpose::trans
: oneapi::mkl::transpose::nontrans;
}
auto data_a = get_memory(reinterpret_cast<const Ty *>(a));
auto data_b = get_memory(reinterpret_cast<const Ty *>(b));
auto data_c = get_memory(reinterpret_cast<Ty *>(c));
oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value,
data_a, lda, data_b, ldb, beta_value, data_c, ldc);
}
}
template <class Ta, class Tb, class Ts>
inline void
trsm_batch_impl(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n, const void *alpha,
const void **a, int lda, void **b, int ldb, int batch_size) {
struct matrix_info_t {
matrix_info_t(oneapi::mkl::side side_info, oneapi::mkl::uplo uplo_info,
oneapi::mkl::transpose transpose_info,
oneapi::mkl::diag diag_info, Ts value_info, std::int64_t m,
std::int64_t n, std::int64_t lda, std::int64_t ldb,
std::int64_t groupsize_info)
: side_info(side_info), uplo_info(uplo_info),
transpose_info(transpose_info), diag_info(diag_info),
value_info(value_info), groupsize_info(groupsize_info) {
size_info[0] = m;
size_info[1] = n;
ld_info[0] = lda;
ld_info[1] = ldb;
}
oneapi::mkl::side side_info;
oneapi::mkl::uplo uplo_info;
oneapi::mkl::transpose transpose_info;
oneapi::mkl::diag diag_info;
Ts value_info;
std::int64_t size_info[2];
std::int64_t ld_info[2];
std::int64_t groupsize_info;
};
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
matrix_info_t *matrix_info =
new matrix_info_t(left_right, upper_lower, trans, unit_diag, alpha_value,
m, n, lda, ldb, batch_size);
sycl::event e = oneapi::mkl::blas::column_major::trsm_batch(
q, &(matrix_info->side_info), &(matrix_info->uplo_info),
&(matrix_info->transpose_info), &(matrix_info->diag_info),
matrix_info->size_info, matrix_info->size_info + 1,
&(matrix_info->value_info), reinterpret_cast<const Ta **>(a),
matrix_info->ld_info, reinterpret_cast<Tb **>(b),
matrix_info->ld_info + 1, 1, &(matrix_info->groupsize_info));
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete matrix_info; });
});
}
template <typename T>
inline void getrfnp_batch_wrapper(sycl::queue &exec_queue, int n, T *a[],
int lda, int *info, int batch_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
std::int64_t stride_a = n * lda;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getrfnp_batch_scratchpad_size<Ty>(
exec_queue, n, n, lda, stride_a, batch_size);
Ty *a_strided_mem =
(Ty *)dpct::dpct_malloc(stride_a * batch_size * sizeof(Ty), exec_queue);
T **host_a = (T **)malloc(batch_size * sizeof(T *));
dpct::dpct_memcpy(host_a, a, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct::dpct_memcpy(a_strided_mem + i * stride_a, host_a[i],
n * lda * sizeof(T));
#ifdef DPCT_USM_LEVEL_NONE
{
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
auto a_buffer = get_buffer<Ty>(a_strided_mem);
oneapi::mkl::lapack::getrfnp_batch(exec_queue, n, n, a_buffer, lda,
stride_a, batch_size, scratchpad,
scratchpad_size);
}
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_strided_mem + i * stride_a,
n * lda * sizeof(T), automatic));
#else
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
sycl::event e = oneapi::mkl::lapack::getrfnp_batch(
exec_queue, n, n, a_strided_mem, lda, stride_a, batch_size, scratchpad,
scratchpad_size);
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_strided_mem + i * stride_a,
n * lda * sizeof(T), automatic, {e}));
std::vector<void *> ptrs{scratchpad, a_strided_mem};
dpct::async_dpct_free(ptrs, events, exec_queue);
#endif
exec_queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(events);
cgh.host_task([=] { free(host_a); });
});
#endif
}
} // namespace detail
inline oneapi::mkl::transpose get_transpose(int t) {
if (t == 0) {
return oneapi::mkl::transpose::nontrans;
} else if (t == 1) {
return oneapi::mkl::transpose::trans;
} else {
return oneapi::mkl::transpose::conjtrans;
}
}
/// Computes the LU factorizations of a batch of general matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] n The order of the matrices.
/// \param [in, out] a Array of pointers to matrices. These matrices will be
/// overwritten by their lower triangular factors (with unit diagonal elements)
/// and their upper triangular factors.
/// \param [in] lda The leading dimension of the matrices.
/// \param [out] ipiv An array that stores the pivot indices. If \p ipiv is
/// nullptr, the non-pivoting LU factorization is computed.
/// \param [out] info An array that stores the error information.
/// \param [in] batch_size The size of the batch.
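/// Illustrative usage sketch (hypothetical names; all arrays are assumed to be
/// device-accessible, e.g. USM allocations prepared by the caller):
/// \code
/// // d_a: batch_size pointers, each to an n x n float matrix with leading
/// // dimension lda; d_ipiv: batch_size * n ints; d_info: batch_size ints
/// dpct::getrf_batch_wrapper(q, n, d_a, lda, d_ipiv, d_info, batch_size);
/// q.wait();
/// \endcode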
template <typename T>
inline void getrf_batch_wrapper(sycl::queue &exec_queue, int n, T *a[],
int lda, int *ipiv, int *info, int batch_size) {
if (ipiv == nullptr) {
detail::getrfnp_batch_wrapper(exec_queue, n, a, lda, info, batch_size);
return;
}
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>(
exec_queue, n, n, lda, stride_a, stride_ipiv, batch_size);
T *a_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
{
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
oneapi::mkl::lapack::getrf_batch(exec_queue, n, n, a_buffer, lda, stride_a,
ipiv_buf, stride_ipiv, batch_size, scratchpad,
scratchpad_size);
auto to_buffer = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = ipiv_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = to_buffer.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * n + id.get(1)] =
static_cast<int>(from_acc[id.get(0) * stride_ipiv + id.get(1)]);
});
});
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_buffer_ptr + i * stride_a,
n * lda * sizeof(T), automatic));
std::vector<void *> ptrs{host_a};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t m_int64 = n;
std::int64_t n_int64 = n;
std::int64_t lda_int64 = lda;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>(
exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *)).wait();
for (std::int64_t i = 0; i < batch_size; ++i)
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
oneapi::mkl::lapack::getrf_batch(exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64,
ipiv_int64_ptr, 1, &group_sizes, scratchpad,
scratchpad_size);
sycl::event e = exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv[idx] = static_cast<int>(ipiv_int64[idx]);
});
});
std::vector<void *> ptrs{scratchpad, ipiv_int64, ipiv_int64_ptr, a_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
/// Solves a system of linear equations with a batch of LU-factored square
/// coefficient matrices, with multiple right-hand sides.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] trans Indicates the form of the linear equations.
/// \param [in] n The order of the matrices.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a Array of pointers to matrices.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [in] ipiv An array that stores the pivots.
/// \param [in, out] b Array of pointers to matrices, whose columns are
/// the right-hand sides for the systems of equations.
/// \param [in] ldb The leading dimension of the matrices in \p b.
/// \param [out] info A value that stores the error information.
/// \param [in] batch_size The size of the batch.
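/// Illustrative usage sketch (hypothetical names; continues a previous
/// getrf_batch_wrapper call, so d_a holds const-qualified pointers to the
/// factored matrices and d_ipiv the pivots; h_info is a single host-side int):
/// \code
/// dpct::getrs_batch_wrapper(q, oneapi::mkl::transpose::nontrans, n, nrhs,
///                           d_a, lda, d_ipiv, d_b, ldb, &h_info, batch_size);
/// \endcode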
template <typename T>
inline void getrs_batch_wrapper(sycl::queue &exec_queue,
oneapi::mkl::transpose trans, int n, int nrhs,
const T *a[], int lda, int *ipiv, T *b[],
int ldb, int *info, int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info value to 0
*info = 0;
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_b = nrhs * ldb;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>(
exec_queue, trans, n, nrhs, lda, stride_a, stride_ipiv, ldb, stride_b,
batch_size);
T *a_buffer_ptr, *b_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_b = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_b, b, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i) {
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
dpct_memcpy(b_buffer_ptr + i * stride_b, host_b[i], nrhs * ldb * sizeof(T));
}
{
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
auto b_buffer = get_buffer<Ty>(b_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
auto from_buf = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * stride_ipiv + id.get(1)] =
static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]);
});
});
oneapi::mkl::lapack::getrs_batch(exec_queue, trans, n, nrhs, a_buffer, lda,
stride_a, ipiv_buf, stride_ipiv, b_buffer, ldb,
stride_b, batch_size, scratchpad, scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_b[i],
b_buffer_ptr + i * stride_b,
nrhs * ldb * sizeof(T), automatic));
std::vector<void *> ptrs{host_a, host_b};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t n_int64 = n;
std::int64_t nrhs_int64 = nrhs;
std::int64_t lda_int64 = lda;
std::int64_t ldb_int64 = ldb;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>(
exec_queue, &trans, &n_int64, &nrhs_int64, &lda_int64, &ldb_int64, 1,
&group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *));
exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]);
});
}).wait();
for (std::int64_t i = 0; i < batch_size; ++i)
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
sycl::event e = oneapi::mkl::lapack::getrs_batch(
exec_queue, &trans, &n_int64, &nrhs_int64, (Ty **)a_shared, &lda_int64,
ipiv_int64_ptr, (Ty **)b_shared, &ldb_int64, 1, &group_sizes, scratchpad,
scratchpad_size);
std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared, b_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
/// Computes the inverses of a batch of LU-factored matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] n The order of the matrices.
/// \param [in] a Array of pointers to matrices.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [in] ipiv An array that stores the pivots.
/// \param [out] b Array of pointers to inverse matrices.
/// \param [in] ldb The leading dimension of the matrices in \p b.
/// \param [out] info An array that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getri_batch_wrapper(sycl::queue &exec_queue, int n,
const T *a[], int lda, int *ipiv, T *b[],
int ldb, int *info, int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_b = n * ldb;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>(
exec_queue, n, ldb, stride_b, stride_ipiv, batch_size);
T *b_buffer_ptr;
b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_b = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_b, b, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i) {
// Need to create a copy of input matrices "a" to keep them unchanged.
// Matrices "b" (copy of matrices "a") will be used as input and output
// parameter in oneapi::mkl::lapack::getri_batch call.
matrix_mem_copy(b_buffer_ptr + i * stride_b, host_a[i], ldb, lda, n, n,
dpct::device_to_device, exec_queue);
}
{
auto b_buffer = get_buffer<Ty>(b_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
auto from_buf = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * stride_ipiv + id.get(1)] =
static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]);
});
});
oneapi::mkl::lapack::getri_batch(exec_queue, n, b_buffer, ldb, stride_b, ipiv_buf,
stride_ipiv, batch_size, scratchpad,
scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_b[i],
b_buffer_ptr + i * stride_b,
n * ldb * sizeof(T), automatic));
std::vector<void *> ptrs{host_a, host_b};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t n_int64 = n;
std::int64_t ldb_int64 = ldb;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>(
exec_queue, &n_int64, &ldb_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]);
});
});
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *)).wait();
for (std::int64_t i = 0; i < batch_size; ++i) {
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
// Need to create a copy of input matrices "a" to keep them unchanged.
// Matrices "b" (copy of matrices "a") will be used as input and output
// parameter in oneapi::mkl::lapack::getri_batch call.
matrix_mem_copy(b_shared[i], a_shared[i], ldb, lda, n, n, dpct::device_to_device,
exec_queue);
}
sycl::event e = oneapi::mkl::lapack::getri_batch(
exec_queue, &n_int64, (Ty **)b_shared, &ldb_int64, ipiv_int64_ptr, 1,
&group_sizes, scratchpad, scratchpad_size);
std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared, b_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
/// Computes the QR factorizations of a batch of general matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] m The number of rows in the matrices.
/// \param [in] n The number of columns in the matrices.
/// \param [in, out] a Array of pointers to matrices. These
/// matrices will be overwritten by the factorization data.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [out] tau An array that stores the scalar factors of the elementary
/// reflectors.
/// \param [out] info A value that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void geqrf_batch_wrapper(sycl::queue exec_queue, int m, int n,
T *a[], int lda, T *tau[], int *info,
int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info value to 0
*info = 0;
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_tau = std::max(1, std::min(m, n));
std::int64_t scratchpad_size = oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>(
exec_queue, m, n, lda, stride_a, stride_tau, batch_size);
T *a_buffer_ptr, *tau_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
tau_buffer_ptr = (T *)dpct_malloc(stride_tau * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_tau = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_tau, tau, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
{
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
auto tau_buffer = get_buffer<Ty>(tau_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
oneapi::mkl::lapack::geqrf_batch(exec_queue, m, n, a_buffer, lda, stride_a,
tau_buffer, stride_tau, batch_size, scratchpad,
scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events_a;
std::vector<sycl::event> events_tau;
for (std::int64_t i = 0; i < batch_size; ++i) {
events_a.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_buffer_ptr + i * stride_a,
n * lda * sizeof(T), automatic));
events_tau.push_back(detail::dpct_memcpy(
exec_queue, host_tau[i], tau_buffer_ptr + i * stride_tau,
std::max(1, std::min(m, n)) * sizeof(T), automatic));
}
std::vector<void *> ptr_a{host_a};
std::vector<void *> ptr_tau{host_tau};
std::thread mem_free_thread_a(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptr_a, events_a);
std::thread mem_free_thread_tau(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptr_tau, events_tau);
mem_free_thread_a.detach();
mem_free_thread_tau.detach();
#else
  std::int64_t m_int64 = m;
std::int64_t n_int64 = n;
std::int64_t lda_int64 = lda;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>(
exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **tau_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(tau_shared, tau, batch_size * sizeof(T *)).wait();
sycl::event e = oneapi::mkl::lapack::geqrf_batch(
exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64, (Ty **)tau_shared, 1,
&group_sizes, scratchpad, scratchpad_size);
std::vector<void *> ptrs{scratchpad, a_shared, tau_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
/// Computes the Euclidean norm of a vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
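/// Illustrative usage sketch (hypothetical names; d_x is a USM float array of
/// length at least n, and result may live on the host because the helper
/// copies the value back before returning):
/// \code
/// float result = 0.f;
/// dpct::nrm2(q, n, d_x, dpct::library_data_t::real_float, 1, &result,
///            dpct::library_data_t::real_float);
/// \endcode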
inline void nrm2(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, void *result, library_data_t result_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, result_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::nrm2_impl<float, float>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::nrm2_impl<double, double>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::real_float): {
detail::nrm2_impl<std::complex<float>, float>(
q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::real_double): {
detail::nrm2_impl<std::complex<double>, double>(
q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half): {
detail::nrm2_impl<sycl::half, sycl::half>(
q, n, x, incx, result);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
/// Computes the dot product of two vectors.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in] y Input vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void dot(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
detail::dotuc<false>(q, n, x, x_type, incx, y, y_type, incy, result,
result_type);
}
/// Computes the dot product of two vectors, conjugating the first vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in] y Input vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void dotc(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
detail::dotuc<true>(q, n, x, x_type, incx, y, y_type, incy, result,
result_type);
}
/// Computes the product of a vector by a scalar.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] alpha The scale factor alpha.
/// \param [in] alpha_type The data type of alpha.
/// \param [in, out] x Input/Output vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
inline void scal(sycl::queue &q, int n, const void *alpha,
library_data_t alpha_type, void *x, library_data_t x_type,
int incx) {
std::uint64_t key = detail::get_type_combination_id(x_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float): {
detail::scal_impl<float, float>(q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::real_double): {
detail::scal_impl<double, double>(q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float): {
detail::scal_impl<std::complex<float>, std::complex<float>>(q, n, alpha,
x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double): {
detail::scal_impl<std::complex<double>, std::complex<double>>(
q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::real_half): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
    sycl::half alpha_half(alpha_value);
    detail::scal_impl<sycl::half, sycl::half>(q, n, &alpha_half, x, incx);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
/// Computes a vector-scalar product and adds the result to a vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] alpha The scale factor alpha.
/// \param [in] alpha_type The data type of alpha.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in, out] y Input/Output vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
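/// Illustrative usage sketch (hypothetical names; d_x and d_y are USM double
/// arrays and alpha is a host-side double):
/// \code
/// double alpha = 2.0;
/// dpct::axpy(q, n, &alpha, dpct::library_data_t::real_double,
///            d_x, dpct::library_data_t::real_double, 1,
///            d_y, dpct::library_data_t::real_double, 1);
/// \endcode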
inline void axpy(sycl::queue &q, int n, const void *alpha,
library_data_t alpha_type, const void *x, library_data_t x_type,
int incx, void *y, library_data_t y_type, int incy) {
std::uint64_t key = detail::get_type_combination_id(x_type, alpha_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::axpy_impl<float, float>(q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::axpy_impl<double, double>(q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float): {
detail::axpy_impl<std::complex<float>, std::complex<float>>(
q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double): {
detail::axpy_impl<std::complex<double>, std::complex<double>>(
q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
    sycl::half alpha_half(alpha_value);
    detail::axpy_impl<sycl::half, sycl::half>(q, n, &alpha_half, x, incx, y, incy);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
/// Performs rotation of points in the plane.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in, out] x Input/Output vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in, out] y Input/Output vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [in] c Scaling factor.
/// \param [in] s Scaling factor.
/// \param [in] cs_type Data type of the scaling factors.
inline void rot(sycl::queue &q, int n, void *x, library_data_t x_type,
int incx, void *y, library_data_t y_type, int incy,
const void *c, const void *s, library_data_t cs_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, cs_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::rot_impl<float, float, float>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::rot_impl<double, double, double>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::real_float): {
detail::rot_impl<std::complex<float>, float, float>(q, n, x, incx, y, incy, c,
s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::real_double): {
detail::rot_impl<std::complex<double>, double, double>(q, n, x, incx, y, incy, c,
s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float): {
detail::rot_impl<std::complex<float>, float, std::complex<float>>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double): {
detail::rot_impl<std::complex<double>, double, std::complex<double>>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half): {
detail::rot_impl<sycl::half, sycl::half, sycl::half>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_bfloat16,
library_data_t::real_bfloat16): {
detail::rot_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16>(q, n, x, incx, y, incy, c, s);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
/// Computes matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] scaling_type Data type of the scaling factors.
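/// Illustrative usage sketch (hypothetical names; d_a, d_b and d_c are USM
/// float matrices in column-major layout, alpha and beta are host-side floats):
/// \code
/// // C (m x n) = alpha * A (m x k) * B (k x n) + beta * C
/// dpct::gemm(q, oneapi::mkl::transpose::nontrans,
///            oneapi::mkl::transpose::nontrans, m, n, k, &alpha,
///            d_a, dpct::library_data_t::real_float, lda,
///            d_b, dpct::library_data_t::real_float, ldb, &beta,
///            d_c, dpct::library_data_t::real_float, ldc,
///            dpct::library_data_t::real_float);
/// \endcode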
inline void gemm(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, library_data_t a_type,
int lda, const void *b, library_data_t b_type, int ldb,
const void *beta, void *c, library_data_t c_type, int ldc,
library_data_t scaling_type) {
bool matched = false;
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, alpha, a,
lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, &alpha_half,
a, lda, b, ldb, &beta_half, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
float alpha_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
float beta_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
detail::gemm_impl<std::int8_t, std::int8_t, std::int32_t, float>(
q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb, &beta_float, c, ldc);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
/// Computes a batch of matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] batch_size Specifies the number of matrix multiply operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a[],
library_data_t a_type, int lda, const void *b[],
library_data_t b_type, int ldb, const void *beta,
void *c[], library_data_t c_type, int ldc,
int batch_size, library_data_t scaling_type) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
bool matched = false;
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_batch_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
#ifdef __INTEL_MKL__
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda,
b, ldb, beta, c, ldc, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
float alpha_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
float beta_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t,
float>(q, a_trans, b_trans, m, n, k, &alpha_float,
a, lda, b, ldb, &beta_float, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
#endif
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb, &beta_half, c, ldc,
batch_size);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
#endif
}
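// Example (illustrative sketch, not part of this header): a float pointer-array
// batch. The arrays of per-matrix pointers live in USM shared memory, since
// this overload throws when DPCT_USM_LEVEL_NONE is defined; the queue and
// sizes are assumptions of the example.
//
//   sycl::queue q;
//   int m = 4, n = 4, k = 4, batch = 2;
//   const float **a_arr = sycl::malloc_shared<const float *>(batch, q);
//   const float **b_arr = sycl::malloc_shared<const float *>(batch, q);
//   float **c_arr = sycl::malloc_shared<float *>(batch, q);
//   // ... point each a_arr[i], b_arr[i], c_arr[i] at a device matrix ...
//   float alpha = 1.0f, beta = 0.0f;
//   dpct::gemm_batch(q, oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::transpose::nontrans, m, n, k, &alpha,
//                    (const void **)a_arr, dpct::library_data_t::real_float, m,
//                    (const void **)b_arr, dpct::library_data_t::real_float, k,
//                    &beta, (void **)c_arr, dpct::library_data_t::real_float, m,
//                    batch, dpct::library_data_t::real_float);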
/// Computes a batch of matrix-matrix products with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] stride_a Stride between the different A matrices.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] stride_b Stride between the different B matrices.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] stride_c Stride between the different C matrices.
/// \param [in] batch_size Specifies the number of matrix multiply operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, library_data_t a_type,
int lda, long long int stride_a, const void *b,
library_data_t b_type, int ldb, long long int stride_b,
const void *beta, void *c, library_data_t c_type,
int ldc, long long int stride_c, int batch_size,
library_data_t scaling_type) {
bool matched = false;
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_batch_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
#ifdef __INTEL_MKL__
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda,
stride_a, b, ldb, stride_b, beta, c, ldc,
stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t,
std::int32_t>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
#endif
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, stride_a, b, ldb, stride_b,
&beta_half, c, ldc, stride_c, batch_size);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
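// Example (illustrative sketch, not part of this header): a strided float
// batch where the matrices of each problem sit back to back in single device
// allocations; the queue and sizes are assumptions of the example.
//
//   sycl::queue q;
//   int m = 4, n = 4, k = 4, batch = 3;
//   long long stride_a = (long long)m * k;
//   long long stride_b = (long long)k * n;
//   long long stride_c = (long long)m * n;
//   float *a = sycl::malloc_device<float>(stride_a * batch, q);
//   float *b = sycl::malloc_device<float>(stride_b * batch, q);
//   float *c = sycl::malloc_device<float>(stride_c * batch, q);
//   float alpha = 1.0f, beta = 0.0f;
//   dpct::gemm_batch(q, oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::transpose::nontrans, m, n, k, &alpha, a,
//                    dpct::library_data_t::real_float, m, stride_a, b,
//                    dpct::library_data_t::real_float, k, stride_b, &beta, c,
//                    dpct::library_data_t::real_float, m, stride_c, batch,
//                    dpct::library_data_t::real_float);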
/// This routine performs a special rank-k update of a symmetric matrix C by
/// general matrices A and B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] uplo Specifies whether C's data is stored in its upper or lower triangle.
/// \param [in] trans Specifies the operation to apply.
/// \param [in] n The number of rows and columns in C.
/// \param [in] k The inner dimension of matrix multiplications.
/// \param [in] alpha Scaling factor for the rank-k update.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for the rank-k update.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] ldc Leading dimension of C.
template <class T>
inline void syrk(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const T *beta, T *c,
int ldc) {
detail::rk_impl<false, T, T>(q, uplo, trans, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
}
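// Example (illustrative sketch, not part of this header): a float rank-k
// update accumulating into the lower triangle of C. The queue, the matrices
// a, b, c and their dimensions/leading dimensions are assumed to be set up by
// the caller.
//
//   float alpha = 1.0f, beta = 1.0f;
//   dpct::syrk(q, oneapi::mkl::uplo::lower, oneapi::mkl::transpose::nontrans,
//              n, k, &alpha, a, lda, b, ldb, &beta, c, ldc);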
/// This routine performs a special rank-k update of a Hermitian matrix C by
/// general matrices A and B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] uplo Specifies whether C's data is stored in its upper or lower triangle.
/// \param [in] trans Specifies the operation to apply.
/// \param [in] n The number of rows and columns in C.
/// \param [in] k The inner dimension of matrix multiplications.
/// \param [in] alpha Scaling factor for the rank-k update.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for the rank-k update.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] ldc Leading dimension of C.
template <class T, class Tbeta>
inline void herk(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const Tbeta *beta,
T *c, int ldc) {
detail::rk_impl<true, T, Tbeta>(q, uplo, trans, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
}
/// This routine performs a group of trsm operations. Each trsm solves an
/// equation of the form op(A) * X = alpha * B or X * op(A) = alpha * B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] left_right Specifies A multiplies X on the left or on the right.
/// \param [in] upper_lower Specifies A is upper or lower triangular.
/// \param [in] trans Specifies the operation applied to A.
/// \param [in] unit_diag Specifies whether A is unit triangular.
/// \param [in] m Number of rows of the B matrices.
/// \param [in] n Number of columns of the B matrices.
/// \param [in] alpha Scaling factor for the solutions.
/// \param [in] a Input matrices A.
/// \param [in] a_type Data type of the matrices A.
/// \param [in] lda Leading dimension of the matrices A.
/// \param [in, out] b Input and output matrices B.
/// \param [in] b_type Data type of the matrices B.
/// \param [in] ldb Leading dimension of the matrices B.
/// \param [in] batch_size Specifies the number of trsm operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void trsm_batch(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower,
oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n,
const void *alpha, const void **a, library_data_t a_type,
int lda, void **b, library_data_t b_type, int ldb,
int batch_size, library_data_t scaling_type) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float,
library_data_t::real_float): {
detail::trsm_batch_impl<float, float, float>(q, left_right, upper_lower,
trans, unit_diag, m, n, alpha,
a, lda, b, ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double,
library_data_t::real_double): {
detail::trsm_batch_impl<double, double, double>(
q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b,
ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float,
library_data_t::complex_float): {
detail::trsm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>>(q, left_right, upper_lower,
trans, unit_diag, m, n, alpha,
a, lda, b, ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double,
library_data_t::complex_double): {
detail::trsm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>>(q, left_right, upper_lower,
trans, unit_diag, m, n, alpha,
a, lda, b, ldb, batch_size);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
#endif
}
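// Example (illustrative sketch, not part of this header): solving two float
// triangular systems in one call. The matrices behind each pointer and the
// values of m, n, lda and ldb are assumed to be set up by the caller; this
// overload throws when DPCT_USM_LEVEL_NONE is defined.
//
//   int batch = 2;
//   const float **a_arr = sycl::malloc_shared<const float *>(batch, q);
//   float **b_arr = sycl::malloc_shared<float *>(batch, q);
//   // ... point a_arr[i] at each triangular A and b_arr[i] at each B ...
//   float alpha = 1.0f;
//   dpct::trsm_batch(q, oneapi::mkl::side::left, oneapi::mkl::uplo::lower,
//                    oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::diag::nonunit, m, n, &alpha,
//                    (const void **)a_arr, dpct::library_data_t::real_float,
//                    lda, (void **)b_arr, dpct::library_data_t::real_float,
//                    ldb, batch, dpct::library_data_t::real_float);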
/// Computes a triangular matrix-general matrix product.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] left_right Specifies A is on the left or right side of the
/// multiplication.
/// \param [in] upper_lower Specifies A is upper or lower triangular.
/// \param [in] trans Specifies the operation applied to A.
/// \param [in] unit_diag Specifies whether A is unit triangular.
/// \param [in] m Number of rows of B.
/// \param [in] n Number of columns of B.
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrices A.
/// \param [in] lda Leading dimension of the matrices A.
/// \param [in] b Input matrices B.
/// \param [in] ldb Leading dimension of the matrices B.
/// \param [out] c Output matrices C.
/// \param [in] ldc Leading dimension of the matrices C.
template <class T>
inline void trmm(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n, const T *alpha,
const T *a, int lda, const T *b, int ldb, T *c, int ldc) {
using Ty = typename DataType<T>::T2;
auto alpha_val = dpct::get_value(alpha, q);
if (b != c) {
dpct::matrix_mem_copy(c, b, ldc, ldb, m, n, dpct::device_to_device, q);
}
auto data_a = detail::get_memory(reinterpret_cast<const Ty *>(a));
auto data_c = detail::get_memory(reinterpret_cast<Ty *>(c));
oneapi::mkl::blas::column_major::trmm(q, left_right, upper_lower, trans,
unit_diag, m, n, alpha_val, data_a, lda,
data_c, ldc);
}
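// Example (illustrative sketch, not part of this header): multiplying B by an
// upper-triangular A on the left and writing the result to C. Because the
// routine above copies B into C first when they differ, B is left untouched.
// The queue, matrices and leading dimensions are assumed to exist.
//
//   float alpha = 1.0f;
//   dpct::trmm(q, oneapi::mkl::side::left, oneapi::mkl::uplo::upper,
//              oneapi::mkl::transpose::nontrans, oneapi::mkl::diag::nonunit,
//              m, n, &alpha, a, lda, b, ldb, c, ldc);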
} // namespace dpct
#endif // __DPCT_BLAS_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/atomic.hpp | //==---- atomic.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ATOMIC_HPP__
#define __DPCT_ATOMIC_HPP__
#include <sycl/sycl.hpp>
namespace dpct {
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_add(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_add(operand);
}
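// Example (illustrative sketch, not part of this header): every work-item of a
// kernel adds into one shared-USM counter through the overload above. The
// queue, range and allocation are assumptions of the example.
//
//   sycl::queue q;
//   unsigned int *counter = sycl::malloc_shared<unsigned int>(1, q);
//   *counter = 0;
//   q.parallel_for(sycl::range<1>(1024), [=](sycl::id<1>) {
//     dpct::atomic_fetch_add(counter, 1u);
//   }).wait();
//   // *counter == 1024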
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_add(operand);
}
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_add(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_add<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically subtract the value operand from the value at the addr and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to subtract from the value at \p addr
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_sub(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_sub(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_sub(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_sub(operand);
}
/// Atomically subtract the value operand from the value at the addr and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to subtract from the value at \p addr
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_sub(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_sub(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_sub<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically perform a bitwise AND between the value operand and the value at the addr
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise AND operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_and(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_and(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_and(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_and(operand);
}
/// Atomically perform a bitwise AND between the value operand and the value at the addr
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise AND operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_and(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_and(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_and<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically or the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise OR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_or(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_or(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_or(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_or(operand);
}
/// Atomically or the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise OR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_or(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_or(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_or<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically xor the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise XOR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_xor(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_xor(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_xor(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_xor(operand);
}
/// Atomically xor the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise XOR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_xor(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_xor(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_xor<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically calculate the minimum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_min(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_min(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_min(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_min(operand);
}
/// Atomically calculate the minimum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_min(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_min(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_min<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically calculate the maximum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_max(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_max(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_max(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_max(operand);
}
/// Atomically calculate the maximum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_max(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_max(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_max<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically set the value stored in \p addr to \p operand if the old value stored
/// in \p addr is equal to zero or greater than \p operand; otherwise decrement the
/// value stored in \p addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline unsigned int atomic_fetch_compare_dec(unsigned int *addr,
unsigned int operand) {
auto atm = sycl::atomic_ref<unsigned int, memoryOrder, memoryScope,
addressSpace>(addr[0]);
unsigned int old;
while (true) {
old = atm.load();
if (old == 0 || old > operand) {
if (atm.compare_exchange_strong(old, operand))
break;
} else if (atm.compare_exchange_strong(old, old - 1))
break;
}
return old;
}
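// Worked example of the semantics above (illustrative): with operand == 10,
//   value at addr == 0   -> stored value becomes 10, returns 0
//   value at addr == 7   -> stored value becomes 6,  returns 7
//   value at addr == 12  -> stored value becomes 10, returns 12
// i.e. the counter decrements toward zero and is reset to the threshold once
// it is zero or above the threshold (the same pattern as CUDA's atomicDec).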
/// Atomically increment the value stored in \p addr if the old value stored in
/// \p addr is less than \p operand; otherwise set the value stored in \p addr to 0.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline unsigned int atomic_fetch_compare_inc(unsigned int *addr,
unsigned int operand) {
auto atm = sycl::atomic_ref<unsigned int, memoryOrder, memoryScope,
addressSpace>(addr[0]);
unsigned int old;
while (true) {
old = atm.load();
if (old >= operand) {
if (atm.compare_exchange_strong(old, 0))
break;
} else if (atm.compare_exchange_strong(old, old + 1))
break;
}
return old;
}
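// Worked example of the semantics above (illustrative): with operand == 5,
//   value at addr == 3  -> stored value becomes 4, returns 3
//   value at addr == 5  -> stored value becomes 0, returns 5
//   value at addr == 9  -> stored value becomes 0, returns 9
// i.e. a counter that wraps back to zero once it reaches the threshold (the
// same pattern as CUDA's atomicInc).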
/// Atomically increment the value stored in \p addr if the old value stored in
/// \p addr is less than \p operand; otherwise set the value stored in \p addr to 0.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline unsigned int
atomic_fetch_compare_inc(unsigned int *addr, unsigned int operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr,
operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr,
operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr,
operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
/// Atomically exchange the value at the address addr with the value operand.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to be exchanged with the value pointed by \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_exchange(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.exchange(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_exchange(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.exchange(operand);
}
/// Atomically exchange the value at the address addr with the value operand.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to be exchanged with the value pointed by \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_exchange(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_exchange<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_exchange<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_exchange<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_exchange(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_exchange<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically compare the value at \p addr to the value expected and exchange
/// with the value desired if the value at \p addr is equal to the value expected.
/// Returns the value at the \p addr before the call.
/// \param [in, out] addr The multi_ptr to the data.
/// \param expected The value to compare against the value at \p addr.
/// \param desired The value to assign to \p addr if the value at \p addr is expected.
/// \param success The memory ordering used when comparison succeeds.
/// \param fail The memory ordering used when comparison fails.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
T atomic_compare_exchange_strong(
sycl::multi_ptr<T, addressSpace> addr, T expected, T desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(*addr);
atm.compare_exchange_strong(expected, desired, success, fail);
return expected;
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2, typename T3>
T1 atomic_compare_exchange_strong(
sycl::multi_ptr<T1, addressSpace> addr, T2 expected, T3 desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(*addr);
T1 expected_value = expected;
atm.compare_exchange_strong(expected_value, desired, success, fail);
return expected_value;
}
/// Atomically compare the value at \p addr to the value expected and exchange
/// with the value desired if the value at \p addr is equal to the value expected.
/// Returns the value at the \p addr before the call.
/// \param [in, out] addr The pointer to the data.
/// \param expected The value to compare against the value at \p addr.
/// \param desired The value to assign to \p addr if the value at \p addr is expected.
/// \param success The memory ordering used when comparison succeeds.
/// \param fail The memory ordering used when comparison fails.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
T atomic_compare_exchange_strong(
T *addr, T expected, T desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
atm.compare_exchange_strong(expected, desired, success, fail);
return expected;
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2, typename T3>
T1 atomic_compare_exchange_strong(
T1 *addr, T2 expected, T3 desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
T1 expected_value = expected;
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
atm.compare_exchange_strong(expected_value, desired, success, fail);
return expected_value;
}
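// Example (illustrative sketch, not part of this header): a compare-exchange
// loop built on the raw-pointer overload above, here scaling the value at
// addr by a factor. The function name is an assumption of the example.
//
//   inline float atomic_scale_example(float *addr, float factor) {
//     float old = *addr;
//     while (true) {
//       float seen =
//           dpct::atomic_compare_exchange_strong(addr, old, old * factor);
//       if (seen == old)
//         return old;  // the exchange happened
//       old = seen;    // another thread updated addr, retry with its value
//     }
//   }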
/// Atomic extension to implement standard APIs in std::atomic
namespace detail{
template <typename T> struct IsValidAtomicType {
static constexpr bool value =
(std::is_same<T, int>::value || std::is_same<T, unsigned int>::value ||
std::is_same<T, long>::value || std::is_same<T, unsigned long>::value ||
std::is_same<T, long long>::value ||
std::is_same<T, unsigned long long>::value ||
std::is_same<T, float>::value || std::is_same<T, double>::value ||
std::is_pointer<T>::value);
};
} // namespace detail
template <typename T,
sycl::memory_scope DefaultScope = sycl::memory_scope::system,
sycl::memory_order DefaultOrder = sycl::memory_order::seq_cst,
sycl::access::address_space Space =
sycl::access::address_space::generic_space>
class atomic{
static_assert(
detail::IsValidAtomicType<T>::value,
"Invalid atomic type. Valid types are int, unsigned int, long, "
"unsigned long, long long, unsigned long long, float, double "
"and pointer types");
T __d;
public:
/// default memory synchronization order
static constexpr sycl::memory_order default_read_order =
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space>::default_read_order;
static constexpr sycl::memory_order default_write_order =
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space>::default_write_order;
static constexpr sycl::memory_scope default_scope = DefaultScope;
static constexpr sycl::memory_order default_read_modify_write_order =
DefaultOrder;
/// Default constructor.
constexpr atomic() noexcept = default;
/// Constructor with initialize value.
constexpr atomic(T d) noexcept : __d(d){};
/// atomically replaces the value of the referenced object with a non-atomic argument
/// \param operand The value to replace the pointed value.
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
void store(T operand, sycl::memory_order memoryOrder = default_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
atm.store(operand, memoryOrder, memoryScope);
}
/// atomically obtains the value of the referenced object
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object
T load(sycl::memory_order memoryOrder = default_read_order,
sycl::memory_scope memoryScope = default_scope) const noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(
const_cast<T &>(__d));
return atm.load(memoryOrder, memoryScope);
}
/// atomically replaces the value of the referenced object and obtains the value held previously
/// \param operand The value to replace the pointed value.
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T exchange(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.exchange(operand, memoryOrder, memoryScope);
}
/// atomically compares the value of the referenced object with non-atomic argument
/// and performs atomic exchange if equal or atomic load if not
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param success The memory models for the read-modify-write
/// \param failure The memory models for load operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_weak(
T &expected, T desired,
sycl::memory_order success, sycl::memory_order failure,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_weak(expected, desired, success, failure, memoryScope);
}
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param memoryOrder The memory synchronization ordering for operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_weak(T &expected, T desired,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_weak(expected, desired, memoryOrder, memoryScope);
}
/// atomically compares the value of the referenced object with non-atomic argument
/// and performs atomic exchange if equal or atomic load if not
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param success The memory models for the read-modify-write
/// \param failure The memory models for load operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_strong(
T &expected, T desired,
sycl::memory_order success, sycl::memory_order failure,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_strong(expected, desired, success, failure, memoryScope);
}
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param memoryOrder The memory synchronization ordering for operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_strong(T &expected, T desired,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_strong(expected, desired, memoryOrder, memoryScope);
}
/// atomically adds the argument to the value stored in the atomic object and obtains the value held previously
/// \param operand The other argument of arithmetic addition
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T fetch_add(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.fetch_add(operand, memoryOrder, memoryScope);
}
/// atomically subtracts the argument from the value stored in the atomic object and obtains the value held previously
/// \param operand The other argument of arithmetic subtraction
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T fetch_sub(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.fetch_sub(operand, memoryOrder, memoryScope);
}
};
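// Example (illustrative sketch, not part of this header): driving the atomic
// wrapper above much like std::atomic.
//
//   dpct::atomic<int> flag(0);
//   flag.store(1);
//   int seen = flag.exchange(2);                          // seen == 1
//   int expected = 2;
//   bool ok = flag.compare_exchange_strong(expected, 3);  // ok == true
//   int before = flag.fetch_add(10);                      // before == 3
//   // flag.load() == 13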
} // namespace dpct
#endif // __DPCT_ATOMIC_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/rng_utils.hpp | //==---- rng_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_RNG_UTILS_HPP__
#define __DPCT_RNG_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
#include <oneapi/mkl/rng/device.hpp>
#endif
#include "device.hpp"
#include "lib_common_utils.hpp"
namespace dpct {
namespace rng {
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
namespace device {
/// The random number generator on device.
/// \tparam engine_t The device random number generator engine. It can only be
/// oneapi::mkl::rng::device::mrg32k3a<1> or
/// oneapi::mkl::rng::device::mrg32k3a<4> or
/// oneapi::mkl::rng::device::philox4x32x10<1> or
/// oneapi::mkl::rng::device::philox4x32x10<4> or
/// oneapi::mkl::rng::device::mcg59<1>.
template <typename engine_t> class rng_generator {
static_assert(
std::disjunction_v<
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<4>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<4>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>,
"engine_t can only be oneapi::mkl::rng::device::mrg32k3a<1> or "
"oneapi::mkl::rng::device::mrg32k3a<4> or "
"oneapi::mkl::rng::device::philox4x32x10<1> or "
"oneapi::mkl::rng::device::philox4x32x10<4> or "
"oneapi::mkl::rng::device::mcg59<1>.");
static constexpr bool _is_engine_vec_size_one = std::disjunction_v<
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>;
static constexpr std::uint64_t default_seed = 0;
oneapi::mkl::rng::device::bits<std::uint32_t> _distr_bits;
oneapi::mkl::rng::device::uniform_bits<std::uint32_t> _distr_uniform_bits;
oneapi::mkl::rng::device::gaussian<float> _distr_gaussian_float;
oneapi::mkl::rng::device::gaussian<double> _distr_gaussian_double;
oneapi::mkl::rng::device::lognormal<float> _distr_lognormal_float;
oneapi::mkl::rng::device::lognormal<double> _distr_lognormal_double;
oneapi::mkl::rng::device::poisson<std::uint32_t> _distr_poisson;
oneapi::mkl::rng::device::uniform<float> _distr_uniform_float;
oneapi::mkl::rng::device::uniform<double> _distr_uniform_double;
engine_t _engine;
public:
/// Default constructor of rng_generator
rng_generator() { _engine = engine_t(default_seed); }
/// Constructor of rng_generator if engine type is not mcg59
/// \param [in] seed The seed to initialize the engine state.
  /// \param [in] num_to_skip Set the number of elements that need to be skipped.
/// The number is calculated as: num_to_skip[0] + num_to_skip[1] * 2^64 +
/// num_to_skip[2] * 2^128 + ... + num_to_skip[n-1] * 2^(64*(n-1))
template <typename T = engine_t,
typename std::enable_if<!std::is_same_v<
T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr>
rng_generator(std::uint64_t seed,
std::initializer_list<std::uint64_t> num_to_skip) {
_engine = engine_t(seed, num_to_skip);
}
/// Constructor of rng_generator if engine type is mcg59
/// \param [in] seed The seed to initialize the engine state.
  /// \param [in] num_to_skip Set the number of elements that need to be skipped.
template <typename T = engine_t,
typename std::enable_if<std::is_same_v<
T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr>
rng_generator(std::uint64_t seed, std::uint64_t num_to_skip) {
_engine = engine_t(seed, num_to_skip);
}
  /// Generate random number(s) that obey the distribution \tparam distr_t.
  /// \tparam distr_t The distribution of the random number. It can only be
/// oneapi::mkl::rng::device::bits<std::uint32_t>,
/// oneapi::mkl::rng::device::uniform_bits<std::uint32_t>,
/// oneapi::mkl::rng::device::gaussian<float>,
/// oneapi::mkl::rng::device::gaussian<double>,
/// oneapi::mkl::rng::device::lognormal<float>,
/// oneapi::mkl::rng::device::lognormal<double>,
/// oneapi::mkl::rng::device::poisson<std::uint32_t>,
/// oneapi::mkl::rng::device::uniform<float> or
/// oneapi::mkl::rng::device::uniform<double>
/// \tparam vec_size The length of the return vector. It can only be 1, 2
/// or 4.
/// \param distr_params The parameter(s) for lognormal or poisson
/// distribution.
/// \return The vector of the random number(s).
template <typename distr_t, int vec_size, class... distr_params_t>
auto generate(distr_params_t... distr_params) {
static_assert(vec_size == 1 || vec_size == 2 || vec_size == 4,
"vec_size is not supported.");
static_assert(
std::disjunction_v<
std::is_same<distr_t,
oneapi::mkl::rng::device::bits<std::uint32_t>>,
std::is_same<distr_t,
oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>,
std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<double>>,
std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<double>>,
std::is_same<distr_t,
oneapi::mkl::rng::device::poisson<std::uint32_t>>,
std::is_same<distr_t, oneapi::mkl::rng::device::uniform<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::uniform<double>>>,
"distribution is not supported.");
if constexpr (std::is_same_v<
distr_t, oneapi::mkl::rng::device::bits<std::uint32_t>>) {
return generate_vec<vec_size>(_distr_bits);
}
if constexpr (std::is_same_v<
distr_t,
oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>) {
return generate_vec<vec_size>(_distr_uniform_bits);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::gaussian<float>>) {
return generate_vec<vec_size>(_distr_gaussian_float);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::gaussian<double>>) {
return generate_vec<vec_size>(_distr_gaussian_double);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::lognormal<float>>) {
return generate_vec<vec_size>(_distr_lognormal_float, distr_params...,
0.0f, 1.0f);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::lognormal<double>>) {
return generate_vec<vec_size>(_distr_lognormal_double, distr_params...,
0.0, 1.0);
}
if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::poisson<
std::uint32_t>>) {
return generate_vec<vec_size>(_distr_poisson, distr_params...);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::uniform<float>>) {
return generate_vec<vec_size>(_distr_uniform_float);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::uniform<double>>) {
return generate_vec<vec_size>(_distr_uniform_double);
}
}
/// Get the random number generator engine.
/// \return The reference of the internal random number generator engine.
engine_t &get_engine() { return _engine; }
private:
template <int vec_size, typename distr_t, class... distr_params_t>
auto generate_vec(distr_t &distr, distr_params_t... distr_params) {
if constexpr (sizeof...(distr_params_t)) {
typename distr_t::param_type pt(distr_params...);
distr.param(pt);
}
if constexpr (vec_size == 4) {
if constexpr (_is_engine_vec_size_one) {
sycl::vec<typename distr_t::result_type, 4> res;
res.x() = oneapi::mkl::rng::device::generate(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate(distr, _engine);
res.z() = oneapi::mkl::rng::device::generate(distr, _engine);
res.w() = oneapi::mkl::rng::device::generate(distr, _engine);
return res;
} else {
return oneapi::mkl::rng::device::generate(distr, _engine);
}
} else if constexpr (vec_size == 1) {
if constexpr (_is_engine_vec_size_one) {
return oneapi::mkl::rng::device::generate(distr, _engine);
} else {
return oneapi::mkl::rng::device::generate_single(distr, _engine);
}
} else if constexpr (vec_size == 2) {
if constexpr (_is_engine_vec_size_one) {
sycl::vec<typename distr_t::result_type, 2> res;
res.x() = oneapi::mkl::rng::device::generate(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate(distr, _engine);
return res;
} else {
sycl::vec<typename distr_t::result_type, 2> res;
res.x() = oneapi::mkl::rng::device::generate_single(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate_single(distr, _engine);
return res;
}
}
}
};
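// Example (illustrative sketch, not part of the original header): each
// work-item creates its own generator, skips ahead by its global id so the
// per-work-item streams do not overlap, and draws four Gaussian floats. The
// names `q`, `out`, `seed` and `n` are placeholders chosen for this example.
//
//   q.parallel_for(sycl::range<1>(n), [=](sycl::item<1> item) {
//     dpct::rng::device::rng_generator<
//         oneapi::mkl::rng::device::philox4x32x10<4>>
//         gen(seed, {4 * static_cast<std::uint64_t>(item.get_id(0))});
//     sycl::float4 r =
//         gen.generate<oneapi::mkl::rng::device::gaussian<float>, 4>();
//     out[item.get_id(0)] = r.x();
//   });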
} // namespace device
#endif
namespace host {
namespace detail {
class rng_generator_base {
public:
/// Set the seed of host rng_generator.
/// \param seed The engine seed.
virtual void set_seed(const std::uint64_t seed) = 0;
/// Set the dimensions of host rng_generator.
/// \param dimensions The engine dimensions.
virtual void set_dimensions(const std::uint32_t dimensions) = 0;
/// Set the queue of host rng_generator.
/// \param queue The engine queue.
virtual void set_queue(sycl::queue *queue) = 0;
/// Generate unsigned int random number(s) with 'uniform_bits' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform_bits(unsigned int *output,
std::int64_t n) = 0;
/// Generate unsigned long long random number(s) with 'uniform_bits'
/// distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform_bits(unsigned long long *output,
std::int64_t n) = 0;
/// Generate float random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
virtual inline void generate_lognormal(float *output, std::int64_t n, float m,
float s) = 0;
/// Generate double random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
virtual inline void generate_lognormal(double *output, std::int64_t n,
double m, double s) = 0;
/// Generate float random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
virtual inline void generate_gaussian(float *output, std::int64_t n,
float mean, float stddev) = 0;
/// Generate double random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
virtual inline void generate_gaussian(double *output, std::int64_t n,
double mean, double stddev) = 0;
/// Generate unsigned int random number(s) with 'poisson' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param lambda Lambda for the Poisson distribution.
virtual inline void generate_poisson(unsigned int *output, std::int64_t n,
double lambda) = 0;
/// Generate float random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform(float *output, std::int64_t n) = 0;
/// Generate double random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform(double *output, std::int64_t n) = 0;
/// Skip ahead several random number(s).
/// \param num_to_skip The number of random numbers to be skipped.
virtual void skip_ahead(const std::uint64_t num_to_skip) = 0;
protected:
sycl::queue *_queue{&dpct::get_default_queue()};
std::uint64_t _seed{0};
std::uint32_t _dimensions{1};
};
/// The random number generator on host.
template <typename engine_t = oneapi::mkl::rng::philox4x32x10>
class rng_generator : public rng_generator_base {
public:
/// Constructor of rng_generator.
rng_generator() : _engine(create_engine(_queue, _seed, _dimensions)) {}
/// Set the seed of host rng_generator.
/// \param seed The engine seed.
void set_seed(const std::uint64_t seed) {
if (seed == _seed) {
return;
}
_seed = seed;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Set the dimensions of host rng_generator.
/// \param dimensions The engine dimensions.
void set_dimensions(const std::uint32_t dimensions) {
if (dimensions == _dimensions) {
return;
}
_dimensions = dimensions;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Set the queue of host rng_generator.
/// \param queue The engine queue.
void set_queue(sycl::queue *queue) {
if (queue == _queue) {
return;
}
_queue = queue;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Generate unsigned int random number(s) with 'uniform_bits' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform_bits(unsigned int *output, std::int64_t n) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
static_assert(sizeof(unsigned int) == sizeof(std::uint32_t));
generate<oneapi::mkl::rng::uniform_bits<std::uint32_t>>(
(std::uint32_t *)output, n);
#endif
}
/// Generate unsigned long long random number(s) with 'uniform_bits'
/// distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform_bits(unsigned long long *output,
std::int64_t n) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
static_assert(sizeof(unsigned long long) == sizeof(std::uint64_t));
generate<oneapi::mkl::rng::uniform_bits<std::uint64_t>>(
(std::uint64_t *)output, n);
#endif
}
/// Generate float random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
inline void generate_lognormal(float *output, std::int64_t n, float m,
float s) {
generate<oneapi::mkl::rng::lognormal<float>>(output, n, m, s);
}
/// Generate double random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
inline void generate_lognormal(double *output, std::int64_t n, double m,
double s) {
generate<oneapi::mkl::rng::lognormal<double>>(output, n, m, s);
}
/// Generate float random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
inline void generate_gaussian(float *output, std::int64_t n, float mean,
float stddev) {
generate<oneapi::mkl::rng::gaussian<float>>(output, n, mean, stddev);
}
/// Generate double random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
inline void generate_gaussian(double *output, std::int64_t n, double mean,
double stddev) {
generate<oneapi::mkl::rng::gaussian<double>>(output, n, mean, stddev);
}
/// Generate unsigned int random number(s) with 'poisson' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param lambda Lambda for the Poisson distribution.
inline void generate_poisson(unsigned int *output, std::int64_t n,
double lambda) {
generate<oneapi::mkl::rng::poisson<unsigned int>>(output, n, lambda);
}
/// Generate float random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform(float *output, std::int64_t n) {
generate<oneapi::mkl::rng::uniform<float>>(output, n);
}
/// Generate double random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform(double *output, std::int64_t n) {
generate<oneapi::mkl::rng::uniform<double>>(output, n);
}
/// Skip ahead several random number(s).
/// \param num_to_skip The number of random numbers to be skipped.
void skip_ahead(const std::uint64_t num_to_skip) {
#ifndef __INTEL_MKL__
oneapi::mkl::rng::skip_ahead(_engine, num_to_skip);
#else
if constexpr (std::is_same_v<engine_t, oneapi::mkl::rng::mt2203>)
throw std::runtime_error("no skip_ahead method of mt2203 engine.");
else
oneapi::mkl::rng::skip_ahead(_engine, num_to_skip);
#endif
}
private:
static inline engine_t create_engine(sycl::queue *queue,
const std::uint64_t seed,
const std::uint32_t dimensions) {
#ifdef __INTEL_MKL__
return std::is_same_v<engine_t, oneapi::mkl::rng::sobol>
? engine_t(*queue, dimensions)
: engine_t(*queue, seed);
#else
return engine_t(*queue, seed);
#endif
}
template <typename distr_t, typename buffer_t, class... distr_params_t>
void generate(buffer_t *output, const std::int64_t n,
const distr_params_t... distr_params) {
auto output_buf = dpct::detail::get_memory(output);
oneapi::mkl::rng::generate(distr_t(distr_params...), _engine, n,
output_buf);
}
engine_t _engine{};
};
} // namespace detail
} // namespace host
enum class random_engine_type {
philox4x32x10,
mrg32k3a,
mt2203,
mt19937,
sobol,
mcg59
};
typedef std::shared_ptr<rng::host::detail::rng_generator_base> host_rng_ptr;
/// Create a host random number generator.
/// \param type The random engine type.
/// \return The pointer of random number generator.
inline host_rng_ptr create_host_rng(const random_engine_type type) {
switch (type) {
case random_engine_type::philox4x32x10:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::philox4x32x10>>();
case random_engine_type::mrg32k3a:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mrg32k3a>>();
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support these engines.
  case random_engine_type::mt2203:
    return std::make_shared<
        rng::host::detail::rng_generator<oneapi::mkl::rng::mt2203>>();
  case random_engine_type::mt19937:
    return std::make_shared<
        rng::host::detail::rng_generator<oneapi::mkl::rng::mt19937>>();
  case random_engine_type::sobol:
    return std::make_shared<
        rng::host::detail::rng_generator<oneapi::mkl::rng::sobol>>();
  case random_engine_type::mcg59:
    return std::make_shared<
        rng::host::detail::rng_generator<oneapi::mkl::rng::mcg59>>();
#endif
  default:
    throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
                             "Interfaces Project does not support this API.");
  }
}
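// Example (illustrative sketch, not part of the original header): create a
// host-side generator, bind it to a queue and fill a USM allocation with
// uniform floats. The names `q` and `data` are placeholders.
//
//   sycl::queue q;
//   float *data = sycl::malloc_shared<float>(1000, q);
//   dpct::rng::host_rng_ptr gen =
//       dpct::rng::create_host_rng(dpct::rng::random_engine_type::philox4x32x10);
//   gen->set_queue(&q);
//   gen->set_seed(1234);
//   gen->generate_uniform(data, 1000);
//   q.wait();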
} // namespace rng
} // namespace dpct
#endif // __DPCT_RNG_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/numeric.h | //==---- numeric.h --------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_NUMERIC_H__
#define __DPCT_NUMERIC_H__
namespace dpct {
template <typename Policy, typename InputIt1, typename InputIt2, typename T>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
InputIt2 first2, T init) {
return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
first2, init);
}
template <typename Policy, typename InputIt1, typename InputIt2, typename T,
typename BinaryOperation1, typename BinaryOperation2>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
InputIt2 first2, T init, BinaryOperation1 op1,
BinaryOperation2 op2) {
return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
first2, init, op1, op2);
}
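// Example (illustrative sketch, not part of the original header): dot product
// of two USM arrays with a oneDPL device policy; `q`, `a`, `b` and `n` are
// placeholders, and <oneapi/dpl/execution> is assumed to be included.
//
//   float dot = dpct::inner_product(
//       oneapi::dpl::execution::make_device_policy(q), a, a + n, b, 0.0f);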
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/iterators.h | //==---- iterators.h ------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ITERATORS_H__
#define __DPCT_ITERATORS_H__
#include <oneapi/dpl/iterator>
#include "functional.h"
namespace dpct {
namespace internal {
// Wrapper class returned from a dereferenced transform_iterator which was
// created using
// make_transform_output_iterator(). Used to apply the supplied transform
// function when writing into an object of this class.
//
// Example:
// int a[] = {0, 1, 2, 3, 4};
// int* p = a;
// auto f = [](auto v) {return v*v;};
// auto tr_out = dpct::make_transform_output_iterator(p+1, f);
// auto wrap = *tr_out; // wrap is a transform_output_ref_wrapper
// std::cout<<*(p+1)<<std::endl; // '1'
// wrap = 2; // apply function, store 2*2=4
// std::cout<<*(p+1)<<std::endl; // '4'
template <typename T, typename _UnaryFunc> class transform_output_ref_wrapper {
private:
T __my_reference_;
_UnaryFunc __my_unary_func_;
public:
template <typename U>
transform_output_ref_wrapper(U &&__reference, _UnaryFunc __unary_func)
: __my_reference_(std::forward<U>(__reference)),
__my_unary_func_(__unary_func) {}
// When writing to an object of this type, apply the supplied unary function,
// then write to the wrapped reference
template <typename UnaryInputType>
transform_output_ref_wrapper &operator=(const UnaryInputType &e) {
__my_reference_ = __my_unary_func_(e);
return *this;
}
};
// Unary functor to create a transform_output_ref_wrapper when a
// transform_iterator is dereferenced, so that the supplied unary function
// may be applied on write, resulting in a transform_output_iterator.
template <typename _UnaryFunc> struct _Unary_Out {
_Unary_Out(_UnaryFunc __f_) : __f(__f_) {}
_UnaryFunc __f;
template <typename T> auto operator()(T &&val) const {
return transform_output_ref_wrapper<T, _UnaryFunc>(std::forward<T>(val),
__f);
}
};
} // end namespace internal
using std::advance;
using std::distance;
template <typename T>
oneapi::dpl::counting_iterator<T> make_counting_iterator(const T &input) {
return oneapi::dpl::counting_iterator<T>(input);
}
template <typename _Tp> class constant_iterator {
public:
typedef std::false_type is_hetero;
typedef std::true_type is_passed_directly;
typedef std::ptrdiff_t difference_type;
typedef _Tp value_type;
typedef _Tp *pointer;
  // There is no storage behind the iterator, so we return a value instead of
  // a reference.
typedef const _Tp reference;
typedef const _Tp const_reference;
typedef std::random_access_iterator_tag iterator_category;
explicit constant_iterator(_Tp __init)
: __my_value_(__init), __my_counter_(0) {}
private:
// used to construct iterator instances with different counter values required
// by arithmetic operators
constant_iterator(const _Tp &__value, const difference_type &__offset)
: __my_value_(__value), __my_counter_(__offset) {}
public:
// non-const variants of access operators are not provided so unintended
// writes are caught at compile time.
const_reference operator*() const { return __my_value_; }
const_reference operator[](difference_type) const { return __my_value_; }
difference_type operator-(const constant_iterator &__it) const {
return __my_counter_ - __it.__my_counter_;
}
constant_iterator &operator+=(difference_type __forward) {
__my_counter_ += __forward;
return *this;
}
constant_iterator &operator-=(difference_type __backward) {
return *this += -__backward;
}
constant_iterator &operator++() { return *this += 1; }
constant_iterator &operator--() { return *this -= 1; }
constant_iterator operator++(int) {
constant_iterator __it(*this);
++(*this);
return __it;
}
constant_iterator operator--(int) {
constant_iterator __it(*this);
--(*this);
return __it;
}
constant_iterator operator-(difference_type __backward) const {
return constant_iterator(__my_value_, __my_counter_ - __backward);
}
constant_iterator operator+(difference_type __forward) const {
return constant_iterator(__my_value_, __my_counter_ + __forward);
}
friend constant_iterator operator+(difference_type __forward,
const constant_iterator __it) {
return __it + __forward;
}
bool operator==(const constant_iterator &__it) const {
return __my_value_ == __it.__my_value_ &&
this->__my_counter_ == __it.__my_counter_;
}
bool operator!=(const constant_iterator &__it) const {
return !(*this == __it);
}
bool operator<(const constant_iterator &__it) const {
return *this - __it < 0;
}
bool operator>(const constant_iterator &__it) const { return __it < *this; }
bool operator<=(const constant_iterator &__it) const {
return !(*this > __it);
}
bool operator>=(const constant_iterator &__it) const {
return !(*this < __it);
}
private:
_Tp __my_value_;
uint64_t __my_counter_;
};
template <typename _Tp>
constant_iterator<_Tp> make_constant_iterator(_Tp __value) {
return constant_iterator<_Tp>(__value);
}
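// Example (illustrative sketch, not part of the original header): use a
// constant_iterator as the second input range of a binary transform to add
// the same constant to every element.
//
//   std::vector<int> v = {1, 2, 3};
//   auto c = dpct::make_constant_iterator(5);
//   std::transform(v.begin(), v.end(), c, v.begin(), std::plus<int>());
//   // v is now {6, 7, 8}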
// key_value_pair class to represent a key and value, specifically a
// dereferenced arg_index_input_iterator
template <typename _KeyTp, typename _ValueTp> class key_value_pair {
public:
key_value_pair() = default;
key_value_pair(const _KeyTp &_key, const _ValueTp &_value)
: key(_key), value(_value) {}
bool operator==(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const {
return (key == _kvp.key) && (value == _kvp.value);
}
bool operator!=(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const {
return (key != _kvp.key) || (value != _kvp.value);
}
_KeyTp key;
_ValueTp value;
};
namespace detail {
template <typename KeyTp, typename _ValueTp> struct make_key_value_pair {
template <typename ValRefTp>
key_value_pair<KeyTp, _ValueTp>
operator()(const oneapi::dpl::__internal::tuple<KeyTp, ValRefTp> &tup) const {
return ::dpct::key_value_pair<KeyTp, _ValueTp>(::std::get<0>(tup),
::std::get<1>(tup));
}
};
template <class T> struct __zip_iterator_impl;
template <class... Ts> struct __zip_iterator_impl<std::tuple<Ts...>> {
using type = oneapi::dpl::zip_iterator<Ts...>;
};
} // end namespace detail
// dpct::zip_iterator can only accept std::tuple type as template argument for
// compatibility purpose. Please use oneapi::dpl::zip_iterator if you want to
// pass iterator's types directly.
template <typename... Ts>
using zip_iterator = typename detail::__zip_iterator_impl<Ts...>::type;
// arg_index_input_iterator is an iterator over an input iterator, with an
// index. When dereferenced, it returns a key_value_pair, which can be
// interrogated for the index key or the value from the input iterator.
template <typename InputIteratorT, typename OffsetT = ptrdiff_t,
typename OutputValueT =
typename ::std::iterator_traits<InputIteratorT>::value_type>
class arg_index_input_iterator
: public oneapi::dpl::transform_iterator<
oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>,
InputIteratorT>,
detail::make_key_value_pair<OffsetT, OutputValueT>> {
using arg_index_input_iterator_wrap = oneapi::dpl::transform_iterator<
oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>,
InputIteratorT>,
detail::make_key_value_pair<OffsetT, OutputValueT>>;
public:
typedef OffsetT difference_type;
  // signal to __get_sycl_range that this iterator is a direct pass iterator
using is_zip = ::std::true_type;
arg_index_input_iterator(const arg_index_input_iterator_wrap &__arg_wrap)
: arg_index_input_iterator_wrap(__arg_wrap) {}
arg_index_input_iterator(InputIteratorT __iter)
: arg_index_input_iterator_wrap(
oneapi::dpl::make_zip_iterator(
oneapi::dpl::counting_iterator(OffsetT(0)), __iter),
detail::make_key_value_pair<OffsetT, OutputValueT>()) {}
arg_index_input_iterator &operator=(const arg_index_input_iterator &__input) {
arg_index_input_iterator_wrap::operator=(__input);
return *this;
}
arg_index_input_iterator &operator++() {
arg_index_input_iterator_wrap::operator++();
return *this;
}
arg_index_input_iterator &operator--() {
arg_index_input_iterator_wrap::operator--();
return *this;
}
arg_index_input_iterator operator++(int) {
arg_index_input_iterator __it(*this);
++(*this);
return __it;
}
arg_index_input_iterator operator--(int) {
arg_index_input_iterator __it(*this);
--(*this);
return __it;
}
arg_index_input_iterator operator+(difference_type __forward) const {
return arg_index_input_iterator(
arg_index_input_iterator_wrap::operator+(__forward));
}
arg_index_input_iterator operator-(difference_type __backward) const {
return arg_index_input_iterator(
arg_index_input_iterator_wrap::operator-(__backward));
}
arg_index_input_iterator &operator+=(difference_type __forward) {
arg_index_input_iterator_wrap::operator+=(__forward);
return *this;
}
arg_index_input_iterator &operator-=(difference_type __backward) {
arg_index_input_iterator_wrap::operator-=(__backward);
return *this;
}
friend arg_index_input_iterator
operator+(difference_type __forward, const arg_index_input_iterator &__it) {
return __it + __forward;
}
difference_type operator-(const arg_index_input_iterator &__it) const {
return arg_index_input_iterator_wrap::operator-(__it);
}
bool operator==(const arg_index_input_iterator &__it) const {
return arg_index_input_iterator_wrap::operator==(__it);
}
bool operator!=(const arg_index_input_iterator &__it) const {
return !(*this == __it);
}
bool operator<(const arg_index_input_iterator &__it) const {
return *this - __it < 0;
}
bool operator>(const arg_index_input_iterator &__it) const {
return __it < *this;
}
bool operator<=(const arg_index_input_iterator &__it) const {
return !(*this > __it);
}
bool operator>=(const arg_index_input_iterator &__it) const {
return !(*this < __it);
}
// returns an arg_index_input_iterator with the same iter position, but a
// count reset to 0
arg_index_input_iterator create_normalized() {
return arg_index_input_iterator(
::std::get<1>(arg_index_input_iterator_wrap::base().base()));
}
};
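// Example (illustrative sketch, not part of the original header): pair each
// element with its index; `data` is a placeholder array.
//
//   int data[] = {7, 8, 9};
//   dpct::arg_index_input_iterator<int *> it(data);
//   auto kv = *(it + 2); // kv.key == 2, kv.value == 9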
template <typename IterT> struct io_iterator_pair {
inline io_iterator_pair() : selector(false) {}
inline io_iterator_pair(const IterT &first, const IterT &second)
: selector(false) {
iter[0] = first;
iter[1] = second;
}
inline IterT first() const { return selector ? iter[1] : iter[0]; }
inline IterT second() const { return selector ? iter[0] : iter[1]; }
inline void swap() { selector = !selector; }
bool selector;
IterT iter[2];
};
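// Example (illustrative sketch, not part of the original header): ping-pong
// buffering between two buffers across passes of an algorithm; `d_buf0` and
// `d_buf1` are placeholder device pointers.
//
//   dpct::io_iterator_pair<int *> bufs(d_buf0, d_buf1);
//   // pass 1 reads from bufs.first() and writes to bufs.second()
//   bufs.swap();
//   // pass 2 now reads the previous output through bufs.first()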
template <typename _Iter, typename _UnaryFunc>
auto make_transform_output_iterator(_Iter __it, _UnaryFunc __unary_func) {
return oneapi::dpl::transform_iterator(
__it, internal::_Unary_Out<_UnaryFunc>(__unary_func));
}
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/algorithm.h | //==---- algorithm.h ------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ALGORITHM_H__
#define __DPCT_ALGORITHM_H__
#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/numeric>
#include "functional.h"
#include "iterators.h"
#include "vector.h"
namespace dpct {
template <typename Policy, typename Iter1, typename Iter2, typename Pred,
typename T>
void replace_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p,
const T &new_value) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
std::transform(std::forward<Policy>(policy), first, last, mask, first,
internal::replace_if_fun<typename std::iterator_traits<Iter1>::value_type, Pred>(p, new_value));
}
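// Example (illustrative sketch, not part of the original header), assuming the
// predicate is applied to the mask element as in the stencil form of
// thrust::replace_if; `q`, `data` and `mask` are placeholder USM arrays.
//
//   // data = {1, 2, 3, 4}, mask = {0, 1, 0, 1}
//   dpct::replace_if(oneapi::dpl::execution::make_device_policy(q), data,
//                    data + 4, mask, [](int m) { return m != 0; }, -1);
//   // data = {1, -1, 3, -1}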
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred, typename T>
Iter3 replace_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred p, const T &new_value) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return std::transform(std::forward<Policy>(policy), first, last, mask, result,
internal::replace_if_fun<typename std::iterator_traits<Iter3>::value_type, Pred>(p, new_value));
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_hetero_execution_policy<Policy, Iter1>
remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
using policy_type = typename std::decay<Policy>::type;
using internal::__buffer;
using ValueType = typename std::iterator_traits<Iter1>::value_type;
__buffer<ValueType> _tmp(std::distance(first, last));
auto end = std::copy_if(
std::forward<Policy>(policy), make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(_tmp.get(), oneapi::dpl::discard_iterator()),
internal::negate_predicate_key_fun<Pred>(p));
return std::copy(std::forward<Policy>(policy), _tmp.get(),
std::get<0>(end.base()), first);
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
typename std::enable_if<!internal::is_hetero_execution_policy<
typename std::decay<Policy>::type>::value,
Iter1>::type
remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
using policy_type = typename std::decay<Policy>::type;
using ValueType = typename std::iterator_traits<Iter1>::value_type;
std::vector<ValueType> _tmp(std::distance(first, last));
auto end = std::copy_if(
policy, make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(_tmp.begin(), oneapi::dpl::discard_iterator()),
internal::negate_predicate_key_fun<Pred>(p));
return std::copy(policy, _tmp.begin(), std::get<0>(end.base()), first);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred>
Iter3 remove_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
auto ret_val = std::remove_copy_if(
std::forward<Policy>(policy), make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(result, oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <class Policy, class Iter1, class Iter2, class BinaryPred>
std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
BinaryPred binary_pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::unique(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
internal::compare_key_fun<BinaryPred>(binary_pred));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_first, values_first), ret_val);
return std::make_pair(keys_first + n1, values_first + n1);
}
template <class Policy, class Iter1, class Iter2>
std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
return unique(std::forward<Policy>(policy), keys_first, keys_last,
values_first, std::equal_to<T>());
}
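// Example (illustrative sketch, not part of the original header): drop
// consecutive duplicate keys together with their values; `keys` and `vals`
// are placeholder USM arrays and `q` a placeholder queue.
//
//   // keys = {1, 1, 2, 2, 3}, vals = {10, 11, 20, 21, 30}
//   auto ends = dpct::unique(oneapi::dpl::execution::make_device_policy(q),
//                            keys, keys + 5, vals);
//   // kept range: keys = {1, 2, 3}, vals = {10, 20, 30};
//   // ends.first / ends.second point one past the last kept key / value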
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class BinaryPred>
std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
Iter3 keys_result, Iter4 values_result,
BinaryPred binary_pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::unique_copy(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::unique_fun<BinaryPred>(binary_pred));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4>
std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
Iter3 keys_result, Iter4 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
auto comp = std::equal_to<T>();
return unique_copy(std::forward<Policy>(policy), keys_first, keys_last,
values_first, keys_result, values_result, comp);
}
template <typename Policy, typename Iter, typename Pred>
Iter partition_point(Policy &&policy, Iter first, Iter last, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
if (std::is_partitioned(std::forward<Policy>(policy), first, last, p))
return std::find_if_not(std::forward<Policy>(policy), first, last, p);
else
return first;
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred>
Iter3 copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::copy_if(
std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask),
oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)),
oneapi::dpl::make_zip_iterator(result, oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(pred));
return std::get<0>(ret_val.base());
}
template <class Policy, class Iter1, class Iter2, class UnaryOperation,
class Pred>
Iter2 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 result,
UnaryOperation unary_op, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
const auto n = std::distance(first, last);
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, result),
oneapi::dpl::make_zip_iterator(first, result) + n,
internal::transform_if_fun<T, Pred, UnaryOperation>(pred, unary_op));
return result + n;
}
template <class Policy, class Iter1, class Iter2, class Iter3,
class UnaryOperation, class Pred>
Iter3 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, UnaryOperation unary_op, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
using Ref1 = typename std::iterator_traits<Iter1>::reference;
using Ref2 = typename std::iterator_traits<Iter2>::reference;
const auto n = std::distance(first, last);
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, mask, result),
oneapi::dpl::make_zip_iterator(first, mask, result) + n,
internal::transform_if_unary_zip_mask_fun<T, Pred, UnaryOperation>(
pred, unary_op));
return result + n;
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class BinaryOperation, class Pred>
Iter4 transform_if(Policy &&policy, Iter1 first1, Iter1 last1, Iter2 first2,
Iter3 mask, Iter4 result, BinaryOperation binary_op,
Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
const auto n = std::distance(first1, last1);
using ZipIterator =
typename oneapi::dpl::zip_iterator<Iter1, Iter2, Iter3, Iter4>;
using T = typename std::iterator_traits<ZipIterator>::value_type;
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first1, first2, mask, result),
oneapi::dpl::make_zip_iterator(last1, first2 + n, mask + n, result + n),
internal::transform_if_zip_mask_fun<T, Pred, BinaryOperation>(pred,
binary_op));
return result + n;
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename OutputIter>
void scatter(Policy &&policy, InputIter1 first, InputIter1 last, InputIter2 map,
OutputIter result) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
oneapi::dpl::copy(policy, first, last,
oneapi::dpl::make_permutation_iterator(result, map));
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename OutputIter>
OutputIter gather(Policy &&policy, InputIter1 map_first, InputIter1 map_last,
InputIter2 input_first, OutputIter result) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto perm_begin =
oneapi::dpl::make_permutation_iterator(input_first, map_first);
const int n = ::std::distance(map_first, map_last);
return oneapi::dpl::copy(policy, perm_begin, perm_begin + n, result);
}
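// Example (illustrative sketch, not part of the original header): gather
// values through an index map; `q`, `map`, `input` and `result` are
// placeholder USM arrays.
//
//   // map = {2, 0, 1}, input = {10, 20, 30}
//   dpct::gather(oneapi::dpl::execution::make_device_policy(q), map, map + 3,
//                input, result);
//   // result = {30, 10, 20}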
template <typename Policy, typename InputIter1, typename InputIter2,
typename InputIter3, typename OutputIter, typename Predicate>
void scatter_if(Policy &&policy, InputIter1 first, InputIter1 last,
InputIter2 map, InputIter3 mask, OutputIter result,
Predicate pred) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
transform_if(policy, first, last, mask,
oneapi::dpl::make_permutation_iterator(result, map),
[=](auto &&v) { return v; }, [=](auto &&m) { return pred(m); });
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename InputIter3, typename OutputIter, typename Predicate>
OutputIter gather_if(Policy &&policy, InputIter1 map_first, InputIter1 map_last,
InputIter2 mask, InputIter3 input_first, OutputIter result,
Predicate pred) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto perm_begin =
oneapi::dpl::make_permutation_iterator(input_first, map_first);
const int n = std::distance(map_first, map_last);
return transform_if(policy, perm_begin, perm_begin + n, mask, result,
[=](auto &&v) { return v; },
[=](auto &&m) { return pred(m); });
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Iter5, typename Iter6>
std::pair<Iter5, Iter6>
merge(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto n1 = std::distance(keys_first1, keys_last1);
auto n2 = std::distance(keys_first2, keys_last2);
std::merge(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Iter5, typename Iter6, typename Comp>
std::pair<Iter5, Iter6>
merge(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto n1 = std::distance(keys_first1, keys_last1);
auto n2 = std::distance(keys_first2, keys_last2);
std::merge(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2);
}
template <class Policy, class Iter, class T>
void iota(Policy &&policy, Iter first, Iter last, T init, T step) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
std::transform(
std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0),
oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)),
first, internal::sequence_fun<T>(init, step));
}
template <class Policy, class Iter, class T>
void iota(Policy &&policy, Iter first, Iter last, T init) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
iota(std::forward<Policy>(policy), first, last, init, T(1));
}
template <class Policy, class Iter>
void iota(Policy &&policy, Iter first, Iter last) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
iota(std::forward<Policy>(policy), first, last, DiffSize(0), DiffSize(1));
}
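// Example (illustrative sketch, not part of the original header): fill a
// placeholder USM array `data` of length `n` with the strided sequence
// 0, 10, 20, ...
//
//   dpct::iota(oneapi::dpl::execution::make_device_policy(q), data, data + n,
//              0, 10);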
template <class Policy, class Iter1, class Iter2, class Comp>
void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto first = oneapi::dpl::make_zip_iterator(keys_first, values_first);
auto last = first + std::distance(keys_first, keys_last);
std::sort(std::forward<Policy>(policy), first, last,
internal::compare_key_fun<Comp>(comp));
}
template <class Policy, class Iter1, class Iter2>
void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
sort(std::forward<Policy>(policy), keys_first, keys_last, values_first,
internal::__less());
}
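// Example (illustrative sketch, not part of the original header): sort values
// by key; `keys` and `vals` are placeholder USM arrays of equal length and
// `q` a placeholder queue.
//
//   // keys = {3, 1, 2}, vals = {30, 10, 20}
//   dpct::sort(oneapi::dpl::execution::make_device_policy(q), keys, keys + 3,
//              vals);
//   // keys = {1, 2, 3}, vals = {10, 20, 30}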
template <class Policy, class Iter1, class Iter2, class Comp>
void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
std::stable_sort(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
internal::compare_key_fun<Comp>(comp));
}
template <class Policy, class Iter1, class Iter2>
void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
stable_sort(std::forward<Policy>(policy), keys_first, keys_last, values_first,
internal::__less());
}
template <class Policy, class Iter, class Operator>
void for_each_index(Policy &&policy, Iter first, Iter last, Operator unary_op) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
std::transform(
std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0),
oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)),
first, unary_op);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5>
std::pair<Iter4, Iter5>
set_intersection(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 keys_result, Iter5 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_intersection(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_last2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Comp>
std::pair<Iter4, Iter5>
set_intersection(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 keys_result, Iter5 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_intersection(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_last2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
std::pair<Iter5, Iter6>
set_symmetric_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2,
Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_symmetric_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
std::pair<Iter5, Iter6>
set_symmetric_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2,
Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_symmetric_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
std::pair<Iter5, Iter6>
set_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
std::pair<Iter5, Iter6> set_difference(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result,
Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>>
set_union(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_union(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>>
set_union(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result, Iter6 values_result,
Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_union(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 out_true, Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::partition_copy(
std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask),
oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)),
oneapi::dpl::make_zip_iterator(out_true, oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(out_false,
oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(p));
return std::make_pair(std::get<0>(ret_val.first.base()),
std::get<0>(ret_val.second.base()));
}
template <typename Policy, typename Iter1, typename Iter3, typename Iter4,
typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter3 out_true,
Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return std::partition_copy(std::forward<Policy>(policy), first, last,
out_true, out_false, p);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 out_true, Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return stable_partition_copy(std::forward<Policy>(policy), first, last, mask,
out_true, out_false, p);
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_hetero_execution_policy<Policy, Iter1>
stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
typedef typename std::decay<Policy>::type policy_type;
internal::__buffer<typename std::iterator_traits<Iter1>::value_type> _tmp(
std::distance(first, last));
std::copy(std::forward<Policy>(policy), mask,
mask + std::distance(first, last), _tmp.get());
auto ret_val =
std::stable_partition(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, _tmp.get()),
oneapi::dpl::make_zip_iterator(
last, _tmp.get() + std::distance(first, last)),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
typename std::enable_if<!internal::is_hetero_execution_policy<
typename std::decay<Policy>::type>::value,
Iter1>::type
stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
typedef typename std::decay<Policy>::type policy_type;
std::vector<typename std::iterator_traits<Iter1>::value_type> _tmp(
std::distance(first, last));
std::copy(std::forward<Policy>(policy), mask,
mask + std::distance(first, last), _tmp.begin());
auto ret_val = std::stable_partition(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, _tmp.begin()),
oneapi::dpl::make_zip_iterator(last,
_tmp.begin() + std::distance(first, last)),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_execution_policy<Policy, Iter1>
partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return stable_partition(std::forward<Policy>(policy), first, last, mask, p);
}
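// Note on the mask-based partition overloads: each element is zipped with its
// mask entry and the predicate is evaluated on the mask (as the mask-based
// overloads above suggest), so elements end up partitioned by p(mask[i])
// rather than by the element value itself. A hypothetical sketch:
//
//   std::vector<int> data{10, 20, 30, 40};
//   std::vector<int> mask{1, 0, 1, 0};
//   dpct::partition(oneapi::dpl::execution::seq, data.begin(), data.end(),
//                   mask.begin(), [](int m) { return m != 0; });
//   // data -> {10, 30, 20, 40}: masked-true elements precede masked-false ones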
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending = false, int begin_bit = 0,
int end_bit =
sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8);
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
int64_t n, bool descending = false, int begin_bit = 0,
int end_bit =
sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8);
namespace internal {
// Transforms key to a specific bit range and sorts the transformed key
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename transformed_key_t>
inline void transform_and_sort(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, int64_t n, bool descending,
int begin_bit, int end_bit) {
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
auto trans_key =
translate_key<key_t_value_t, transformed_key_t>(begin_bit, end_bit);
  // Using a comparison operator other than plain std::greater() or std::less()
  // prevents the radix-sort path and costs some performance. However, it is
  // necessary in order to apply the key transformation to the desired bit
  // range.
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out,
keys_out + n, [=](const auto a, const auto b) {
return comp(trans_key(a), trans_key(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<transformed_key_t>());
else
partial_sort_with_comp(::std::less<transformed_key_t>());
}
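// For illustration (an assumption based on the surrounding code, not stated in
// this header): translate_key presumably extracts the bits
// [begin_bit, end_bit) of each key, so e.g. begin_bit = 0 and end_bit = 8 on a
// 32-bit key orders elements by the low byte only, with transformed_key_t
// chosen just wide enough to hold the extracted bits.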
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline void sort_only(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, int64_t n, bool descending) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
if constexpr (::std::is_floating_point<key_t_value_t>::value) {
if (descending) {
      // A comparison operator other than std::greater() keeps -0.0 and 0.0
      // stable, at the cost of some performance because radix sort will not be
      // used.
auto comp_descending = [=](const auto a, const auto b) { return a > b; };
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n, comp_descending);
} else {
      // A comparison operator other than std::less() keeps -0.0 and 0.0
      // stable, at the cost of some performance because radix sort will not be
      // used.
auto comp_ascending = [=](const auto a, const auto b) { return a < b; };
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n, comp_ascending);
}
} else {
if (descending) {
oneapi::dpl::partial_sort_copy(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n,
keys_out, keys_out + n, ::std::greater<key_t_value_t>());
} else {
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n);
}
}
}
// Transforms key from a pair to a specific bit range and sorts the pairs by the
// transformed key
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename transform_key_t, typename value_t, typename value_out_t>
inline void transform_and_sort_pairs(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, value_t values_in,
value_out_t values_out, int64_t n,
bool descending, int begin_bit,
int end_bit) {
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in);
auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out);
auto trans_key =
translate_key<key_t_value_t, transform_key_t>(begin_bit, end_bit);
  // Using a comparison operator other than plain std::greater() or std::less()
  // prevents the radix-sort path and costs some performance. However, it is
  // necessary in order to apply the key transformation to the desired bit
  // range and to select the key from the zipped pair.
auto load_val = [=](const auto a) { return trans_key(std::get<0>(a)); };
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n,
zip_output, zip_output + n, [=](const auto a, const auto b) {
return comp(load_val(a), load_val(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<key_t_value_t>());
else
partial_sort_with_comp(::std::less<key_t_value_t>());
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline void sort_only_pairs(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, value_t values_in,
value_out_t values_out, int64_t n,
bool descending) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in);
auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out);
  // Using a comparison operator other than plain std::greater() or std::less()
  // prevents the radix-sort path and costs some performance. However, it is
  // necessary in order to select the key from the zipped pair.
auto load_val = [=](const auto a) { return std::get<0>(a); };
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n,
zip_output, zip_output + n, [=](const auto a, const auto b) {
return comp(load_val(a), load_val(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<key_t_value_t>());
else
partial_sort_with_comp(::std::less<key_t_value_t>());
}
// overload for key_out_t != std::nullptr_t
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
typename ::std::enable_if<!::std::is_null_pointer<key_out_t>::value>::type
sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
int clipped_begin_bit = ::std::max(begin_bit, 0);
int clipped_end_bit =
::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8);
int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1;
auto transform_and_sort_pairs_f = [&](auto x) {
using T = typename ::std::decay_t<decltype(x)>;
internal::transform_and_sort_pairs<decltype(policy), key_t, key_out_t, T,
value_t, value_out_t>(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, descending, clipped_begin_bit, clipped_end_bit);
};
if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) {
internal::sort_only_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, values_in, values_out, n, descending);
} else if (num_bytes == 1) {
transform_and_sort_pairs_f.template operator()<uint8_t>(0);
} else if (num_bytes == 2) {
transform_and_sort_pairs_f.template operator()<uint16_t>(0);
} else if (num_bytes <= 4) {
transform_and_sort_pairs_f.template operator()<uint32_t>(0);
} else // if (num_bytes <= 8)
{
transform_and_sort_pairs_f.template operator()<uint64_t>(0);
}
}
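// Dispatch summary: when the clipped bit range spans the whole key, the pairs
// are sorted directly via sort_only_pairs; otherwise the smallest unsigned
// type that can hold the clipped range is chosen for the key transform. For
// example, begin_bit = 4 and end_bit = 12 give
// num_bytes = (12 - 4 - 1) / 8 + 1 = 1, so the uint8_t instantiation is used.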
// overload for key_out_t == std::nullptr_t
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
typename ::std::enable_if<::std::is_null_pointer<key_out_t>::value>::type
sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
  // Create a temporary keys_out buffer to discard; the memory footprint could
  // be improved with a specialized iterator holding a single unchanging dummy
  // key_t element.
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> temp_keys_out{sycl::range<1>(n)};
internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(temp_keys_out), values_in,
values_out, n, descending, begin_bit, end_bit);
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_parallel_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
    value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
using offset_type =
typename ::std::iterator_traits<OffsetIteratorT>::value_type;
::std::vector<offset_type> host_accessible_offset_starts(nsegments);
::std::vector<offset_type> host_accessible_offset_ends(nsegments);
// make offsets accessible on host
::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
begin_offsets + nsegments, host_accessible_offset_starts.begin());
::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets,
end_offsets + nsegments, host_accessible_offset_ends.begin());
for (::std::uint64_t i = 0; i < nsegments; i++) {
uint64_t segment_begin = host_accessible_offset_starts[i];
uint64_t segment_end =
::std::min(n, (int64_t)host_accessible_offset_ends[i]);
if (segment_begin < segment_end) {
::dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
keys_in + segment_begin, keys_out + segment_begin,
values_in + segment_begin, values_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
}
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_parallel_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
using offset_type =
typename ::std::iterator_traits<OffsetIteratorT>::value_type;
::std::vector<offset_type> host_accessible_offset_starts(nsegments);
::std::vector<offset_type> host_accessible_offset_ends(nsegments);
// make offsets accessible on host
::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
begin_offsets + nsegments, host_accessible_offset_starts.begin());
::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets,
end_offsets + nsegments, host_accessible_offset_ends.begin());
for (::std::uint64_t i = 0; i < nsegments; i++) {
uint64_t segment_begin = host_accessible_offset_starts[i];
uint64_t segment_end =
::std::min(n, (int64_t)host_accessible_offset_ends[i]);
if (segment_begin < segment_end) {
::dpct::sort_keys(::std::forward<_ExecutionPolicy>(policy),
keys_in + segment_begin, keys_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
}
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_parallel_for_of_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
uint64_t segment_begin = begin_offsets[i];
uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]);
if (segment_begin == segment_end) {
return;
}
::dpct::sort_pairs(::std::execution::seq, keys_in + segment_begin,
keys_out + segment_begin, values_in + segment_begin,
values_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
});
});
policy.queue().wait();
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_parallel_for_of_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
uint64_t segment_begin = begin_offsets[i];
uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]);
if (segment_begin == segment_end) {
return;
}
::dpct::sort_keys(::std::execution::seq, keys_in + segment_begin,
keys_out + segment_begin, segment_end - segment_begin,
descending, begin_bit, end_bit);
});
});
policy.queue().wait();
}
template <typename _ExecutionPolicy, typename OffsetIteratorT>
inline void
mark_segments(_ExecutionPolicy &&policy, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, int64_t n, int64_t nsegments,
sycl::buffer<::std::size_t, 1> segments) {
::std::size_t work_group_size =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_work_group_size>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
::std::size_t sub_group_size = sg_sizes.empty() ? 0 : sg_sizes.back();
float avg_seg_size = (float)n / (float)nsegments;
if (avg_seg_size > work_group_size) {
    // If the average segment size is larger than the work-group size, use a
    // work-group to cooperatively mark each segment
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(work_group_size, ([=](sycl::id<1> id) {
for (::std::size_t seg = 0; seg < nsegments; seg++) {
::std::size_t i = begin_offsets[seg];
::std::size_t end = end_offsets[seg];
while (i + id < end) {
segments_acc[i + id] = seg;
i += work_group_size;
}
}
}));
})
.wait();
} else if (sub_group_size > 0 && avg_seg_size > sub_group_size / 2) {
    // If the average segment size is larger than half a sub-group, use a
    // sub-group to cooperatively mark each segment
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(
sycl::nd_range<1>{work_group_size, work_group_size},
([=](sycl::nd_item<1> item) {
auto sub_group = item.get_sub_group();
::std::size_t num_subgroups =
sub_group.get_group_range().size();
::std::size_t local_size = sub_group.get_local_range().size();
::std::size_t sub_group_id = sub_group.get_group_id();
while (sub_group_id < nsegments) {
::std::size_t subgroup_local_id = sub_group.get_local_id();
::std::size_t i = begin_offsets[sub_group_id];
::std::size_t end = end_offsets[sub_group_id];
while (i + subgroup_local_id < end) {
segments_acc[i + subgroup_local_id] = sub_group_id;
i += local_size;
}
sub_group_id += num_subgroups;
}
}));
})
.wait();
} else {
    // If the average segment size is small compared to the sub-group size, use
    // a single work-item to mark each segment
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(nsegments, ([=](sycl::id<1> seg) {
for (::std::size_t i = begin_offsets[seg];
i < end_offsets[seg]; i++) {
segments_acc[i] = seg;
}
}));
})
.wait();
}
}
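// After mark_segments returns, segments[i] holds the segment index that
// element i belongs to; the three branches above differ only in how the work
// of filling that buffer is distributed (work-group, sub-group, or one
// work-item per segment) based on the average segment size.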
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_two_pair_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)};
sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)};
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)};
mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
end_offsets, n, nsegments, segments);
  // Part 1: Sort by keys, keeping track of which segment each key was in
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(segments),
oneapi::dpl::begin(segments_sorted), n, descending);
// Part 2: Sort the segments with a stable sort to get back sorted segments.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
oneapi::dpl::begin(segments_sorted),
oneapi::dpl::begin(segments), oneapi::dpl::begin(keys_temp),
keys_out, n, false);
}
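// The two-sort approach above first sorts (key, segment-id) pairs globally,
// then re-sorts by segment id (described in the comments above as a stable
// sort), so keys remain ordered within each segment without launching one sort
// per segment.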
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_two_pair_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
    value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)};
sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)};
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)};
using value_t_value_t = typename ::std::iterator_traits<value_t>::value_type;
sycl::buffer<value_t_value_t, 1> values_temp{sycl::range<1>(n)};
mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
end_offsets, n, nsegments, segments);
auto zip_seg_vals =
oneapi::dpl::make_zip_iterator(oneapi::dpl::begin(segments), values_in);
auto zip_seg_vals_out = oneapi::dpl::make_zip_iterator(
oneapi::dpl::begin(segments_sorted), oneapi::dpl::begin(values_temp));
  // Part 1: Sort by keys, keeping track of which segment each key was in
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(keys_temp), zip_seg_vals,
zip_seg_vals_out, n, descending);
auto zip_keys_vals = oneapi::dpl::make_zip_iterator(
oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(values_temp));
auto zip_keys_vals_out = oneapi::dpl::make_zip_iterator(keys_out, values_out);
// Part 2: Sort the segments with a stable sort to get back sorted segments.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
oneapi::dpl::begin(segments_sorted),
oneapi::dpl::begin(segments), zip_keys_vals,
zip_keys_vals_out, n, false);
}
} // end namespace internal
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, values_in, values_out, n, descending,
begin_bit, end_bit);
}
template <typename _ExecutionPolicy, typename key_t, typename value_t>
inline void sort_pairs(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys,
io_iterator_pair<value_t> &values, int64_t n, bool descending = false,
bool do_swap_iters = false,
int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), values.first(), values.second(), n, descending,
begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
values.swap();
}
}
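// Usage note: with the io_iterator_pair overload, keys.first()/values.first()
// act as the input buffers and keys.second()/values.second() receive the
// sorted output; passing do_swap_iters = true swaps each pair afterwards so
// that first() refers to the sorted data, which is convenient for
// double-buffered, multi-pass sorting.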
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
int64_t n, bool descending, int begin_bit, int end_bit) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
int clipped_begin_bit = ::std::max(begin_bit, 0);
int clipped_end_bit =
::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8);
int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1;
auto transform_and_sort_f = [&](auto x) {
using T = typename ::std::decay_t<decltype(x)>;
internal::transform_and_sort<decltype(policy), key_t, key_out_t, T>(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
descending, clipped_begin_bit, clipped_end_bit);
};
if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) {
internal::sort_only(::std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, n, descending);
} else if (num_bytes == 1) {
transform_and_sort_f.template operator()<uint8_t>(0);
} else if (num_bytes == 2) {
transform_and_sort_f.template operator()<uint16_t>(0);
} else if (num_bytes <= 4) {
transform_and_sort_f.template operator()<uint32_t>(0);
} else // if (num_bytes <= 8)
{
transform_and_sort_f.template operator()<uint64_t>(0);
}
}
template <typename _ExecutionPolicy, typename key_t>
inline void sort_keys(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n,
bool descending = false,
bool do_swap_iters = false,
int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sort_keys(std::forward<_ExecutionPolicy>(policy), keys.first(), keys.second(),
n, descending, begin_bit, end_bit);
if (do_swap_iters)
keys.swap();
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
segmented_sort_keys(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
int compute_units =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_compute_units>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back();
  // Use a parallel_for of serial sorts when there are enough segments to load
  // balance across the device, i.e. the number of segments is large compared
  // to the available compute capability.
if (nsegments >
compute_units *
(policy.queue().get_device().is_gpu() ? subgroup_size : 1)) {
dpct::internal::segmented_sort_keys_by_parallel_for_of_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
} else if (nsegments < 512) // for loop of parallel sorts when we have a small
// number of total sorts to limit total overhead
{
dpct::internal::segmented_sort_keys_by_parallel_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
} else // decent catch all using 2 full sorts
{
dpct::internal::segmented_sort_keys_by_two_pair_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
}
}
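// Heuristic recap: when the segment count exceeds the number of compute units
// (scaled by the sub-group size on GPUs), each segment is sorted serially
// inside a device parallel_for; otherwise, fewer than 512 segments are handled
// with a host loop of parallel sorts, and the remaining cases fall back to the
// two full key/segment-id sorts.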
template <typename _ExecutionPolicy, typename key_t, typename OffsetIteratorT>
inline void segmented_sort_keys(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false,
bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
segmented_sort_keys(::std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), n, nsegments, begin_offsets, end_offsets,
descending, begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
segmented_sort_pairs(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
int compute_units =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_compute_units>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back();
  // Use a parallel_for of serial sorts when there are enough segments to load
  // balance across the device, i.e. the number of segments is large compared
  // to the available compute capability.
if (nsegments >
compute_units *
(policy.queue().get_device().is_gpu() ? subgroup_size : 1)) {
dpct::internal::segmented_sort_pairs_by_parallel_for_of_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
} else if (nsegments < 512) // for loop of parallel sorts when we have a small
// number of total sorts to limit total overhead
{
dpct::internal::segmented_sort_pairs_by_parallel_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
} else // decent catch all using 2 full sorts
{
dpct::internal::segmented_sort_pairs_by_two_pair_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
}
}
template <typename _ExecutionPolicy, typename key_t, typename value_t,
typename OffsetIteratorT>
inline void segmented_sort_pairs(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys,
io_iterator_pair<value_t> &values, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
segmented_sort_pairs(std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), values.first(), values.second(), n,
nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
values.swap();
}
}
template <typename _ExecutionPolicy, typename Iter1, typename Iter2>
inline void reduce_argmax(_ExecutionPolicy &&policy, Iter1 input, Iter2 output,
::std::size_t n) {
dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input);
auto ret = ::std::max_element(
::std::forward<_ExecutionPolicy>(policy), input_arg_idx,
input_arg_idx + n,
[](const auto &a, const auto &b) { return (a.value < b.value); });
::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output);
}
template <typename _ExecutionPolicy, typename Iter1, typename Iter2>
inline void reduce_argmin(_ExecutionPolicy &&policy, Iter1 input, Iter2 output,
::std::size_t n) {
dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input);
auto ret = ::std::min_element(
::std::forward<_ExecutionPolicy>(policy), input_arg_idx,
input_arg_idx + n,
[](const auto &a, const auto &b) { return (a.value < b.value); });
::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output);
}
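// reduce_argmax / reduce_argmin wrap the input in an arg_index_input_iterator
// and copy the single winning element to *output, so the output iterator is
// expected to reference a compatible (index, value) pair type (an assumption
// based on the copy of `ret` above; the pair type itself is defined elsewhere).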
template <typename _ExecutionPolicy, typename Iter1,
typename ValueLessComparable, typename StrictWeakOrdering>
inline ::std::pair<Iter1, Iter1>
equal_range(_ExecutionPolicy &&policy, Iter1 start, Iter1 end,
const ValueLessComparable &value, StrictWeakOrdering comp) {
::std::vector<::std::int64_t> res_lower(1);
::std::vector<::std::int64_t> res_upper(1);
::std::vector<ValueLessComparable> value_vec(1, value);
::oneapi::dpl::lower_bound(policy, start, end, value_vec.begin(),
value_vec.end(), res_lower.begin(), comp);
::oneapi::dpl::upper_bound(::std::forward<_ExecutionPolicy>(policy), start,
end, value_vec.begin(), value_vec.end(),
res_upper.begin(), comp);
auto result = ::std::make_pair(start + res_lower[0], start + res_upper[0]);
return result;
}
template <typename _ExecutionPolicy, typename Iter1,
typename ValueLessComparable>
inline ::std::pair<Iter1, Iter1> equal_range(_ExecutionPolicy &&policy,
Iter1 start, Iter1 end,
const ValueLessComparable &value) {
return equal_range(::std::forward<_ExecutionPolicy>(policy), start, end,
value, internal::__less());
}
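// equal_range runs a device lower_bound and upper_bound for a single value
// (staged through one-element host vectors of indices) and returns the
// [first-match, past-last-match) iterator pair as start + lower and
// start + upper; as with std::equal_range, the input range is assumed to be
// sorted with respect to comp.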
template <typename Policy, typename Iter1, typename Iter2, typename Iter3>
inline ::std::enable_if_t<
dpct::internal::is_iterator<Iter1>::value &&
dpct::internal::is_iterator<Iter2>::value &&
internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value>
segmented_reduce_argmin(Policy &&policy, Iter1 keys_in, Iter2 keys_out,
::std::int64_t nsegments, Iter3 begin_offsets,
Iter3 end_offsets) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
if (end_offsets[i] <= begin_offsets[i]) {
keys_out[i] = dpct::key_value_pair(
1, ::std::numeric_limits<
typename ::std::iterator_traits<Iter1>::value_type>::max());
} else {
dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in +
begin_offsets[i]);
keys_out[i] = *::std::min_element(
arg_index, arg_index + (end_offsets[i] - begin_offsets[i]),
[](const auto &a, const auto &b) { return a.value < b.value; });
}
});
});
policy.queue().wait();
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3>
inline ::std::enable_if_t<
dpct::internal::is_iterator<Iter1>::value &&
dpct::internal::is_iterator<Iter2>::value &&
internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value>
segmented_reduce_argmax(Policy &&policy, Iter1 keys_in, Iter2 keys_out,
::std::int64_t nsegments, Iter3 begin_offsets,
Iter3 end_offsets) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
if (end_offsets[i] <= begin_offsets[i]) {
keys_out[i] = dpct::key_value_pair(
1,
::std::numeric_limits<
typename ::std::iterator_traits<Iter1>::value_type>::lowest());
} else {
dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in +
begin_offsets[i]);
keys_out[i] = *::std::max_element(
arg_index, arg_index + (end_offsets[i] - begin_offsets[i]),
[](const auto &a, const auto &b) { return a.value < b.value; });
}
});
});
policy.queue().wait();
}
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/memory.h | //==---- memory.h ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_H__
#define __DPCT_MEMORY_H__
#include <sycl/sycl.hpp>
// Memory management section:
// device_pointer, device_reference, swap, device_iterator, malloc_device,
// device_new, free_device, device_delete
namespace dpct {
namespace detail {
template <typename T>
struct make_allocatable
{
using type = T;
};
template <>
struct make_allocatable<void>
{
using type = dpct::byte_t;
};
#if defined(__LIBSYCL_MAJOR_VERSION) && defined(__LIBSYCL_MINOR_VERSION) && \
defined(__LIBSYCL_PATCH_VERSION)
#define _DPCT_LIBSYCL_VERSION \
(__LIBSYCL_MAJOR_VERSION * 10000 + __LIBSYCL_MINOR_VERSION * 100 + \
__LIBSYCL_PATCH_VERSION)
#else
#define _DPCT_LIBSYCL_VERSION 0
#endif
template <typename _DataT>
using __buffer_allocator =
#if _DPCT_LIBSYCL_VERSION >= 60000
sycl::buffer_allocator<typename make_allocatable<_DataT>::type>;
#else
sycl::buffer_allocator;
#endif
} // namespace detail
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write,
typename Allocator = detail::__buffer_allocator<T>>
class device_pointer;
#else
template <typename T> class device_pointer;
#endif
template <typename T> struct device_reference {
using pointer = device_pointer<T>;
using value_type = T;
template <typename OtherT>
device_reference(const device_reference<OtherT> &input)
: value(input.value) {}
device_reference(const pointer &input) : value((*input).value) {}
device_reference(value_type &input) : value(input) {}
template <typename OtherT>
device_reference &operator=(const device_reference<OtherT> &input) {
value = input;
return *this;
};
device_reference &operator=(const device_reference &input) {
T val = input.value;
value = val;
return *this;
};
device_reference &operator=(const value_type &x) {
value = x;
return *this;
};
pointer operator&() const { return pointer(&value); };
operator value_type() const { return T(value); }
device_reference &operator++() {
++value;
return *this;
};
device_reference &operator--() {
--value;
return *this;
};
device_reference operator++(int) {
device_reference ref(*this);
++(*this);
return ref;
};
device_reference operator--(int) {
device_reference ref(*this);
--(*this);
return ref;
};
device_reference &operator+=(const T &input) {
value += input;
return *this;
};
device_reference &operator-=(const T &input) {
value -= input;
return *this;
};
device_reference &operator*=(const T &input) {
value *= input;
return *this;
};
device_reference &operator/=(const T &input) {
value /= input;
return *this;
};
device_reference &operator%=(const T &input) {
value %= input;
return *this;
};
device_reference &operator&=(const T &input) {
value &= input;
return *this;
};
device_reference &operator|=(const T &input) {
value |= input;
return *this;
};
device_reference &operator^=(const T &input) {
value ^= input;
return *this;
};
device_reference &operator<<=(const T &input) {
value <<= input;
return *this;
};
device_reference &operator>>=(const T &input) {
value >>= input;
return *this;
};
void swap(device_reference &input) {
T tmp = (*this);
*this = (input);
input = (tmp);
}
T &value;
};
template <typename T>
void swap(device_reference<T> &x, device_reference<T> &y) {
x.swap(y);
}
template <typename T> void swap(T &x, T &y) {
T tmp = x;
x = y;
y = tmp;
}
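// device_reference is a proxy that stores a T& so that assignments and
// compound operators forward to the referenced value; the swap overloads above
// let generic code exchange two such references (or two plain values via the
// fallback overload) with dpct::swap(a, b).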
namespace internal {
// struct for checking if iterator is heterogeneous or not
template <typename Iter,
typename Void = void> // for non-heterogeneous iterators
struct is_hetero_iterator : std::false_type {};
template <typename Iter> // for heterogeneous iterators
struct is_hetero_iterator<
Iter, typename std::enable_if<Iter::is_hetero::value, void>::type>
: std::true_type {};
} // namespace internal
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode, typename Allocator>
class device_iterator;
template <typename ValueType, typename Allocator, typename Derived>
class device_pointer_base {
protected:
sycl::buffer<ValueType, 1, Allocator> buffer;
std::size_t idx;
public:
using pointer = ValueType *;
using difference_type = std::make_signed<std::size_t>::type;
device_pointer_base(sycl::buffer<ValueType, 1> in, std::size_t i = 0)
: buffer(in), idx(i) {}
#ifdef __USE_DPCT
template <typename OtherT>
device_pointer_base(OtherT *ptr)
: buffer(
dpct::detail::mem_mgr::instance()
.translate_ptr(ptr)
.buffer.template reinterpret<ValueType, 1>(sycl::range<1>(
dpct::detail::mem_mgr::instance().translate_ptr(ptr).size /
sizeof(ValueType)))),
idx(ptr - (ValueType*)dpct::detail::mem_mgr::instance()
.translate_ptr(ptr).alloc_ptr) {}
#endif
device_pointer_base(const std::size_t count)
: buffer(sycl::range<1>(count / sizeof(ValueType))), idx() {}
  // sycl::buffer has no default ctor; pass a zero-sized range to create an empty buffer
device_pointer_base() : buffer(sycl::range<1>(0)) {}
device_pointer_base(const device_pointer_base &in)
: buffer(in.buffer), idx(in.idx) {}
pointer get() const {
auto res =
(const_cast<device_pointer_base *>(this)
->buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
operator ValueType *() {
auto res = (buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
operator ValueType *() const {
auto res =
(const_cast<device_pointer_base *>(this)
->buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
Derived operator+(difference_type forward) const {
return Derived{buffer, idx + forward};
}
Derived operator-(difference_type backward) const {
return Derived{buffer, idx - backward};
}
Derived operator++(int) {
Derived p(buffer, idx);
idx += 1;
return p;
}
Derived operator--(int) {
Derived p(buffer, idx);
idx -= 1;
return p;
}
difference_type operator-(const Derived &it) const { return idx - it.idx; }
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return idx - std::distance(oneapi::dpl::begin(buffer), it);
}
std::size_t get_idx() const { return idx; } // required
sycl::buffer<ValueType, 1, Allocator> get_buffer() {
return buffer;
} // required
};
template <typename T, sycl::access_mode Mode, typename Allocator>
class device_pointer
: public device_pointer_base<T, Allocator,
device_pointer<T, Mode, Allocator>> {
private:
using base_type = device_pointer_base<T, Allocator, device_pointer>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type;
static constexpr sycl::access_mode mode = Mode; // required
device_pointer(sycl::buffer<T, 1> in, std::size_t i = 0) : base_type(in, i) {}
#ifdef __USE_DPCT
template <typename OtherT> device_pointer(OtherT *ptr) : base_type(ptr) {}
#endif
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer(const device_pointer &in) : base_type(in) {}
device_pointer &operator+=(difference_type forward) {
this->idx += forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->idx -= backward;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
this->idx += 1;
return *this;
}
device_pointer &operator--() {
this->idx -= 1;
return *this;
}
};
template <sycl::access_mode Mode, typename Allocator>
class device_pointer<void, Mode, Allocator>
: public device_pointer_base<dpct::byte_t, Allocator,
device_pointer<void, Mode, Allocator>> {
private:
using base_type =
device_pointer_base<dpct::byte_t, Allocator, device_pointer>;
public:
using value_type = dpct::byte_t;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = void *;
using reference = value_type &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type;
static constexpr sycl::access_mode mode = Mode; // required
device_pointer(sycl::buffer<value_type, 1> in, std::size_t i = 0)
: base_type(in, i) {}
#ifdef __USE_DPCT
template <typename OtherT> device_pointer(OtherT *ptr) : base_type(ptr) {}
#endif
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer(const device_pointer &in) : base_type(in) {}
device_pointer &operator+=(difference_type forward) {
this->idx += forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->idx -= backward;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
this->idx += 1;
return *this;
}
device_pointer &operator--() {
this->idx -= 1;
return *this;
}
};
#else
template <typename T> class device_iterator;
template <typename ValueType, typename Derived> class device_pointer_base {
protected:
ValueType *ptr;
public:
using pointer = ValueType *;
using difference_type = std::make_signed<std::size_t>::type;
device_pointer_base(ValueType *p) : ptr(p) {}
device_pointer_base(const std::size_t count) {
sycl::queue default_queue = dpct::get_default_queue();
ptr = static_cast<ValueType *>(sycl::malloc_shared(
count, default_queue.get_device(), default_queue.get_context()));
}
device_pointer_base() {}
pointer get() const { return ptr; }
operator ValueType *() { return ptr; }
operator ValueType *() const { return ptr; }
ValueType &operator[](difference_type idx) { return ptr[idx]; }
ValueType &operator[](difference_type idx) const { return ptr[idx]; }
Derived operator+(difference_type forward) const {
return Derived{ptr + forward};
}
Derived operator-(difference_type backward) const {
return Derived{ptr - backward};
}
Derived operator++(int) {
Derived p(ptr);
++ptr;
return p;
}
Derived operator--(int) {
Derived p(ptr);
--ptr;
return p;
}
difference_type operator-(const Derived &it) const { return ptr - it.ptr; }
};
template <typename T>
class device_pointer : public device_pointer_base<T, device_pointer<T>> {
private:
using base_type = device_pointer_base<T, device_pointer<T>>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using const_reference = const T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
device_pointer(T *p) : base_type(p) {}
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer &operator=(const device_iterator<T> &in) {
this->ptr = static_cast<device_pointer<T>>(in).ptr;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
++(this->ptr);
return *this;
}
device_pointer &operator--() {
--(this->ptr);
return *this;
}
device_pointer &operator+=(difference_type forward) {
this->ptr = this->ptr + forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->ptr = this->ptr - backward;
return *this;
}
};
template <>
class device_pointer<void>
: public device_pointer_base<dpct::byte_t, device_pointer<void>> {
private:
using base_type = device_pointer_base<dpct::byte_t, device_pointer<void>>;
public:
using value_type = dpct::byte_t;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = void *;
using reference = value_type &;
using const_reference = const value_type &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
device_pointer(void *p) : base_type(static_cast<value_type *>(p)) {}
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
pointer get() const { return static_cast<pointer>(this->ptr); }
operator void *() { return this->ptr; }
operator void *() const { return this->ptr; }
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
++(this->ptr);
return *this;
}
device_pointer &operator--() {
--(this->ptr);
return *this;
}
device_pointer &operator+=(difference_type forward) {
this->ptr = this->ptr + forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->ptr = this->ptr - backward;
return *this;
}
};
#endif
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write,
typename Allocator = detail::__buffer_allocator<T>>
class device_iterator : public device_pointer<T, Mode, Allocator> {
using Base = device_pointer<T, Mode, Allocator>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type; // required
static constexpr sycl::access_mode mode = Mode; // required
device_iterator() : Base() {}
device_iterator(sycl::buffer<T, 1, Allocator> vec, std::size_t index)
: Base(vec, index) {}
template <sycl::access_mode inMode>
device_iterator(const device_iterator<T, inMode, Allocator> &in)
: Base(in.buffer, in.idx) {} // required for iter_mode
device_iterator &operator=(const device_iterator &in) {
Base::buffer = in.buffer;
Base::idx = in.idx;
return *this;
}
reference operator*() const {
return const_cast<device_iterator *>(this)
->buffer.template get_access<mode>()[Base::idx];
}
reference operator[](difference_type i) const { return *(*this + i); }
device_iterator &operator++() {
++Base::idx;
return *this;
}
device_iterator &operator--() {
--Base::idx;
return *this;
}
device_iterator operator++(int) {
device_iterator it(*this);
++(*this);
return it;
}
device_iterator operator--(int) {
device_iterator it(*this);
--(*this);
return it;
}
device_iterator operator+(difference_type forward) const {
const auto new_idx = Base::idx + forward;
return {Base::buffer, new_idx};
}
device_iterator &operator+=(difference_type forward) {
Base::idx += forward;
return *this;
}
device_iterator operator-(difference_type backward) const {
return {Base::buffer, Base::idx - backward};
}
device_iterator &operator-=(difference_type backward) {
Base::idx -= backward;
return *this;
}
friend device_iterator operator+(difference_type forward,
const device_iterator &it) {
return it + forward;
}
difference_type operator-(const device_iterator &it) const {
return Base::idx - it.idx;
}
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return Base::idx - std::distance(oneapi::dpl::begin(Base::buffer), it);
}
bool operator==(const device_iterator &it) const { return *this - it == 0; }
bool operator!=(const device_iterator &it) const { return !(*this == it); }
bool operator<(const device_iterator &it) const { return *this - it < 0; }
bool operator>(const device_iterator &it) const { return it < *this; }
bool operator<=(const device_iterator &it) const { return !(*this > it); }
bool operator>=(const device_iterator &it) const { return !(*this < it); }
std::size_t get_idx() const { return Base::idx; } // required
sycl::buffer<T, 1, Allocator> get_buffer() {
return Base::buffer;
} // required
};
#else
template <typename T> class device_iterator : public device_pointer<T> {
using Base = device_pointer<T>;
protected:
std::size_t idx;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = typename Base::pointer;
using reference = typename Base::reference;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
static constexpr sycl::access_mode mode =
sycl::access_mode::read_write; // required
device_iterator() : Base(nullptr), idx(0) {}
device_iterator(T *vec, std::size_t index) : Base(vec), idx(index) {}
template <sycl::access_mode inMode>
device_iterator(const device_iterator<T> &in)
: Base(in.ptr), idx(in.idx) {} // required for iter_mode
device_iterator &operator=(const device_iterator &in) {
Base::operator=(in);
idx = in.idx;
return *this;
}
reference operator*() const { return *(Base::ptr + idx); }
reference operator[](difference_type i) { return Base::ptr[idx + i]; }
reference operator[](difference_type i) const { return Base::ptr[idx + i]; }
device_iterator &operator++() {
++idx;
return *this;
}
device_iterator &operator--() {
--idx;
return *this;
}
device_iterator operator++(int) {
device_iterator it(*this);
++(*this);
return it;
}
device_iterator operator--(int) {
device_iterator it(*this);
--(*this);
return it;
}
device_iterator operator+(difference_type forward) const {
const auto new_idx = idx + forward;
return {Base::ptr, new_idx};
}
device_iterator &operator+=(difference_type forward) {
idx += forward;
return *this;
}
device_iterator operator-(difference_type backward) const {
return {Base::ptr, idx - backward};
}
device_iterator &operator-=(difference_type backward) {
idx -= backward;
return *this;
}
friend device_iterator operator+(difference_type forward,
const device_iterator &it) {
return it + forward;
}
difference_type operator-(const device_iterator &it) const {
return idx - it.idx;
}
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return idx - it.get_idx();
}
bool operator==(const device_iterator &it) const { return *this - it == 0; }
bool operator!=(const device_iterator &it) const { return !(*this == it); }
bool operator<(const device_iterator &it) const { return *this - it < 0; }
bool operator>(const device_iterator &it) const { return it < *this; }
bool operator<=(const device_iterator &it) const { return !(*this > it); }
bool operator>=(const device_iterator &it) const { return !(*this < it); }
std::size_t get_idx() const { return idx; } // required
device_iterator &get_buffer() { return *this; } // required
std::size_t size() const { return idx; }
};
#endif
template <typename T>
device_pointer<T> malloc_device(const std::size_t num_elements) {
return device_pointer<T>(num_elements * sizeof(T));
}
static inline device_pointer<void> malloc_device(const std::size_t num_bytes) {
return device_pointer<void>(num_bytes);
}
template <typename T>
device_pointer<T> device_new(device_pointer<T> p, const T &value,
const std::size_t count = 1) {
std::vector<T> result(count, value);
p.buffer = sycl::buffer<T, 1>(result.begin(), result.end());
return p + count;
}
template <typename T>
device_pointer<T> device_new(device_pointer<T> p, const std::size_t count = 1) {
return device_new(p, T{}, count);
}
template <typename T>
device_pointer<T> device_new(const std::size_t count = 1) {
return device_pointer<T>(count);
}
template <typename T> void free_device(device_pointer<T> ptr) {}
template <typename T>
typename std::enable_if<!std::is_trivially_destructible<T>::value, void>::type
device_delete(device_pointer<T> p, const std::size_t count = 1) {
for (std::size_t i = 0; i < count; ++i) {
p[i].~T();
}
}
template <typename T>
typename std::enable_if<std::is_trivially_destructible<T>::value, void>::type
device_delete(device_pointer<T>, const std::size_t count = 1) {}
template <typename T> device_pointer<T> get_device_pointer(T *ptr) {
return device_pointer<T>(ptr);
}
template <typename T>
device_pointer<T> get_device_pointer(const device_pointer<T> &ptr) {
return device_pointer<T>(ptr);
}
template <typename T> T *get_raw_pointer(const device_pointer<T> &ptr) {
return ptr.get();
}
template <typename Pointer> Pointer get_raw_pointer(const Pointer &ptr) {
return ptr;
}
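// Usage sketch (not part of the dpct API): allocating device-visible memory
// through malloc_device and touching it through get_raw_pointer. The helper
// name `example_fill_device_ints` and the element count are illustrative, and
// the queue::fill call assumes the USM path (DPCT_USM_LEVEL_NONE not
// defined), where get_raw_pointer yields a raw pointer.
inline void example_fill_device_ints() {
  constexpr std::size_t n = 16;
  // Space for n ints (shared USM under the hood in the USM path).
  device_pointer<int> d_ptr = malloc_device<int>(n);
  get_default_queue().fill(get_raw_pointer(d_ptr), 42, n).wait();
  free_device(d_ptr); // currently a no-op in this header
}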
template <typename T> const T &get_raw_reference(const device_reference<T> &ref) {
return ref.value;
}
template <typename T> T &get_raw_reference(device_reference<T> &ref) {
return ref.value;
}
template <typename T> const T &get_raw_reference(const T &ref) {
return ref;
}
template <typename T> T &get_raw_reference(T &ref) {
return ref;
}
} // namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/vector.h | //==---- vector.h ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_VECTOR_H__
#define __DPCT_VECTOR_H__
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <sycl/sycl.hpp>
#include "memory.h"
#include <algorithm>
#include <iterator>
#include <vector>
#include "../device.hpp"
namespace dpct {
namespace internal {
template <typename Iter, typename Void = void> // for non-iterators
struct is_iterator : std::false_type {};
template <typename Iter> // For iterators
struct is_iterator<
Iter,
typename std::enable_if<
!std::is_void<typename Iter::iterator_category>::value, void>::type>
: std::true_type {};
template <typename T> // For pointers
struct is_iterator<T *> : std::true_type {};
} // end namespace internal
#ifndef DPCT_USM_LEVEL_NONE
template <typename T,
typename Allocator = sycl::usm_allocator<T, sycl::usm::alloc::shared>>
class device_vector {
public:
using iterator = device_iterator<T>;
using const_iterator = const iterator;
using reference = device_reference<T>;
using const_reference = const reference;
using value_type = T;
using pointer = T *;
using const_pointer = const T *;
using difference_type =
typename ::std::iterator_traits<iterator>::difference_type;
using size_type = ::std::size_t;
private:
Allocator _alloc;
size_type _size;
size_type _capacity;
pointer _storage;
size_type _min_capacity() const { return size_type(1); }
void _set_capacity_and_alloc() {
_capacity = ::std::max(_size * 2, _min_capacity());
_storage = _alloc.allocate(_capacity);
}
public:
template <typename OtherA> operator ::std::vector<T, OtherA>() const {
auto __tmp = ::std::vector<T, OtherA>(this->size());
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
this->begin(), this->end(), __tmp.begin());
return __tmp;
}
device_vector()
: _alloc(get_default_queue()), _size(0), _capacity(_min_capacity()) {
_set_capacity_and_alloc();
}
~device_vector() /*= default*/ { _alloc.deallocate(_storage, _capacity); };
explicit device_vector(size_type n) : device_vector(n, T()) {}
explicit device_vector(size_type n, const T &value)
: _alloc(get_default_queue()), _size(n) {
_set_capacity_and_alloc();
if (_size > 0) {
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), T(value));
}
}
device_vector(const device_vector &other) : _alloc(get_default_queue()) {
_size = other.size();
_capacity = other.capacity();
_storage = _alloc.allocate(_capacity);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
other.begin(), other.end(), begin());
}
device_vector(device_vector &&other)
: _alloc(get_default_queue()), _size(other.size()),
_capacity(other.capacity()), _storage(other._storage) {
other._size = 0;
other._capacity = 0;
other._storage = nullptr;
}
template <typename InputIterator>
device_vector(InputIterator first,
typename ::std::enable_if<
internal::is_iterator<InputIterator>::value &&
!::std::is_pointer<InputIterator>::value &&
::std::is_same<typename ::std::iterator_traits<
InputIterator>::iterator_category,
::std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _alloc(get_default_queue()) {
_size = ::std::distance(first, last);
_set_capacity_and_alloc();
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
}
}
template <typename InputIterator>
device_vector(InputIterator first,
typename ::std::enable_if<::std::is_pointer<InputIterator>::value,
InputIterator>::type last)
: _alloc(get_default_queue()) {
_size = ::std::distance(first, last);
_set_capacity_and_alloc();
if (_size > 0) {
auto ptr_type = sycl::get_pointer_type(first, get_default_context());
if (ptr_type != sycl::usm::alloc::host &&
ptr_type != sycl::usm::alloc::unknown) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
} else {
sycl::buffer<T, 1> buf(first, last);
auto buf_first = oneapi::dpl::begin(buf);
auto buf_last = oneapi::dpl::end(buf);
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
buf_first, buf_last, begin());
}
}
}
template <typename InputIterator>
device_vector(InputIterator first,
typename ::std::enable_if<
internal::is_iterator<InputIterator>::value &&
!::std::is_pointer<InputIterator>::value &&
!::std::is_same<typename ::std::iterator_traits<
InputIterator>::iterator_category,
::std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _alloc(get_default_queue()), _size(::std::distance(first, last)) {
_set_capacity_and_alloc();
::std::vector<T> _tmp(first, last);
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
_tmp.begin(), _tmp.end(), this->begin());
}
}
template <typename OtherAllocator>
device_vector(const device_vector<T, OtherAllocator> &v)
: _alloc(get_default_queue()), _storage(v.real_begin()), _size(v.size()),
_capacity(v.capacity()) {}
template <typename OtherAllocator>
device_vector(::std::vector<T, OtherAllocator> &v)
: _alloc(get_default_queue()), _size(v.size()) {
_set_capacity_and_alloc();
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.begin(), v.end(), this->begin());
}
}
template <typename OtherAllocator>
device_vector &operator=(const ::std::vector<T, OtherAllocator> &v) {
resize(v.size());
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.begin(), v.end(), begin());
}
return *this;
}
device_vector &operator=(const device_vector &other) {
// Copy assignment operator:
resize(other.size());
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
other.begin(), other.end(), begin());
}
return *this;
}
device_vector &operator=(device_vector &&other) {
// Move assignment operator:
device_vector dummy(::std::move(other));
this->swap(dummy);
return *this;
}
size_type size() const { return _size; }
iterator begin() noexcept { return device_iterator<T>(_storage, 0); }
iterator end() { return device_iterator<T>(_storage, size()); }
const_iterator begin() const noexcept {
return device_iterator<T>(_storage, 0);
}
const_iterator cbegin() const noexcept { return begin(); }
const_iterator end() const { return device_iterator<T>(_storage, size()); }
const_iterator cend() const { return end(); }
T *real_begin() { return _storage; }
const T *real_begin() const { return _storage; }
void swap(device_vector &v) {
::std::swap(_size, v._size);
::std::swap(_capacity, v._capacity);
::std::swap(_storage, v._storage);
::std::swap(_alloc, v._alloc);
}
reference operator[](size_type n) { return _storage[n]; }
const_reference operator[](size_type n) const { return _storage[n]; }
void reserve(size_type n) {
if (n > capacity()) {
// allocate buffer for new size
auto tmp = _alloc.allocate(2 * n);
// copy content (old buffer to new buffer)
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), tmp);
// deallocate old memory
_alloc.deallocate(_storage, _capacity);
_storage = tmp;
_capacity = 2 * n;
}
}
void resize(size_type new_size, const T &x = T()) {
reserve(new_size);
if (_size < new_size) {
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin() + _size, begin() + new_size, x);
}
_size = new_size;
}
size_type max_size(void) const {
return ::std::numeric_limits<size_type>::max() / sizeof(T);
}
size_type capacity() const { return _capacity; }
const_reference front() const { return *begin(); }
reference front() { return *begin(); }
const_reference back(void) const { return *(end() - 1); }
reference back(void) { return *(end() - 1); }
pointer data(void) { return _storage; }
const_pointer data(void) const { return _storage; }
void shrink_to_fit(void) {
if (_size != capacity()) {
size_type tmp_capacity = ::std::max(_size, _min_capacity());
auto tmp = _alloc.allocate(tmp_capacity);
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), tmp);
}
_alloc.deallocate(_storage, _capacity);
_storage = tmp;
_capacity = tmp_capacity;
}
}
void assign(size_type n, const T &x) {
resize(n);
if (_size > 0) {
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), begin() + n, x);
}
}
template <typename InputIterator>
void
assign(InputIterator first,
typename ::std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = ::std::distance(first, last);
resize(n);
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
}
}
void clear(void) { _size = 0; }
bool empty(void) const { return (size() == 0); }
void push_back(const T &x) { insert(end(), size_type(1), x); }
void pop_back(void) {
if (_size > 0)
--_size;
}
iterator erase(iterator first, iterator last) {
auto n = ::std::distance(first, last);
if (last == end()) {
_size = _size - n;
return end();
}
auto m = ::std::distance(last, end());
if (m <= 0) {
return end();
}
auto tmp = _alloc.allocate(m);
// copy remainder to temporary buffer.
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
last, end(), tmp);
    // overwrite (erase) the subsequence in storage.
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, first);
_alloc.deallocate(tmp, m);
_size -= n;
return begin() + first.get_idx() + n;
}
iterator erase(iterator pos) { return erase(pos, pos + 1); }
iterator insert(iterator position, const T &x) {
auto n = ::std::distance(begin(), position);
insert(position, size_type(1), x);
return begin() + n;
}
void insert(iterator position, size_type n, const T &x) {
if (position == end()) {
resize(size() + n);
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
end() - n, end(), x);
} else {
auto i_n = ::std::distance(begin(), position);
// allocate temporary storage
auto m = ::std::distance(position, end());
// will throw if position is not inside active vector
auto tmp = _alloc.allocate(m);
// copy remainder
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, end(), tmp);
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, position + n, x);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, position + n);
_alloc.deallocate(tmp, m);
}
}
template <typename InputIterator>
void
insert(iterator position, InputIterator first,
typename ::std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = ::std::distance(first, last);
if (position == end()) {
resize(size() + n);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, end());
} else {
auto m = ::std::distance(position, end());
// will throw if position is not inside active vector
auto tmp = _alloc.allocate(m);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, end(), tmp);
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, position);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, position + n);
_alloc.deallocate(tmp, m);
}
}
Allocator get_allocator() const { return _alloc; }
};
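// Usage sketch (not part of the dpct API): host <-> device round trip with
// device_vector plus a oneDPL algorithm over its iterators. The helper name
// `example_scale_vector` and the values are illustrative; this is the USM
// build of device_vector.
inline ::std::vector<float> example_scale_vector() {
  ::std::vector<float> h_in{1.f, 2.f, 3.f, 4.f};
  device_vector<float> d_vec(h_in.begin(), h_in.end()); // copies to the device
  oneapi::dpl::transform(
      oneapi::dpl::execution::make_device_policy(get_default_queue()),
      d_vec.begin(), d_vec.end(), d_vec.begin(),
      [](float x) { return 2.f * x; });                 // scale in place
  ::std::vector<float> h_out = d_vec;                   // copies back to host
  return h_out;
}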
#else
template <typename T, typename Allocator = detail::__buffer_allocator<T>>
class device_vector {
static_assert(
std::is_same<Allocator, detail::__buffer_allocator<T>>::value,
"device_vector doesn't support custom allocator when USM is not used.");
public:
using iterator = device_iterator<T>;
using const_iterator = const iterator;
using reference = device_reference<T>;
using const_reference = const reference;
using value_type = T;
using pointer = T *;
using const_pointer = const T *;
using difference_type =
typename std::iterator_traits<iterator>::difference_type;
using size_type = std::size_t;
private:
using Buffer = sycl::buffer<T, 1>;
using Range = sycl::range<1>;
// Using mem_mgr to handle memory allocation
void *_storage;
size_type _size;
size_type _min_capacity() const { return size_type(1); }
void *alloc_store(size_type num_bytes) {
return detail::mem_mgr::instance().mem_alloc(num_bytes);
}
public:
template <typename OtherA> operator std::vector<T, OtherA>() const {
auto __tmp = std::vector<T, OtherA>(this->size());
std::copy(oneapi::dpl::execution::dpcpp_default, this->begin(), this->end(),
__tmp.begin());
return __tmp;
}
device_vector()
: _storage(alloc_store(_min_capacity() * sizeof(T))), _size(0) {}
~device_vector() = default;
explicit device_vector(size_type n) : device_vector(n, T()) {}
explicit device_vector(size_type n, const T &value)
: _storage(alloc_store(std::max(n, _min_capacity()) * sizeof(T))),
_size(n) {
auto buf = get_buffer();
std::fill(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(buf),
oneapi::dpl::begin(buf) + n, T(value));
}
device_vector(const device_vector &other)
: _storage(other._storage), _size(other.size()) {}
device_vector(device_vector &&other)
: _storage(std::move(other._storage)), _size(other.size()) {}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<
internal::is_iterator<InputIterator>::value &&
!std::is_pointer<InputIterator>::value &&
std::is_same<typename std::iterator_traits<
InputIterator>::iterator_category,
std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, dst);
}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<std::is_pointer<InputIterator>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
Buffer tmp_buf(first, last);
auto start = oneapi::dpl::begin(tmp_buf);
auto end = oneapi::dpl::end(tmp_buf);
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
start, end, dst);
}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<
internal::is_iterator<InputIterator>::value &&
!std::is_same<typename std::iterator_traits<
InputIterator>::iterator_category,
std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
std::vector<T> tmp(first, last);
Buffer tmp_buf(tmp);
auto start = oneapi::dpl::begin(tmp_buf);
auto end = oneapi::dpl::end(tmp_buf);
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
start, end, dst);
}
template <typename OtherAllocator>
device_vector(const device_vector<T, OtherAllocator> &v)
: _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) {
auto buf = get_buffer();
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.real_begin(), v.real_begin() + v.size(), dst);
}
template <typename OtherAllocator>
device_vector(std::vector<T, OtherAllocator> &v)
: _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) {
std::copy(oneapi::dpl::execution::dpcpp_default, v.begin(), v.end(),
oneapi::dpl::begin(get_buffer()));
}
device_vector &operator=(const device_vector &other) {
// Copy assignment operator:
_size = other.size();
void *tmp = alloc_store(_size * sizeof(T));
auto tmp_buf =
detail::mem_mgr::instance()
.translate_ptr(tmp)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(other.get_buffer()),
oneapi::dpl::end(other.get_buffer()),
oneapi::dpl::begin(tmp_buf));
detail::mem_mgr::instance().mem_free(_storage);
_storage = tmp;
return *this;
}
device_vector &operator=(device_vector &&other) {
// Move assignment operator:
_size = other.size();
this->_storage = std::move(other._storage);
return *this;
}
template <typename OtherAllocator>
device_vector &operator=(const std::vector<T, OtherAllocator> &v) {
Buffer data(v.begin(), v.end());
_size = v.size();
void *tmp = alloc_store(_size * sizeof(T));
auto tmp_buf =
detail::mem_mgr::instance()
.translate_ptr(tmp)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(data),
oneapi::dpl::end(data), oneapi::dpl::begin(tmp_buf));
detail::mem_mgr::instance().mem_free(_storage);
_storage = tmp;
return *this;
}
Buffer get_buffer() const {
return detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template reinterpret<T, 1>(sycl::range<1>(capacity()));
}
size_type size() const { return _size; }
iterator begin() noexcept { return device_iterator<T>(get_buffer(), 0); }
iterator end() { return device_iterator<T>(get_buffer(), _size); }
const_iterator begin() const noexcept {
return device_iterator<T>(get_buffer(), 0);
}
const_iterator cbegin() const noexcept { return begin(); }
const_iterator end() const { return device_iterator<T>(get_buffer(), _size); }
const_iterator cend() const { return end(); }
T *real_begin() {
return (detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
}
const T *real_begin() const {
return const_cast<device_vector *>(this)
->detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template get_access<sycl::access_mode::read_write>()
.get_pointer();
}
void swap(device_vector &v) {
void *temp = v._storage;
v._storage = this->_storage;
this->_storage = temp;
std::swap(_size, v._size);
}
reference operator[](size_type n) { return *(begin() + n); }
const_reference operator[](size_type n) const { return *(begin() + n); }
void reserve(size_type n) {
if (n > capacity()) {
// create new buffer (allocate for new size)
void *a = alloc_store(n * sizeof(T));
// copy content (old buffer to new buffer)
if (_storage != nullptr) {
auto tmp = detail::mem_mgr::instance()
.translate_ptr(a)
.buffer.template reinterpret<T, 1>(sycl::range<1>(n));
auto src_buf = get_buffer();
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(src_buf), oneapi::dpl::end(src_buf),
oneapi::dpl::begin(tmp));
// deallocate old memory
detail::mem_mgr::instance().mem_free(_storage);
}
_storage = a;
}
}
void resize(size_type new_size, const T &x = T()) {
reserve(new_size);
if (_size < new_size) {
auto src_buf = get_buffer();
std::fill(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(src_buf) + _size,
oneapi::dpl::begin(src_buf) + new_size, x);
}
_size = new_size;
}
size_type max_size(void) const {
return std::numeric_limits<size_type>::max() / sizeof(T);
}
size_type capacity() const {
return _storage != nullptr ? detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.size() /
sizeof(T)
: 0;
}
const_reference front() const { return *begin(); }
reference front() { return *begin(); }
const_reference back(void) const { return *(end() - 1); }
reference back(void) { return *(end() - 1); }
pointer data(void) { return reinterpret_cast<pointer>(_storage); }
const_pointer data(void) const {
return reinterpret_cast<const_pointer>(_storage);
}
void shrink_to_fit(void) {
if (_size != capacity()) {
void *a = alloc_store(_size * sizeof(T));
auto tmp = detail::mem_mgr::instance()
.translate_ptr(a)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(get_buffer()),
oneapi::dpl::begin(get_buffer()) + _size,
oneapi::dpl::begin(tmp));
detail::mem_mgr::instance().mem_free(_storage);
_storage = a;
}
}
void assign(size_type n, const T &x) {
resize(n);
std::fill(oneapi::dpl::execution::dpcpp_default, begin(), begin() + n, x);
}
template <typename InputIterator>
void
assign(InputIterator first,
typename std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = std::distance(first, last);
resize(n);
if (internal::is_iterator<InputIterator>::value &&
!std::is_pointer<InputIterator>::value)
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, begin());
else {
Buffer tmp(first, last);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), begin());
}
}
void clear(void) {
_size = 0;
detail::mem_mgr::instance().mem_free(_storage);
_storage = nullptr;
}
bool empty(void) const { return (size() == 0); }
void push_back(const T &x) { insert(end(), size_type(1), x); }
void pop_back(void) {
if (_size > 0)
--_size;
}
iterator erase(iterator first, iterator last) {
auto n = std::distance(first, last);
if (last == end()) {
_size = _size - n;
return end();
}
Buffer tmp{Range(std::distance(last, end()))};
// copy remainder to temporary buffer.
std::copy(oneapi::dpl::execution::dpcpp_default, last, end(),
oneapi::dpl::begin(tmp));
    // overwrite (erase) the subsequence in storage.
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), first);
resize(_size - n);
return begin() + first.get_idx() + n;
}
iterator erase(iterator pos) { return erase(pos, pos + 1); }
iterator insert(iterator position, const T &x) {
auto n = std::distance(begin(), position);
insert(position, size_type(1), x);
return begin() + n;
}
void insert(iterator position, size_type n, const T &x) {
if (position == end()) {
resize(size() + n);
std::fill(oneapi::dpl::execution::dpcpp_default, end() - n, end(), x);
} else {
auto i_n = std::distance(begin(), position);
// allocate temporary storage
Buffer tmp{Range(std::distance(position, end()))};
// copy remainder
std::copy(oneapi::dpl::execution::dpcpp_default, position, end(),
oneapi::dpl::begin(tmp));
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
std::fill(oneapi::dpl::execution::dpcpp_default, position, position + n,
x);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), position + n);
}
}
template <typename InputIterator>
void
insert(iterator position, InputIterator first,
typename std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = std::distance(first, last);
if (position == end()) {
resize(size() + n);
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, end());
} else {
Buffer tmp{Range(std::distance(position, end()))};
std::copy(oneapi::dpl::execution::dpcpp_default, position, end(),
oneapi::dpl::begin(tmp));
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, position);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), position + n);
}
}
};
#endif
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/dpcpp_extensions.h | //==---- dpcpp_extensions.h ------------------*- C++ -*---------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------===//
#ifndef __DPCT_DPCPP_EXTENSIONS_H__
#define __DPCT_DPCPP_EXTENSIONS_H__
#include <sycl/sycl.hpp>
#include <stdexcept>
#ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
#include <sycl/ext/oneapi/experimental/user_defined_reductions.hpp>
#endif
#include "../dpct.hpp"
namespace dpct {
namespace group {
namespace detail {
template <typename... _Args>
constexpr auto __reduce_over_group(_Args... __args) {
return sycl::reduce_over_group(__args...);
}
template <typename... _Args> constexpr auto __group_broadcast(_Args... __args) {
return sycl::group_broadcast(__args...);
}
template <typename... _Args>
constexpr auto __exclusive_scan_over_group(_Args... __args) {
return sycl::exclusive_scan_over_group(__args...);
}
template <typename... _Args>
constexpr auto __inclusive_scan_over_group(_Args... __args) {
return sycl::inclusive_scan_over_group(__args...);
}
} // end namespace detail
/// Perform an exclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the scan operation.
/// \param outputs Pointer to the location where scan results will be stored.
/// \param init initial value of the scan result.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ void
exclusive_scan(const Item &item, T (&inputs)[VALUES_PER_THREAD],
T (&outputs)[VALUES_PER_THREAD], T init,
BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
result = binary_op(result, inputs[i]);
}
T exclusive_result =
detail::__exclusive_scan_over_group(item.get_group(), result, binary_op);
T input = inputs[0];
if (item.get_local_linear_id() == 0) {
outputs[0] = init;
} else {
outputs[0] = exclusive_result;
}
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
T output = binary_op(input, outputs[i - 1]);
input = inputs[i];
outputs[i] = output;
}
}
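/// Usage sketch (not part of the dpct API): exclusive prefix sum of four
/// values per work-item across a single work-group. The helper name
/// `example_group_exclusive_scan` and the sizes are illustrative; `data` is
/// assumed to be a USM allocation of THREADS * VALS ints.
inline void example_group_exclusive_scan(sycl::queue q, int *data) {
  constexpr int VALS = 4, THREADS = 64;
  q.parallel_for(
      sycl::nd_range<1>(sycl::range<1>(THREADS), sycl::range<1>(THREADS)),
      [=](sycl::nd_item<1> item) {
        int in[VALS], out[VALS];
        const std::size_t base = item.get_local_linear_id() * VALS;
        for (int i = 0; i < VALS; ++i) in[i] = data[base + i];
        dpct::group::exclusive_scan(item, in, out, 0, sycl::plus<int>());
        for (int i = 0; i < VALS; ++i) data[base + i] = out[i];
      });
}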
/// Perform an exclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param init initial value of the scan result.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param group_aggregate group-wide aggregate of all inputs in the
/// work-items of the group.
/// \returns exclusive scan of the first i work-items where item is the i-th
/// work-item.
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__ T
exclusive_scan(const Item &item, T input, T init, BinaryOperation binary_op,
T &group_aggregate) {
T output = detail::__exclusive_scan_over_group(item.get_group(), input, init,
binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = binary_op(output, input);
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
return output;
}
/// Perform an exclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param prefix_callback_op functor invoked by the first work-item in the
/// group that returns the initial value in the resulting scan of the
/// work-items in the group.
/// \returns exclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation,
class GroupPrefixCallbackOperation>
__dpct_inline__ T
exclusive_scan(const Item &item, T input, BinaryOperation binary_op,
GroupPrefixCallbackOperation &prefix_callback_op) {
T group_aggregate;
T output =
detail::__exclusive_scan_over_group(item.get_group(), input, binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = binary_op(output, input);
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
T group_prefix = prefix_callback_op(group_aggregate);
if (item.get_local_linear_id() == 0) {
output = group_prefix;
} else {
output = binary_op(group_prefix, output);
}
return output;
}
namespace detail {
typedef uint16_t digit_counter_type;
typedef uint32_t packed_counter_type;
template <int N, int CURRENT_VAL = N, int COUNT = 0> struct log2 {
enum { VALUE = log2<N, (CURRENT_VAL >> 1), COUNT + 1>::VALUE };
};
template <int N, int COUNT> struct log2<N, 0, COUNT> {
enum { VALUE = (1 << (COUNT - 1) < N) ? COUNT : COUNT - 1 };
};
__dpct_inline__ uint32_t bfe(uint32_t source, uint32_t bit_start,
uint32_t num_bits) {
const uint32_t MASK = (1 << num_bits) - 1;
return (source >> bit_start) & MASK;
}
template <int RADIX_BITS, bool DESCENDING = false> class radix_rank {
public:
static size_t get_local_memory_size(size_t group_threads) {
return group_threads * PADDED_COUNTER_LANES * sizeof(packed_counter_type);
}
radix_rank(uint8_t *local_memory) : _local_memory(local_memory) {}
template <typename Item, int VALUES_PER_THREAD>
__dpct_inline__ void
rank_keys(const Item &item, uint32_t (&keys)[VALUES_PER_THREAD],
int (&ranks)[VALUES_PER_THREAD], int current_bit, int num_bits) {
digit_counter_type thread_prefixes[VALUES_PER_THREAD];
digit_counter_type *digit_counters[VALUES_PER_THREAD];
digit_counter_type *buffer =
reinterpret_cast<digit_counter_type *>(_local_memory);
reset_local_memory(item);
item.barrier(sycl::access::fence_space::local_space);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
uint32_t digit = bfe(keys[i], current_bit, num_bits);
uint32_t sub_counter = digit >> LOG_COUNTER_LANES;
uint32_t counter_lane = digit & (COUNTER_LANES - 1);
if (DESCENDING) {
sub_counter = PACKING_RATIO - 1 - sub_counter;
counter_lane = COUNTER_LANES - 1 - counter_lane;
}
digit_counters[i] =
&buffer[counter_lane * item.get_local_range().size() * PACKING_RATIO +
item.get_local_linear_id() * PACKING_RATIO + sub_counter];
thread_prefixes[i] = *digit_counters[i];
*digit_counters[i] = thread_prefixes[i] + 1;
}
item.barrier(sycl::access::fence_space::local_space);
scan_counters(item);
item.barrier(sycl::access::fence_space::local_space);
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
ranks[i] = thread_prefixes[i] + *digit_counters[i];
}
}
private:
template <typename Item>
__dpct_inline__ void reset_local_memory(const Item &item) {
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
ptr[i * item.get_local_range().size() + item.get_local_linear_id()] = 0;
}
}
template <typename Item>
__dpct_inline__ packed_counter_type upsweep(const Item &item) {
packed_counter_type sum = 0;
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; i++) {
cached_segment[i] =
ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i];
}
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
sum += cached_segment[i];
}
return sum;
}
template <typename Item>
__dpct_inline__ void
exclusive_downsweep(const Item &item, packed_counter_type raking_partial) {
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
packed_counter_type sum = raking_partial;
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
packed_counter_type value = cached_segment[i];
cached_segment[i] = sum;
sum += value;
}
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i] =
cached_segment[i];
}
}
struct prefix_callback {
__dpct_inline__ packed_counter_type
operator()(packed_counter_type block_aggregate) {
packed_counter_type block_prefix = 0;
#pragma unroll
for (int packed = 1; packed < PACKING_RATIO; packed++) {
block_prefix += block_aggregate
<< (sizeof(digit_counter_type) * 8 * packed);
}
return block_prefix;
}
};
template <typename Item>
__dpct_inline__ void scan_counters(const Item &item) {
packed_counter_type raking_partial = upsweep(item);
prefix_callback callback;
packed_counter_type exclusive_partial = exclusive_scan(
item, raking_partial, sycl::ext::oneapi::plus<packed_counter_type>(),
callback);
exclusive_downsweep(item, exclusive_partial);
}
private:
static constexpr int PACKING_RATIO =
sizeof(packed_counter_type) / sizeof(digit_counter_type);
static constexpr int LOG_PACKING_RATIO = log2<PACKING_RATIO>::VALUE;
static constexpr int LOG_COUNTER_LANES = RADIX_BITS - LOG_PACKING_RATIO;
static constexpr int COUNTER_LANES = 1 << LOG_COUNTER_LANES;
static constexpr int PADDED_COUNTER_LANES = COUNTER_LANES + 1;
packed_counter_type cached_segment[PADDED_COUNTER_LANES];
uint8_t *_local_memory;
};
template <typename T, typename U> struct base_traits {
static __dpct_inline__ U twiddle_in(U key) {
throw std::runtime_error("Not implemented");
}
static __dpct_inline__ U twiddle_out(U key) {
throw std::runtime_error("Not implemented");
}
};
template <typename U> struct base_traits<uint32_t, U> {
static __dpct_inline__ U twiddle_in(U key) { return key; }
static __dpct_inline__ U twiddle_out(U key) { return key; }
};
template <typename U> struct base_traits<int, U> {
static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1);
static __dpct_inline__ U twiddle_in(U key) { return key ^ HIGH_BIT; }
static __dpct_inline__ U twiddle_out(U key) { return key ^ HIGH_BIT; }
};
template <typename U> struct base_traits<float, U> {
static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1);
static __dpct_inline__ U twiddle_in(U key) {
U mask = (key & HIGH_BIT) ? U(-1) : HIGH_BIT;
return key ^ mask;
}
static __dpct_inline__ U twiddle_out(U key) {
U mask = (key & HIGH_BIT) ? HIGH_BIT : U(-1);
return key ^ mask;
}
};
template <typename T> struct traits : base_traits<T, T> {};
template <> struct traits<uint32_t> : base_traits<uint32_t, uint32_t> {};
template <> struct traits<int> : base_traits<int, uint32_t> {};
template <> struct traits<float> : base_traits<float, uint32_t> {};
} // namespace detail
namespace detail {
template <int N> struct power_of_two {
enum { VALUE = ((N & (N - 1)) == 0) };
};
__dpct_inline__ uint32_t shr_add(uint32_t x, uint32_t shift, uint32_t addend) {
return (x >> shift) + addend;
}
} // namespace detail
/// Implements scatter to blocked exchange pattern used in radix sort algorithm.
///
/// \tparam T type of the data elements exchanged
/// \tparam VALUES_PER_THREAD number of data elements assigned to a thread
template <typename T, int VALUES_PER_THREAD> class exchange {
public:
static size_t get_local_memory_size(size_t group_threads) {
size_t padding_values =
(INSERT_PADDING)
? ((group_threads * VALUES_PER_THREAD) >> LOG_LOCAL_MEMORY_BANKS)
: 0;
return (group_threads * VALUES_PER_THREAD + padding_values) * sizeof(T);
}
exchange(uint8_t *local_memory) : _local_memory(local_memory) {}
/// Rearrange elements from rank order to blocked order
template <typename Item>
__dpct_inline__ void
scatter_to_blocked(Item item, T (&keys)[VALUES_PER_THREAD],
int (&ranks)[VALUES_PER_THREAD]) {
T *buffer = reinterpret_cast<T *>(_local_memory);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; i++) {
int offset = ranks[i];
if (INSERT_PADDING)
offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset);
buffer[offset] = keys[i];
}
item.barrier(sycl::access::fence_space::local_space);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; i++) {
int offset = (item.get_local_id(0) * VALUES_PER_THREAD) + i;
if (INSERT_PADDING)
offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset);
keys[i] = buffer[offset];
}
}
private:
static constexpr int LOG_LOCAL_MEMORY_BANKS = 5;
static constexpr bool INSERT_PADDING =
(VALUES_PER_THREAD > 4) &&
(detail::power_of_two<VALUES_PER_THREAD>::VALUE);
uint8_t *_local_memory;
};
/// Implements radix sort to sort integer data elements assigned to all threads
/// in the group.
///
/// \tparam T type of the data elements exchanged
/// \tparam VALUES_PER_THREAD number of data elements assigned to a thread
/// \tparam DESCENDING boolean value indicating if data elements are sorted in
/// descending order.
template <typename T, int VALUES_PER_THREAD, bool DESCENDING = false>
class radix_sort {
public:
static size_t get_local_memory_size(size_t group_threads) {
size_t ranks_size =
detail::radix_rank<RADIX_BITS>::get_local_memory_size(group_threads);
size_t exchange_size =
exchange<T, VALUES_PER_THREAD>::get_local_memory_size(group_threads);
return sycl::max(ranks_size, exchange_size);
}
radix_sort(uint8_t *local_memory) : _local_memory(local_memory) {}
template <typename Item>
__dpct_inline__ void
sort(const Item &item, T (&keys)[VALUES_PER_THREAD], int begin_bit = 0,
int end_bit = 8 * sizeof(T)) {
uint32_t(&unsigned_keys)[VALUES_PER_THREAD] =
reinterpret_cast<uint32_t(&)[VALUES_PER_THREAD]>(keys);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
unsigned_keys[i] = detail::traits<T>::twiddle_in(unsigned_keys[i]);
}
while (true) {
int pass_bits = sycl::min(RADIX_BITS, end_bit - begin_bit);
int ranks[VALUES_PER_THREAD];
detail::radix_rank<RADIX_BITS, DESCENDING>(_local_memory)
.template rank_keys(item, unsigned_keys, ranks, begin_bit, pass_bits);
begin_bit += RADIX_BITS;
item.barrier(sycl::access::fence_space::local_space);
exchange<T, VALUES_PER_THREAD>(_local_memory)
.scatter_to_blocked(item, keys, ranks);
item.barrier(sycl::access::fence_space::local_space);
if (begin_bit >= end_bit)
break;
}
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
unsigned_keys[i] = detail::traits<T>::twiddle_out(unsigned_keys[i]);
}
}
private:
static constexpr int RADIX_BITS = 4;
uint8_t *_local_memory;
};
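/// Usage sketch (not part of the dpct API): sorting four keys per work-item
/// inside one work-group. The helper name `example_group_radix_sort` and the
/// sizes are illustrative; `keys` is assumed to be a USM allocation of
/// THREADS * VALS uint32_t values, and the local accessor supplies the
/// scratch memory the sorter requires.
inline void example_group_radix_sort(sycl::queue q, uint32_t *keys) {
  constexpr int VALS = 4, THREADS = 128;
  q.submit([&](sycl::handler &cgh) {
    sycl::local_accessor<uint8_t, 1> slm(
        sycl::range<1>(
            radix_sort<uint32_t, VALS>::get_local_memory_size(THREADS)),
        cgh);
    cgh.parallel_for(
        sycl::nd_range<1>(sycl::range<1>(THREADS), sycl::range<1>(THREADS)),
        [=](sycl::nd_item<1> item) {
          uint32_t local_keys[VALS];
          const std::size_t base = item.get_local_linear_id() * VALS;
          for (int i = 0; i < VALS; ++i) local_keys[i] = keys[base + i];
          radix_sort<uint32_t, VALS>(&slm[0]).sort(item, local_keys);
          for (int i = 0; i < VALS; ++i) keys[base + i] = local_keys[i];
        });
  });
}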
/// Perform a reduction of the data elements assigned to all threads in the
/// group.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the reduce operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \returns value of the reduction using binary_op
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ T
reduce(Item item, T (&inputs)[VALUES_PER_THREAD], BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; i++) {
result = binary_op(result, inputs[i]);
}
return detail::__reduce_over_group(item.get_group(), result, binary_op);
}
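/// Usage sketch (not part of the dpct API): group-wide sum of four values per
/// work-item; work-item 0 writes the total. The helper name
/// `example_group_sum` and the sizes are illustrative; `in` holds
/// THREADS * VALS floats and `out` holds one float (both USM).
inline void example_group_sum(sycl::queue q, const float *in, float *out) {
  constexpr int VALS = 4, THREADS = 64;
  q.parallel_for(
      sycl::nd_range<1>(sycl::range<1>(THREADS), sycl::range<1>(THREADS)),
      [=](sycl::nd_item<1> item) {
        float vals[VALS];
        const std::size_t base = item.get_local_linear_id() * VALS;
        for (int i = 0; i < VALS; ++i) vals[i] = in[base + i];
        float total = dpct::group::reduce(item, vals, sycl::plus<float>());
        if (item.get_local_linear_id() == 0) out[0] = total;
      });
}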
/// Perform a reduction on a limited number of the work items in a subgroup
///
/// \param item A work-item in a group.
/// \param value value per work item which is to be reduced
/// \param items_to_reduce number of work-items at the start of the sub-group
/// to reduce
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \returns value of the reduction using binary_op
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__
typename ::std::enable_if_t<sycl::has_known_identity_v<BinaryOperation, T>, T>
reduce_over_partial_group(const Item &item, const T &value,
const ::std::uint16_t &items_to_reduce,
BinaryOperation binary_op) {
T value_temp = (item.get_local_linear_id() < items_to_reduce)
? value
: sycl::known_identity_v<BinaryOperation, T>;
return detail::__reduce_over_group(item.get_sub_group(), value_temp,
binary_op);
}
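/// Usage sketch (not part of the dpct API): sum only the first eight
/// work-items of each sub-group. The helper name
/// `example_partial_subgroup_sum` is illustrative; `in` holds one int per
/// work-item and `out` holds one int per sub-group (both USM).
inline void example_partial_subgroup_sum(sycl::queue q, const int *in,
                                         int *out) {
  constexpr int THREADS = 32;
  q.parallel_for(
      sycl::nd_range<1>(sycl::range<1>(THREADS), sycl::range<1>(THREADS)),
      [=](sycl::nd_item<1> item) {
        int v = in[item.get_global_linear_id()];
        int s = dpct::group::reduce_over_partial_group(
            item, v, static_cast<std::uint16_t>(8), sycl::plus<int>());
        auto sg = item.get_sub_group();
        if (sg.get_local_linear_id() == 0)
          out[sg.get_group_linear_id()] = s;
      });
}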
/// Perform an inclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the scan operation.
/// \param outputs Pointer to the location where scan results will be stored.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \returns inclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ void
inclusive_scan(const Item &item, T (&inputs)[VALUES_PER_THREAD],
T (&outputs)[VALUES_PER_THREAD], BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
result = binary_op(result, inputs[i]);
}
T exclusive_result =
detail::__exclusive_scan_over_group(item.get_group(), result, binary_op);
if (item.get_local_linear_id() == 0) {
outputs[0] = inputs[0];
} else {
outputs[0] = binary_op(inputs[0], exclusive_result);
}
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
outputs[i] = binary_op(inputs[i], outputs[i - 1]);
}
}
/// Perform an inclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param group_aggregate group-wide aggregate of all inputs in the
/// work-items of the group.
/// \returns inclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__ T inclusive_scan(const Item &item, T input,
BinaryOperation binary_op,
T &group_aggregate) {
T output =
detail::__inclusive_scan_over_group(item.get_group(), input, binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = output;
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
return output;
}
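/// Usage sketch (not part of the dpct API): per-work-item inclusive prefix
/// sum plus the group-wide total, the counting step of a simple
/// stream-compaction pass. The helper name `example_group_inclusive_scan` is
/// illustrative; `flags`, `positions` (one int per work-item) and
/// `group_total` (one int) are assumed to be USM allocations.
inline void example_group_inclusive_scan(sycl::queue q, const int *flags,
                                         int *positions, int *group_total) {
  constexpr int THREADS = 128;
  q.parallel_for(
      sycl::nd_range<1>(sycl::range<1>(THREADS), sycl::range<1>(THREADS)),
      [=](sycl::nd_item<1> item) {
        int aggregate = 0;
        int pos = dpct::group::inclusive_scan(
            item, flags[item.get_global_linear_id()], sycl::plus<int>(),
            aggregate);
        positions[item.get_global_linear_id()] = pos;
        if (item.get_local_linear_id() == 0) *group_total = aggregate;
      });
}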
/// Perform an inclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param prefix_callback_op functor invoked by the first work-item in the
/// group that returns the initial value in the resulting scan of the
/// work-items in the group.
/// \returns inclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation,
class GroupPrefixCallbackOperation>
__dpct_inline__ T
inclusive_scan(const Item &item, T input, BinaryOperation binary_op,
GroupPrefixCallbackOperation &prefix_callback_op) {
T group_aggregate;
T output = inclusive_scan(item, input, binary_op, group_aggregate);
T group_prefix = prefix_callback_op(group_aggregate);
return binary_op(group_prefix, output);
}
} // namespace group
namespace device {
namespace detail {
template <typename... _Args> constexpr auto __joint_reduce(_Args... __args) {
return sycl::joint_reduce(__args...);
}
} // namespace detail
/// Perform a reduce on each of the segments specified within data stored on
/// the device.
///
/// \param queue Command queue used to access device used for reduction
/// \param inputs Pointer to the data elements on the device to be reduced
/// \param outputs Pointer to the storage where the reduced value for each
/// segment will be stored
/// \param segment_count number of segments to be reduced
/// \param begin_offsets Pointer to the set of indices that are the first
/// element in each segment
/// \param end_offsets Pointer to the set of indices that are one past the
/// last element in each segment
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction
/// \param init initial value of the reduction for each segment.
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation>
void segmented_reduce(sycl::queue queue, T *inputs, T *outputs,
size_t segment_count, OffsetT *begin_offsets,
OffsetT *end_offsets, BinaryOperation binary_op, T init) {
sycl::range<1> global_size(segment_count * GROUP_SIZE);
sycl::range<1> local_size(GROUP_SIZE);
queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for(
sycl::nd_range<1>(global_size, local_size), [=](sycl::nd_item<1> item) {
OffsetT segment_begin = begin_offsets[item.get_group_linear_id()];
OffsetT segment_end = end_offsets[item.get_group_linear_id()];
if (segment_begin == segment_end) {
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = init;
}
return;
}
sycl::multi_ptr<T, sycl::access::address_space::global_space>
input_ptr = inputs;
T group_aggregate = detail::__joint_reduce(
item.get_group(), input_ptr + segment_begin,
input_ptr + segment_end, init, binary_op);
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = group_aggregate;
}
});
});
}
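// Illustrative usage sketch (added for documentation; not part of the original
// dpct header). It reduces two segments of a shared-USM array with
// sycl::plus; every name below is hypothetical.
inline void segmented_reduce_usage_sketch(sycl::queue q) {
  constexpr size_t n = 8;
  int *values = sycl::malloc_shared<int>(n, q);
  int *results = sycl::malloc_shared<int>(2, q);
  int *begins = sycl::malloc_shared<int>(2, q);
  int *ends = sycl::malloc_shared<int>(2, q);
  for (size_t i = 0; i < n; ++i) values[i] = 1;
  begins[0] = 0; ends[0] = 4;  // segment 0 covers values[0..3]
  begins[1] = 4; ends[1] = 8;  // segment 1 covers values[4..7]
  segmented_reduce<128>(q, values, results, /*segment_count=*/2, begins, ends,
                        sycl::plus<int>(), /*init=*/0);
  q.wait();  // afterwards results[0] == 4 and results[1] == 4
  sycl::free(values, q);
  sycl::free(results, q);
  sycl::free(begins, q);
  sycl::free(ends, q);
}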
#ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
namespace experimental {
namespace detail {
template <typename _Tp, typename... _Ts> struct __is_any {
constexpr static bool value = std::disjunction_v<
std::is_same<std::remove_cv_t<_Tp>, std::remove_cv_t<_Ts>>...>;
};
template <typename _Tp, typename _Bp> struct __in_native_op_list {
constexpr static bool value =
__is_any<_Bp, sycl::plus<_Tp>, sycl::bit_or<_Tp>, sycl::bit_xor<_Tp>,
sycl::bit_and<_Tp>, sycl::maximum<_Tp>, sycl::minimum<_Tp>,
sycl::multiplies<_Tp>>::value;
};
template <typename _Tp, typename _Bp> struct __is_native_op {
constexpr static bool value = __in_native_op_list<_Tp, _Bp>::value ||
__in_native_op_list<void, _Bp>::value;
};
} // namespace detail
/// Perform a reduce on each of the segments specified within data stored on
/// the device. Compared with dpct::device::segmented_reduce, this
/// experimental overload supports user-defined reductions.
///
/// \param queue Command queue used to access device used for reduction
/// \param inputs Pointer to the data elements on the device to be reduced
/// \param outputs Pointer to the storage where the reduced value for each
/// segment will be stored
/// \param segment_count number of segments to be reduced
/// \param begin_offsets Pointer to the set of indices that are the first
/// element in each segment
/// \param end_offsets Pointer to the set of indices that are one past the
/// last element in each segment
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \param init initial value of the reduction for each segment.
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation>
void segmented_reduce(sycl::queue queue, T *inputs, T *outputs,
size_t segment_count, OffsetT *begin_offsets,
OffsetT *end_offsets, BinaryOperation binary_op, T init) {
sycl::range<1> global_size(segment_count * GROUP_SIZE);
sycl::range<1> local_size(GROUP_SIZE);
if constexpr (!detail::__is_native_op<T, BinaryOperation>::value) {
queue.submit([&](sycl::handler &cgh) {
size_t temp_memory_size = GROUP_SIZE * sizeof(T);
auto scratch = sycl::local_accessor<std::byte, 1>(temp_memory_size, cgh);
cgh.parallel_for(
sycl::nd_range<1>(global_size, local_size),
[=](sycl::nd_item<1> item) {
OffsetT segment_begin = begin_offsets[item.get_group_linear_id()];
OffsetT segment_end = end_offsets[item.get_group_linear_id()];
if (segment_begin == segment_end) {
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = init;
}
return;
}
// Create a handle that associates the group with an allocation it
// can use
auto handle =
sycl::ext::oneapi::experimental::group_with_scratchpad(
item.get_group(),
sycl::span(&scratch[0], temp_memory_size));
T group_aggregate = sycl::ext::oneapi::experimental::joint_reduce(
handle, inputs + segment_begin, inputs + segment_end, init,
binary_op);
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = group_aggregate;
}
});
});
} else {
dpct::device::segmented_reduce<GROUP_SIZE>(queue, inputs, outputs,
segment_count, begin_offsets,
end_offsets, binary_op, init);
}
}
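// Illustrative sketch (added for documentation; not part of the original dpct
// header). abs_max_op is not one of the SYCL native operators, so the call
// below exercises the scratchpad-based user-defined-reduction path above
// rather than falling back to dpct::device::segmented_reduce. All names are
// hypothetical.
struct abs_max_op {
  float operator()(float a, float b) const {
    return sycl::fmax(sycl::fabs(a), sycl::fabs(b));
  }
};
inline void user_defined_segmented_reduce_sketch(sycl::queue q, float *values,
                                                 float *results, int *begins,
                                                 int *ends,
                                                 size_t segment_count) {
  segmented_reduce<256>(q, values, results, segment_count, begins, ends,
                        abs_max_op{}, 0.0f);
}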
} // namespace experimental
#endif // SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
} // namespace device
} // namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/functional.h | //==---- functional.h -----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_FUNCTIONAL_H__
#define __DPCT_FUNCTIONAL_H__
#include <functional>
#include <oneapi/dpl/functional>
#include <oneapi/dpl/iterator>
#if ONEDPL_USE_DPCPP_BACKEND
#include <oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_utils.h>
#endif
#include <tuple>
#include <utility>
namespace dpct {
struct null_type {};
namespace internal {
template <class _ExecPolicy, class _T>
using enable_if_execution_policy =
typename std::enable_if<oneapi::dpl::execution::is_execution_policy<
typename std::decay<_ExecPolicy>::type>::value,
_T>::type;
template <typename _T> struct is_hetero_execution_policy : ::std::false_type {};
template <typename... PolicyParams>
struct is_hetero_execution_policy<
oneapi::dpl::execution::device_policy<PolicyParams...>> : ::std::true_type {
};
template <typename _T> struct is_fpga_execution_policy : ::std::false_type {};
#if _ONEDPL_FPGA_DEVICE
template <unsigned int unroll_factor, typename... PolicyParams>
struct is_hetero_execution_policy<
execution::fpga_policy<unroll_factor, PolicyParams...>> : ::std::true_type {
};
#endif
template <class _ExecPolicy, class _T>
using enable_if_hetero_execution_policy = typename std::enable_if<
is_hetero_execution_policy<typename std::decay<_ExecPolicy>::type>::value,
_T>::type;
#if _ONEDPL_CPP14_INTEGER_SEQUENCE_PRESENT
template <std::size_t... _Sp>
using index_sequence = std::index_sequence<_Sp...>;
template <std::size_t _Np>
using make_index_sequence = std::make_index_sequence<_Np>;
#else
template <std::size_t... _Sp> class index_sequence {};
template <std::size_t _Np, std::size_t... _Sp>
struct make_index_sequence_impl
: make_index_sequence_impl<_Np - 1, _Np - 1, _Sp...> {};
template <std::size_t... _Sp> struct make_index_sequence_impl<0, _Sp...> {
using type = index_sequence<_Sp...>;
};
template <std::size_t _Np>
using make_index_sequence = typename make_index_sequence_impl<_Np>::type;
#endif
// Minimal buffer implementations for temporary storage in mapping rules
// Some of our algorithms need to start with raw memory buffer,
// not an initialized array, because initialization/destruction
// would make the span be at least O(N).
#if ONEDPL_USE_DPCPP_BACKEND
template <typename _Tp> class __buffer {
sycl::buffer<_Tp, 1> __buf;
__buffer(const __buffer &) = delete;
void operator=(const __buffer &) = delete;
public:
// Try to obtain buffer of given size to store objects of _Tp type
__buffer(std::size_t __n) : __buf(sycl::range<1>(__n)) {}
// Return pointer to buffer, or NULL if buffer could not be obtained.
auto get() -> decltype(oneapi::dpl::begin(__buf)) const {
return oneapi::dpl::begin(__buf);
}
};
#else
template <typename _Tp> class __buffer {
std::unique_ptr<_Tp> _M_ptr;
__buffer(const __buffer &) = delete;
void operator=(const __buffer &) = delete;
public:
// Try to obtain buffer of given size to store objects of _Tp type
__buffer(const std::size_t __n) : _M_ptr(new _Tp[__n]) {}
// Return pointer to buffer, or NULL if buffer could not be obtained.
_Tp *get() const { return _M_ptr.get(); }
};
#endif
// Implements C++14 std::less<void> specialization to allow parameter type
// deduction.
class __less {
public:
template <typename _Xp, typename _Yp>
bool operator()(_Xp &&__x, _Yp &&__y) const {
return std::forward<_Xp>(__x) < std::forward<_Yp>(__y);
}
};
template <typename Policy, typename NewName> struct rebind_policy {
using type = Policy;
};
template <typename KernelName, typename NewName>
struct rebind_policy<oneapi::dpl::execution::device_policy<KernelName>,
NewName> {
using type = oneapi::dpl::execution::device_policy<NewName>;
};
#if _ONEDPL_FPGA_DEVICE
template <unsigned int factor, typename KernelName, typename NewName>
struct rebind_policy<oneapi::dpl::execution::fpga_policy<factor, KernelName>,
NewName> {
using type = oneapi::dpl::execution::fpga_policy<factor, NewName>;
};
#endif
template <typename T1, typename T2,
typename R1 = typename std::iterator_traits<T1>::reference,
typename R2 = typename std::iterator_traits<T2>::reference>
struct perm_fun {
typedef R2 result_of;
perm_fun(T1 input) : source(input) {}
R2 operator()(R1 x) const { return *(source + x); }
private:
T1 source;
};
// Functor compares first element (key) from tied sequence.
template <typename Compare = class internal::__less> struct compare_key_fun {
typedef bool result_of;
compare_key_fun(Compare _comp = internal::__less()) : comp(_comp) {}
template <typename _T1, typename _T2>
result_of operator()(_T1 &&a, _T2 &&b) const {
using std::get;
return comp(get<0>(a), get<0>(b));
}
private:
mutable Compare comp;
};
// Functor evaluates second element of tied sequence with predicate.
// Used by: copy_if, remove_copy_if, stable_partition_copy
// Lambda:
template <typename Predicate> struct predicate_key_fun {
typedef bool result_of;
predicate_key_fun(Predicate _pred) : pred(_pred) {}
template <typename _T1> result_of operator()(_T1 &&a) const {
using std::get;
return pred(get<1>(a));
}
private:
mutable Predicate pred;
};
// Used by: remove_if
template <typename Predicate> struct negate_predicate_key_fun {
typedef bool result_of;
negate_predicate_key_fun(Predicate _pred) : pred(_pred) {}
template <typename _T1> result_of operator()(_T1 &&a) const {
using std::get;
return !pred(get<1>(a));
}
private:
mutable Predicate pred;
};
template <typename T> struct sequence_fun {
using result_type = T;
sequence_fun(T _init, T _step) : init(_init), step(_step) {}
template <typename _T> result_type operator()(_T &&i) const {
return static_cast<T>(init + step * i);
}
private:
const T init;
const T step;
};
//[binary_pred](Ref a, Ref b){ return(binary_pred(get<0>(a),get<0>(b)));
template <typename Predicate> struct unique_fun {
typedef bool result_of;
unique_fun(Predicate _pred) : pred(_pred) {}
template <typename _T> result_of operator()(_T &&a, _T &&b) const {
using std::get;
return pred(get<0>(a), get<0>(b));
}
private:
mutable Predicate pred;
};
// Lambda: [pred, &new_value](Ref1 a, Ref2 s) {return pred(s) ? new_value : a;
// });
template <typename T, typename Predicate> struct replace_if_fun {
public:
typedef T result_of;
replace_if_fun(Predicate _pred, T _new_value)
: pred(_pred), new_value(_new_value) {}
template <typename _T1, typename _T2> T operator()(_T1 &&a, _T2 &&s) const {
return pred(s) ? new_value : a;
}
private:
mutable Predicate pred;
const T new_value;
};
//[pred,op](Ref a){return pred(a) ? op(a) : a; }
template <typename T, typename Predicate, typename Operator>
struct transform_if_fun {
transform_if_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T&& t) const {
using std::get;
if (pred(get<0>(t)))
get<1>(t) = op(get<0>(t));
}
private:
mutable Predicate pred;
mutable Operator op;
};
//[pred, op](Ref1 a, Ref2 s) { return pred(s) ? op(a) : a; });
template <typename T, typename Predicate, typename Operator>
struct transform_if_unary_zip_mask_fun {
transform_if_unary_zip_mask_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T&& t) const {
using std::get;
if (pred(get<1>(t)))
get<2>(t) = op(get<0>(t));
}
private:
mutable Predicate pred;
mutable Operator op;
};
template <typename T, typename Predicate, typename BinaryOperation>
class transform_if_zip_mask_fun {
public:
transform_if_zip_mask_fun(Predicate _pred = oneapi::dpl::identity(),
BinaryOperation _op = oneapi::dpl::identity())
: pred(_pred), op(_op) {}
template <typename _T> void operator()(_T &&t) const {
using std::get;
if (pred(get<2>(t)))
get<3>(t) = op(get<0>(t), get<1>(t));
}
private:
mutable Predicate pred;
mutable BinaryOperation op;
};
// This following code is similar to a section of code in
// oneDPL/include/oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_radix_sort.h
// It has a similar approach, and could be consolidated.
// Outside of some differences in approach, there are two significant
// differences in function.
//
// 1) This code allows the output type of the bit range translation to be fit
// into to the minimal type required to provide that many bits. The code in
// oneDPL to calculate the bucket for the radix is similar but its output is
// always std::uint32_t. The assumption that the bit range desired will fit in
// 32 bits is not true for this code.
//
// 2) This code ensures that for floating point type, -0.0f and 0.0f map to the
// same value. This allows the output of this translation to be used to provide
// a sort which ensures the stability of these values for floating point types.
template <int N> struct uint_byte_map {};
template <> struct uint_byte_map<1> { using type = uint8_t; };
template <> struct uint_byte_map<2> { using type = uint16_t; };
template <> struct uint_byte_map<4> { using type = uint32_t; };
template <> struct uint_byte_map<8> { using type = uint64_t; };
template <typename T> struct uint_map {
using type = typename uint_byte_map<sizeof(T)>::type;
};
template <typename T, typename OutKeyT> class translate_key {
using uint_type_t = typename uint_map<T>::type;
public:
translate_key(int begin_bit, int end_bit) {
shift = begin_bit;
mask = ~OutKeyT(0); // all ones
mask = mask >> (sizeof(OutKeyT) * 8 -
(end_bit - begin_bit)); // setup appropriate mask
flip_sign = uint_type_t(1) << (sizeof(uint_type_t) * 8 - 1); // sign bit
flip_key = ~uint_type_t(0); // 0xF...F
}
inline OutKeyT operator()(const T &key) const {
uint_type_t intermediate;
if constexpr (std::is_floating_point<T>::value) {
// normal case (both -0.0f and 0.0f equal -0.0f)
if (key != T(-0.0f)) {
uint_type_t is_negative = reinterpret_cast<const uint_type_t &>(key) >>
(sizeof(uint_type_t) * 8 - 1);
intermediate = reinterpret_cast<const uint_type_t &>(key) ^
((is_negative * flip_key) | flip_sign);
} else // special case for -0.0f to keep stability with 0.0f
{
T negzero = T(-0.0f);
intermediate = reinterpret_cast<const uint_type_t &>(negzero);
}
} else if constexpr (std::is_signed<T>::value) {
intermediate = reinterpret_cast<const uint_type_t &>(key) ^ flip_sign;
} else {
intermediate = key;
}
return static_cast<OutKeyT>(intermediate >> shift) &
mask; // shift, cast, and mask
}
private:
uint8_t shift;
OutKeyT mask;
uint_type_t flip_sign;
uint_type_t flip_key;
};
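// Illustrative sketch (added for documentation; not part of the original dpct
// header). It shows the ordering-preserving bit translation for float keys
// over the full 32-bit range: negatives map below positives under unsigned
// comparison, and -0.0f and 0.0f yield the same key, which keeps the sort
// stable between them.
inline void translate_key_sketch() {
  translate_key<float, uint32_t> to_bits(/*begin_bit=*/0, /*end_bit=*/32);
  uint32_t a = to_bits(-1.5f);
  uint32_t b = to_bits(-0.0f);
  uint32_t c = to_bits(0.0f);
  uint32_t d = to_bits(2.5f);
  // a < b, b == c, and c < d hold when compared as unsigned integers.
  (void)a; (void)b; (void)c; (void)d;
}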
} // end namespace internal
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Common/helper_timer.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Helper Timing Functions
#ifndef COMMON_HELPER_TIMER_H_
#define COMMON_HELPER_TIMER_H_
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// includes, system
#include <vector>
// includes, project
#include <exception.h>
// Definition of the StopWatch Interface, this is used if we don't want to use
// the CUT functions But rather in a self contained class interface
class StopWatchInterface {
public:
StopWatchInterface() {}
virtual ~StopWatchInterface() {}
public:
//! Start time measurement
virtual void start() = 0;
//! Stop time measurement
virtual void stop() = 0;
//! Reset time counters to zero
virtual void reset() = 0;
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
virtual float getTime() = 0;
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
virtual float getAverageTime() = 0;
};
//////////////////////////////////////////////////////////////////
// Begin Stopwatch timer class definitions for all OS platforms //
//////////////////////////////////////////////////////////////////
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// includes, system
#define WINDOWS_LEAN_AND_MEAN
#include <windows.h>
#undef min
#undef max
//! Windows specific implementation of StopWatch
class StopWatchWin : public StopWatchInterface {
public:
//! Constructor, default
StopWatchWin()
: start_time(),
end_time(),
diff_time(0.0f),
total_time(0.0f),
running(false),
clock_sessions(0),
freq(0),
freq_set(false) {
if (!freq_set) {
// helper variable
LARGE_INTEGER temp;
// get the tick frequency from the OS
QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER *>(&temp));
// convert to type in which it is needed
freq = (static_cast<double>(temp.QuadPart)) / 1000.0;
      // remember query
freq_set = true;
}
}
// Destructor
~StopWatchWin() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// member variables
//! Start of measurement
LARGE_INTEGER start_time;
//! End of measurement
LARGE_INTEGER end_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
//! tick frequency
double freq;
//! flag if the frequency has been set
bool freq_set;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::start() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and increment add to the current diff_time summation
//! variable. Also increment the number of times this clock has been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::stop() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&end_time));
diff_time = static_cast<float>(((static_cast<double>(end_time.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
total_time += diff_time;
clock_sessions++;
running = false;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
LARGE_INTEGER temp;
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&temp));
retval += static_cast<float>(((static_cast<double>(temp.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
#else
// Declarations for Stopwatch on Linux and Mac OSX
// includes, system
#include <sys/time.h>
#include <ctime>
//! Linux specific implementation of StopWatch
class StopWatchLinux : public StopWatchInterface {
public:
//! Constructor, default
StopWatchLinux()
: start_time(),
diff_time(0.0),
total_time(0.0),
running(false),
clock_sessions(0) {}
// Destructor
virtual ~StopWatchLinux() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// helper functions
//! Get difference between start time and current time
inline float getDiffTime();
private:
// member variables
//! Start of measurement
struct timeval start_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::start() {
gettimeofday(&start_time, 0);
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and increment add to the current diff_time summation
//! variable. Also increment the number of times this clock has been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::stop() {
diff_time = getDiffTime();
total_time += diff_time;
running = false;
clock_sessions++;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
gettimeofday(&start_time, 0);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
retval += getDiffTime();
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getDiffTime() {
struct timeval t_time;
gettimeofday(&t_time, 0);
// time difference in milli-seconds
return static_cast<float>(1000.0 * (t_time.tv_sec - start_time.tv_sec) +
(0.001 * (t_time.tv_usec - start_time.tv_usec)));
}
#endif // WIN32
////////////////////////////////////////////////////////////////////////////////
//! Timer functionality exported
////////////////////////////////////////////////////////////////////////////////
//! Create a new timer
//! @return true if a timer has been created, otherwise false
//! @param name of the new timer, 0 if the creation failed
////////////////////////////////////////////////////////////////////////////////
inline bool sdkCreateTimer(StopWatchInterface **timer_interface) {
// printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
*timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchWin());
#else
*timer_interface =
reinterpret_cast<StopWatchInterface *>(new StopWatchLinux());
#endif
return (*timer_interface != NULL) ? true : false;
}
////////////////////////////////////////////////////////////////////////////////
//! Delete a timer
//! @return true if a timer has been deleted, otherwise false
//! @param name of the timer to delete
////////////////////////////////////////////////////////////////////////////////
inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) {
// printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
delete *timer_interface;
*timer_interface = NULL;
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Start the timer with name \a name
//! @param name name of the timer to start
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStartTimer(StopWatchInterface **timer_interface) {
// printf("sdkStartTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->start();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop the timer with name \a name. Does not reset.
//! @param name name of the timer to stop
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStopTimer(StopWatchInterface **timer_interface) {
// printf("sdkStopTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->stop();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Resets the timer's counter.
//! @param name name of the timer to reset.
////////////////////////////////////////////////////////////////////////////////
inline bool sdkResetTimer(StopWatchInterface **timer_interface) {
// printf("sdkResetTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->reset();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Return the average time for timer execution as the total time
//! for the timer divided by the number of completed (stopped) runs the timer
//! has made.
//! Excludes the current running time if the timer is currently running.
//! @param name name of the timer to return the time of
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetAverageTimerValue called object %08x\n", (void
// *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getAverageTime();
} else {
return 0.0f;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Total execution time for the timer over all runs since the last reset
//! or timer creation.
//! @param name name of the timer to obtain the value of.
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getTime();
} else {
return 0.0f;
}
}
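////////////////////////////////////////////////////////////////////////////////
//! Illustrative usage sketch (added for documentation; not part of the
//! original helper): time a section of host code with the sdk* wrappers and
//! return the elapsed milliseconds. The timed workload is a placeholder.
////////////////////////////////////////////////////////////////////////////////
inline float sdkExampleTimeSection() {
  StopWatchInterface *timer = NULL;
  sdkCreateTimer(&timer);
  sdkStartTimer(&timer);
  // ... place the workload to be measured here ...
  sdkStopTimer(&timer);
  float elapsed_ms = sdkGetTimerValue(&timer);
  sdkDeleteTimer(&timer);
  return elapsed_ms;
}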
#endif // COMMON_HELPER_TIMER_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Common/helper_string.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing, timers, etc)
#ifndef COMMON_HELPER_STRING_H_
#define COMMON_HELPER_STRING_H_
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef STRCASECMP
#define STRCASECMP _stricmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP _strnicmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#ifndef SPRINTF
#define SPRINTF sprintf_s
#endif
#else // Linux Includes
#include <string.h>
#include <strings.h>
#ifndef STRCASECMP
#define STRCASECMP strcasecmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#ifndef SPRINTF
#define SPRINTF sprintf
#endif
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// CUDA Utility Helper Functions
inline int stringRemoveDelimiter(char delimiter, const char *string) {
int string_start = 0;
while (string[string_start] == delimiter) {
string_start++;
}
if (string_start >= static_cast<int>(strlen(string) - 1)) {
return 0;
}
return string_start;
}
inline int getFileExtension(char *filename, char **extension) {
int string_length = static_cast<int>(strlen(filename));
while (filename[string_length--] != '.') {
if (string_length == 0) break;
}
if (string_length > 0) string_length += 2;
if (string_length == 0)
*extension = NULL;
else
*extension = &filename[string_length];
return string_length;
}
inline bool checkCmdLineFlag(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
const char *equal_pos = strchr(string_argv, '=');
int argv_length = static_cast<int>(
equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
int length = static_cast<int>(strlen(string_ref));
if (length == argv_length &&
!STRNCASECMP(string_argv, string_ref, length)) {
bFound = true;
continue;
}
}
}
return bFound;
}
// This function wraps the CUDA Driver API into a template function
template <class T>
inline bool getCmdLineArgumentValue(const int argc, const char **argv,
const char *string_ref, T *value) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
*value = (T)atoi(&string_argv[length + auto_inc]);
}
bFound = true;
i = argc;
}
}
}
return bFound;
}
inline int getCmdLineArgumentInt(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
int value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = atoi(&string_argv[length + auto_inc]);
} else {
value = 0;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline float getCmdLineArgumentFloat(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
float value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = static_cast<float>(atof(&string_argv[length + auto_inc]));
} else {
value = 0.f;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline bool getCmdLineArgumentString(const int argc, const char **argv,
const char *string_ref,
char **string_retval) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
char *string_argv = const_cast<char *>(&argv[i][string_start]);
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
*string_retval = &string_argv[length + 1];
bFound = true;
continue;
}
}
}
if (!bFound) {
*string_retval = NULL;
}
return bFound;
}
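////////////////////////////////////////////////////////////////////////////////
//! Illustrative usage sketch (added for documentation; not part of the
//! original helper): parse arguments such as "--width=512 --verbose
//! --input=frame.ppm" with the helpers above. The flag names are hypothetical.
////////////////////////////////////////////////////////////////////////////////
inline void exampleParseCmdLine(const int argc, const char **argv) {
  int width = 512;  // default value
  if (checkCmdLineFlag(argc, argv, "width")) {
    width = getCmdLineArgumentInt(argc, argv, "width");
  }
  bool verbose = checkCmdLineFlag(argc, argv, "verbose");
  char *input_file = NULL;
  getCmdLineArgumentString(argc, argv, "input", &input_file);
  if (verbose && input_file != NULL) {
    printf("width=%d input=%s\n", width, input_file);
  }
}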
//////////////////////////////////////////////////////////////////////////////
//! Find the path for a file assuming that
//! files are found in the searchPath.
//!
//! @return the path if succeeded, otherwise 0
//! @param filename name of the file
//! @param executable_path optional absolute path of the executable
//////////////////////////////////////////////////////////////////////////////
inline char *sdkFindFilePath(const char *filename,
const char *executable_path) {
// <executable_name> defines a variable that is replaced with the name of the
// executable
// Typical relative search paths to locate needed companion files (e.g. sample
// input data, or JIT source files) The origin for the relative search may be
// the .exe file, a .bat file launching an .exe, a browser .exe launching the
// .exe or .bat, etc
const char *searchPath[] = {
"./", // same dir
"./data/", // same dir
"../../../../Samples/<executable_name>/", // up 4 in tree
"../../../Samples/<executable_name>/", // up 3 in tree
"../../Samples/<executable_name>/", // up 2 in tree
"../../../../Samples/<executable_name>/data/", // up 4 in tree
"../../../Samples/<executable_name>/data/", // up 3 in tree
"../../Samples/<executable_name>/data/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/data/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/data/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/data/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/data/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/data/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/data/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/data/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/data/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/data/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/data/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/data/", // up 2 in tree
"../../../../Common/data/", // up 4 in tree
"../../../Common/data/", // up 3 in tree
"../../Common/data/" // up 2 in tree
};
// Extract the executable name
std::string executable_name;
if (executable_path != 0) {
executable_name = std::string(executable_path);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// Windows path delimiter
size_t delimiter_pos = executable_name.find_last_of('\\');
executable_name.erase(0, delimiter_pos + 1);
if (executable_name.rfind(".exe") != std::string::npos) {
// we strip .exe, only if the .exe is found
executable_name.resize(executable_name.size() - 4);
}
#else
// Linux & OSX path delimiter
size_t delimiter_pos = executable_name.find_last_of('/');
executable_name.erase(0, delimiter_pos + 1);
#endif
}
// Loop over all search paths and return the first hit
for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) {
std::string path(searchPath[i]);
size_t executable_name_pos = path.find("<executable_name>");
// If there is executable_name variable in the searchPath
// replace it with the value
if (executable_name_pos != std::string::npos) {
if (executable_path != 0) {
path.replace(executable_name_pos, strlen("<executable_name>"),
executable_name);
} else {
// Skip this path entry if no executable argument is given
continue;
}
}
#ifdef _DEBUG
printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str());
#endif
// Test if the file exists
path.append(filename);
FILE *fp;
FOPEN(fp, path.c_str(), "rb");
if (fp != NULL) {
fclose(fp);
// File found
// returning an allocated array here for backwards compatibility reasons
char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1));
STRCPY(file_path, path.length() + 1, path.c_str());
return file_path;
}
if (fp) {
fclose(fp);
}
}
// File not found
printf("\nerror: sdkFindFilePath: file <%s> not found!\n", filename);
return 0;
}
#endif // COMMON_HELPER_STRING_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Common/helper_image.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (image,bitmap)
#ifndef COMMON_HELPER_IMAGE_H_
#define COMMON_HELPER_IMAGE_H_
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdint.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#ifndef MIN
#define MIN(a, b) ((a < b) ? a : b)
#endif
#ifndef MAX
#define MAX(a, b) ((a > b) ? a : b)
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#include <helper_string.h>
// namespace unnamed (internal)
namespace helper_image_internal {
//! size of PGM file header
const unsigned int PGMHeaderSize = 0x40;
// types
//! Data converter from unsigned char / unsigned byte to type T
template <class T>
struct ConverterFromUByte;
//! Data converter from unsigned char / unsigned byte
template <>
struct ConverterFromUByte<unsigned char> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
float operator()(const unsigned char &val) {
return static_cast<unsigned char>(val);
}
};
//! Data converter from unsigned char / unsigned byte to float
template <>
struct ConverterFromUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
float operator()(const unsigned char &val) {
return static_cast<float>(val) / 255.0f;
}
};
//! Data converter from unsigned char / unsigned byte to type T
template <class T>
struct ConverterToUByte;
//! Data converter from unsigned char / unsigned byte to unsigned int
template <>
struct ConverterToUByte<unsigned char> {
  //! Conversion operator (essentially a pass-through)
//! @return converted value
//! @param val value to convert
unsigned char operator()(const unsigned char &val) { return val; }
};
//! Data converter from unsigned char / unsigned byte to unsigned int
template <>
struct ConverterToUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
unsigned char operator()(const float &val) {
return static_cast<unsigned char>(val * 255.0f);
}
};
} // namespace helper_image_internal
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#else
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#endif
inline bool __loadPPM(const char *file, unsigned char **data, unsigned int *w,
unsigned int *h, unsigned int *channels) {
FILE *fp = NULL;
if (FOPEN_FAIL(FOPEN(fp, file, "rb"))) {
std::cerr << "__LoadPPM() : Failed to open file: " << file << std::endl;
return false;
}
// check header
char header[helper_image_internal::PGMHeaderSize];
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL" << std::endl;
return false;
}
if (strncmp(header, "P5", 2) == 0) {
*channels = 1;
} else if (strncmp(header, "P6", 2) == 0) {
*channels = 3;
} else {
std::cerr << "__LoadPPM() : File is not a PPM or PGM image" << std::endl;
*channels = 0;
return false;
}
// parse header, read maxval, width and height
unsigned int width = 0;
unsigned int height = 0;
unsigned int maxval = 0;
unsigned int i = 0;
while (i < 3) {
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL"
<< std::endl;
return false;
}
if (header[0] == '#') {
continue;
}
if (i == 0) {
i += SSCANF(header, "%u %u %u", &width, &height, &maxval);
} else if (i == 1) {
i += SSCANF(header, "%u %u", &height, &maxval);
} else if (i == 2) {
i += SSCANF(header, "%u", &maxval);
}
}
// check if given handle for the data is initialized
if (NULL != *data) {
if (*w != width || *h != height) {
std::cerr << "__LoadPPM() : Invalid image dimensions." << std::endl;
}
} else {
*data = (unsigned char *)malloc(sizeof(unsigned char) * width * height *
*channels);
*w = width;
*h = height;
}
// read and close file
if (fread(*data, sizeof(unsigned char), width * height * *channels, fp) ==
0) {
std::cerr << "__LoadPPM() read data returned error." << std::endl;
}
fclose(fp);
return true;
}
template <class T>
inline bool sdkLoadPGM(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = NULL;
unsigned int channels;
if (true != __loadPPM(file, &idata, w, h, &channels)) {
return false;
}
unsigned int size = *w * *h * channels;
// initialize mem if necessary
  // the correct size is checked / set in __loadPPM()
if (NULL == *data) {
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size));
}
// copy and cast data
std::transform(idata, idata + size, *data,
helper_image_internal::ConverterFromUByte<T>());
free(idata);
return true;
}
template <class T>
inline bool sdkLoadPPM4(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size * 4));
unsigned char *ptr = *data;
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool __savePPM(const char *file, unsigned char *data, unsigned int w,
unsigned int h, unsigned int channels) {
assert(NULL != data);
assert(w > 0);
assert(h > 0);
std::fstream fh(file, std::fstream::out | std::fstream::binary);
if (fh.bad()) {
std::cerr << "__savePPM() : Opening file failed." << std::endl;
return false;
}
if (channels == 1) {
fh << "P5\n";
} else if (channels == 3) {
fh << "P6\n";
} else {
std::cerr << "__savePPM() : Invalid number of channels." << std::endl;
return false;
}
fh << w << "\n" << h << "\n" << 0xff << std::endl;
for (unsigned int i = 0; (i < (w * h * channels)) && fh.good(); ++i) {
fh << data[i];
}
fh.flush();
if (fh.bad()) {
std::cerr << "__savePPM() : Writing data failed." << std::endl;
return false;
}
fh.close();
return true;
}
template <class T>
inline bool sdkSavePGM(const char *file, T *data, unsigned int w,
unsigned int h) {
unsigned int size = w * h;
unsigned char *idata = (unsigned char *)malloc(sizeof(unsigned char) * size);
std::transform(data, data + size, idata,
helper_image_internal::ConverterToUByte<T>());
// write file
bool result = __savePPM(file, idata, w, h, 1);
// cleanup
free(idata);
return result;
}
inline bool sdkSavePPM4ub(const char *file, unsigned char *data, unsigned int w,
unsigned int h) {
// strip 4th component
int size = w * h;
unsigned char *ndata =
(unsigned char *)malloc(sizeof(unsigned char) * size * 3);
unsigned char *ptr = ndata;
for (int i = 0; i < size; i++) {
*ptr++ = *data++;
*ptr++ = *data++;
*ptr++ = *data++;
data++;
}
bool result = __savePPM(file, ndata, w, h, 3);
free(ndata);
return result;
}
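//////////////////////////////////////////////////////////////////////////////
//! Illustrative usage sketch (added for documentation; not part of the
//! original helper): load a PGM into normalized floats, darken it, and write
//! it back out. The file names are hypothetical.
//////////////////////////////////////////////////////////////////////////////
inline bool exampleScalePGM(const char *in_file, const char *out_file) {
  float *pixels = NULL;
  unsigned int width = 0, height = 0;
  if (!sdkLoadPGM(in_file, &pixels, &width, &height)) {
    return false;
  }
  for (unsigned int i = 0; i < width * height; ++i) {
    pixels[i] *= 0.5f;  // halve the intensity of every pixel
  }
  bool result = sdkSavePGM(out_file, pixels, width, height);
  free(pixels);
  return result;
}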
//////////////////////////////////////////////////////////////////////////////
//! Read file \filename and return the data
//! @return true if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFile(const char *filename, T **data, unsigned int *len,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// intermediate storage for the data read
std::vector<T> data_read;
// open file for reading
FILE *fh = NULL;
// check if filestream is valid
if (FOPEN_FAIL(FOPEN(fh, filename, "r"))) {
printf("Unable to open input file: %s\n", filename);
return false;
}
// read all data elements
T token;
while (!feof(fh)) {
fscanf(fh, "%f", &token);
data_read.push_back(token);
}
// the last element is read twice
data_read.pop_back();
fclose(fh);
// check if the given handle is already initialized
if (NULL != *data) {
if (*len != data_read.size()) {
std::cerr << "sdkReadFile() : Initialized memory given but "
<< "size mismatch with signal read "
<< "(data read / data init = " << (unsigned int)data_read.size()
<< " / " << *len << ")" << std::endl;
return false;
}
} else {
// allocate storage for the data read
*data = reinterpret_cast<T *>(malloc(sizeof(T) * data_read.size()));
// store signal size
*len = static_cast<unsigned int>(data_read.size());
}
// copy data
memcpy(*data, &data_read.front(), sizeof(T) * data_read.size());
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Read file \filename and return the data
//! @return true if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFileBlocks(const char *filename, T **data, unsigned int *len,
unsigned int block_num, unsigned int block_size,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// open file for reading
FILE *fh = fopen(filename, "rb");
if (fh == NULL && verbose) {
std::cerr << "sdkReadFile() : Opening file failed." << std::endl;
return false;
}
// check if the given handle is already initialized
// allocate storage for the data read
data[block_num] = reinterpret_cast<T *>(malloc(block_size));
// read all data elements
fseek(fh, block_num * block_size, SEEK_SET);
*len = fread(data[block_num], sizeof(T), block_size / sizeof(T), fh);
fclose(fh);
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Write a data file \filename
//! @return true if writing the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data data to write
//! @param len number of data elements in data, -1 on error
//! @param epsilon epsilon for comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool sdkWriteFile(const char *filename, const T *data, unsigned int len,
const S epsilon, bool verbose, bool append = false) {
assert(NULL != filename);
assert(NULL != data);
// open file for writing
// if (append) {
std::fstream fh(filename, std::fstream::out | std::fstream::ate);
if (verbose) {
std::cerr << "sdkWriteFile() : Open file " << filename
<< " for write/append." << std::endl;
}
/* } else {
std::fstream fh(filename, std::fstream::out);
if (verbose) {
std::cerr << "sdkWriteFile() : Open file " << filename << " for
write." << std::endl;
}
}
*/
// check if filestream is valid
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Opening file failed." << std::endl;
}
return false;
}
// first write epsilon
fh << "# " << epsilon << "\n";
// write data
for (unsigned int i = 0; (i < len) && (fh.good()); ++i) {
fh << data[i] << ' ';
}
// Check if writing succeeded
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Writing file failed." << std::endl;
}
return false;
}
// file ends with nl
fh << std::endl;
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareData(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
bool result = true;
unsigned int error_count = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = static_cast<float>(reference[i]) - static_cast<float>(data[i]);
bool comp = (diff <= epsilon) && (diff >= -epsilon);
result &= comp;
error_count += !comp;
#if 0
if (!comp) {
std::cerr << "ERROR, i = " << i << ",\t "
<< reference[i] << " / "
<< data[i]
<< " (reference / data)\n";
}
#endif
}
if (threshold == 0.0f) {
return (result) ? true : false;
} else {
if (error_count) {
printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
static_cast<float>(error_count) * 100 / static_cast<float>(len),
error_count);
}
return (len * threshold > error_count) ? true : false;
}
}
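// Illustrative usage sketch (not part of the original header). Buffer names
// are placeholders; a threshold of 0.0f requires every element to match
// within epsilon, a positive threshold tolerates that fraction of mismatches.
#if 0
const unsigned int n = 1024;
float gold[n];      // reference results (filled elsewhere)
float computed[n];  // results under test (filled elsewhere)
bool match = compareData<float, float>(gold, computed, n, 1e-5f, 0.0f);
#endif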
#ifndef __MIN_EPSILON_ERROR
#define __MIN_EPSILON_ERROR 1e-3f
#endif
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//! @param threshold fraction of elements allowed to mismatch for pass/fail
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareDataAsFloatThreshold(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
// If we set epsilon to be 0, let's set a minimum threshold
float max_error = MAX((float)epsilon, __MIN_EPSILON_ERROR);
int error_count = 0;
bool result = true;
for (unsigned int i = 0; i < len; ++i) {
float diff =
fabs(static_cast<float>(reference[i]) - static_cast<float>(data[i]));
bool comp = (diff < max_error);
result &= comp;
if (!comp) {
error_count++;
}
}
if (threshold == 0.0f) {
if (error_count) {
printf("total # of errors = %d\n", error_count);
}
return (error_count == 0) ? true : false;
} else {
if (error_count) {
printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
static_cast<float>(error_count) * 100 / static_cast<float>(len),
error_count);
}
return ((len * threshold > error_count) ? true : false);
}
}
inline void sdkDumpBin(void *data, unsigned int bytes, const char *filename) {
printf("sdkDumpBin: <%s>\n", filename);
FILE *fp;
FOPEN(fp, filename, "wb");
fwrite(data, bytes, 1, fp);
fflush(fp);
fclose(fp);
}
inline bool sdkCompareBin2BinUint(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
unsigned int *src_buffer, *ref_buffer;
FILE *src_fp = NULL, *ref_fp = NULL;
uint64_t error_count = 0;
size_t fsize = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <unsigned int> unable to open src_file: %s\n",
src_file);
error_count++;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <unsigned int> unable to find <%s> in <%s>\n",
ref_file, exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
ref_file);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf(
"compareBin2Bin <unsigned int>"
" unable to open ref_file: %s\n",
ref_file_path);
error_count++;
}
if (src_fp && ref_fp) {
src_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
ref_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
      printf(
          "> compareBin2Bin <unsigned int> nelements=%d,"
          " epsilon=%4.2f, threshold=%4.2f\n",
          nelements, epsilon, threshold);
      fsize = fread(src_buffer, sizeof(unsigned int), nelements, src_fp);
      printf("   src_file <%s>, size=%d bytes\n", src_file,
             static_cast<int>(fsize * sizeof(unsigned int)));
      fsize = fread(ref_buffer, sizeof(unsigned int), nelements, ref_fp);
      printf("   ref_file <%s>, size=%d bytes\n", ref_file_path,
             static_cast<int>(fsize * sizeof(unsigned int)));
if (!compareData<unsigned int, float>(ref_buffer, src_buffer, nelements,
epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
inline bool sdkCompareBin2BinFloat(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
float *src_buffer = NULL, *ref_buffer = NULL;
FILE *src_fp = NULL, *ref_fp = NULL;
size_t fsize = 0;
uint64_t error_count = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <float> unable to open src_file: %s\n", src_file);
error_count = 1;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <float> unable to find <%s> in <%s>\n", ref_file,
exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
exec_path);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf("compareBin2Bin <float> unable to open ref_file: %s\n",
ref_file_path);
error_count = 1;
}
if (src_fp && ref_fp) {
src_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
ref_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
printf(
"> compareBin2Bin <float> nelements=%d, epsilon=%4.2f,"
" threshold=%4.2f\n",
nelements, epsilon, threshold);
fsize = fread(src_buffer, sizeof(float), nelements, src_fp);
printf(" src_file <%s>, size=%d bytes\n", src_file,
static_cast<int>(fsize * sizeof(float)));
fsize = fread(ref_buffer, sizeof(float), nelements, ref_fp);
printf(" ref_file <%s>, size=%d bytes\n", ref_file_path,
static_cast<int>(fsize * sizeof(float)));
if (!compareDataAsFloatThreshold<float, float>(
ref_buffer, src_buffer, nelements, epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
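// Illustrative usage sketch (not part of the original header); file names and
// sizes are placeholders. The reference file is located via sdkFindFilePath()
// using the executable path supplied by the caller.
#if 0
bool ok = sdkCompareBin2BinFloat("flow.bin", "flow_ref.bin", width * height,
                                 1e-2f, 0.1f, argv[0]);
#endif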
inline bool sdkCompareL2fe(const float *reference, const float *data,
const unsigned int len, const float epsilon) {
assert(epsilon >= 0);
float error = 0;
float ref = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = reference[i] - data[i];
error += diff * diff;
ref += reference[i] * reference[i];
}
float normRef = sqrtf(ref);
if (fabs(ref) < 1e-7) {
#ifdef _DEBUG
std::cerr << "ERROR, reference l2-norm is 0\n";
#endif
return false;
}
float normError = sqrtf(error);
error = normError / normRef;
bool result = error < epsilon;
#ifdef _DEBUG
if (!result) {
std::cerr << "ERROR, l2-norm error " << error << " is greater than epsilon "
<< epsilon << "\n";
}
#endif
return result;
}
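// Illustrative usage sketch (not part of the original header): sdkCompareL2fe
// computes the relative L2 error ||reference - data||_2 / ||reference||_2 and
// passes when it is below epsilon. Buffer names are placeholders.
#if 0
bool ok = sdkCompareL2fe(referenceFlow, computedFlow, width * height, 1e-3f);
#endif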
inline bool sdkLoadPPMub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned int channels;
return __loadPPM(file, data, w, h, &channels);
}
inline bool sdkLoadPPM4ub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = (unsigned char *)malloc(sizeof(unsigned char) * size * 4);
unsigned char *ptr = *data;
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
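// Illustrative usage sketch (not part of the original header): loading a PPM
// image into an RGBA buffer (the 4th channel is zero-padded). The file name
// is a placeholder; the caller owns and frees the returned buffer.
#if 0
unsigned char *rgba = NULL;
unsigned int w = 0, h = 0;
if (sdkLoadPPM4ub("frame10.ppm", &rgba, &w, &h)) {
  // ... use rgba (w * h * 4 bytes) ...
  free(rgba);
}
#endif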
inline bool sdkComparePPM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data, *ref_data;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPM4ub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
if (sdkLoadPPM4ub(src_file, &src_data, &src_width, &src_height) != true) {
std::cerr << "PPMvsPPM: unable to load src image file: " << src_file
<< "\n";
return false;
}
if (src_height != ref_height || src_width != ref_width) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: source and ref size mismatch (" << src_width
<< "," << src_height << ")vs(" << ref_width << "," << ref_height
<< ")\n";
}
}
if (verboseErrors) {
std::cerr << "PPMvsPPM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
}
if (compareData(ref_data, src_data, src_width * src_height * 4, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
return (error_count == 0) ? true : false;
}
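// Illustrative usage sketch (not part of the original header); the file names
// and tolerance values are placeholders. threshold is the fraction of pixels
// allowed to mismatch before the comparison fails.
#if 0
bool imagesMatch =
    sdkComparePPM("output.ppm", "reference.ppm", 2.0f, 0.15f, true);
#endif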
inline bool sdkComparePGM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data = 0, *ref_data = 0;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPMub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
if (sdkLoadPPMub(src_file, &src_data, &src_width, &src_height) != true) {
std::cerr << "PGMvsPGM: unable to load src image file: " << src_file
<< "\n";
return false;
}
if (src_height != ref_height || src_width != ref_width) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: source and ref size mismatch (" << src_width
<< "," << src_height << ")vs(" << ref_width << "," << ref_height
<< ")\n";
}
}
if (verboseErrors)
std::cerr << "PGMvsPGM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
if (compareData(ref_data, src_data, src_width * src_height, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
return (error_count == 0) ? true : false;
}
#endif // COMMON_HELPER_IMAGE_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Common/exception.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* CUda UTility Library */
#ifndef COMMON_EXCEPTION_H_
#define COMMON_EXCEPTION_H_
// includes, system
#include <stdlib.h>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>
//! Exception wrapper.
//! @param Std_Exception Exception out of namespace std for easy typing.
template <class Std_Exception>
class Exception : public Std_Exception {
public:
//! @brief Static construction interface
  //! @return Always throws ( Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const char *detailed = "-");
//! Static construction interface
  //! @return Always throws ( Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const std::string &detailed);
//! Destructor
virtual ~Exception() throw();
private:
//! Constructor, default (private)
Exception();
//! Constructor, standard
//! @param str string returned by what()
explicit Exception(const std::string &str);
};
////////////////////////////////////////////////////////////////////////////////
//! Exception handler function for arbitrary exceptions
//! @param ex exception to handle
////////////////////////////////////////////////////////////////////////////////
template <class Exception_Typ>
inline void handleException(const Exception_Typ &ex) {
std::cerr << ex.what() << std::endl;
exit(EXIT_FAILURE);
}
//! Convenience macros
//! Exception caused by dynamic program behavior, e.g. file does not exist
#define RUNTIME_EXCEPTION(msg) \
Exception<std::runtime_error>::throw_it(__FILE__, __LINE__, msg)
//! Logic exception in program, e.g. an assert failed
#define LOGIC_EXCEPTION(msg) \
Exception<std::logic_error>::throw_it(__FILE__, __LINE__, msg)
//! Out of range exception
#define RANGE_EXCEPTION(msg) \
Exception<std::range_error>::throw_it(__FILE__, __LINE__, msg)
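// Illustrative usage sketch (not part of the original header). The thrown
// Exception<std::runtime_error> derives from std::runtime_error, so it can be
// caught as a std::exception and forwarded to handleException().
#if 0
try {
  if (!inputFileExists) {  // hypothetical condition
    RUNTIME_EXCEPTION("input file is missing");
  }
} catch (const std::exception &ex) {
  handleException(ex);  // prints ex.what() and exits
}
#endif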
////////////////////////////////////////////////////////////////////////////////
//! Implementation
// includes, system
#include <sstream>
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const char *detailed) {
std::stringstream s;
  // Quite heavy-weight, but exceptions are not meant for
// performance / release versions
s << "Exception in file '" << file << "' in line " << line << "\n"
<< "Detailed description: " << detailed << "\n";
throw Exception(s.str());
}
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const std::string &msg) {
throw_it(file, line, msg.c_str());
}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, default (private).
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception() : Std_Exception("Unknown Exception.\n") {}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, standard (private).
//! String returned by what().
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception(const std::string &s) : Std_Exception(s) {}
////////////////////////////////////////////////////////////////////////////////
//! Destructor
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::~Exception() throw() {}
// functions, exported
#endif // COMMON_EXCEPTION_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Common/helper_cuda.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions for initialization and error checking
#ifndef COMMON_HELPER_CUDA_H_
#define COMMON_HELPER_CUDA_H_
#pragma once
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_string.h>
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// Note: your SDK sample is required to include the proper header files.
// Please refer to the CUDA examples for the CUDA headers that are needed,
// which may change depending on which CUDA functions are used.
// CUDA Runtime error messages
#ifdef __DPCT_HPP__
static const char *_cudaGetErrorEnum(dpct::err0 error) {
/*
DPCT1009:16: SYCL uses exceptions to report errors and does not use the error
codes. The original code was commented out and a warning string was inserted.
You need to rewrite this code.
*/
return "cudaGetErrorName is not supported" /*cudaGetErrorName(error)*/;
}
#endif
#ifdef CUDA_DRIVER_API
// CUDA Driver API errors
static const char *_cudaGetErrorEnum(CUresult error) {
static char unknown[] = "<unknown>";
const char *ret = NULL;
cuGetErrorName(error, &ret);
return ret ? ret : unknown;
}
#endif
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(cublasStatus_t error) {
switch (error) {
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
#endif
#ifdef _CUFFT_H_
// cuFFT API errors
static const char *_cudaGetErrorEnum(cufftResult error) {
switch (error) {
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
case CUFFT_INCOMPLETE_PARAMETER_LIST:
return "CUFFT_INCOMPLETE_PARAMETER_LIST";
case CUFFT_INVALID_DEVICE:
return "CUFFT_INVALID_DEVICE";
case CUFFT_PARSE_ERROR:
return "CUFFT_PARSE_ERROR";
case CUFFT_NO_WORKSPACE:
return "CUFFT_NO_WORKSPACE";
case CUFFT_NOT_IMPLEMENTED:
return "CUFFT_NOT_IMPLEMENTED";
case CUFFT_LICENSE_ERROR:
return "CUFFT_LICENSE_ERROR";
case CUFFT_NOT_SUPPORTED:
return "CUFFT_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSPARSEAPI
// cuSPARSE API errors
static const char *_cudaGetErrorEnum(cusparseStatus_t error) {
switch (error) {
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSOLVER_COMMON_H_
// cuSOLVER API errors
static const char *_cudaGetErrorEnum(cusolverStatus_t error) {
switch (error) {
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_STATUS_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_MAPPING_ERROR:
return "CUSOLVER_STATUS_MAPPING_ERROR";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSOLVER_STATUS_NOT_SUPPORTED:
return "CUSOLVER_STATUS_NOT_SUPPORTED ";
case CUSOLVER_STATUS_ZERO_PIVOT:
return "CUSOLVER_STATUS_ZERO_PIVOT";
case CUSOLVER_STATUS_INVALID_LICENSE:
return "CUSOLVER_STATUS_INVALID_LICENSE";
}
return "<unknown>";
}
#endif
#ifdef CURAND_H_
// cuRAND API errors
static const char *_cudaGetErrorEnum(int error) {
switch (error) {
case 0:
return "CURAND_STATUS_SUCCESS";
case 100:
return "CURAND_STATUS_VERSION_MISMATCH";
case 101:
return "CURAND_STATUS_NOT_INITIALIZED";
case 102:
return "CURAND_STATUS_ALLOCATION_FAILED";
case 103:
return "CURAND_STATUS_TYPE_ERROR";
case 104:
return "CURAND_STATUS_OUT_OF_RANGE";
case 105:
return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
case 106:
return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case 201:
return "CURAND_STATUS_LAUNCH_FAILURE";
case 202:
return "CURAND_STATUS_PREEXISTING_FAILURE";
case 203:
return "CURAND_STATUS_INITIALIZATION_FAILED";
case 204:
return "CURAND_STATUS_ARCH_MISMATCH";
case 999:
return "CURAND_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NVJPEGAPI
// nvJPEG API errors
static const char *_cudaGetErrorEnum(nvjpegStatus_t error) {
switch (error) {
case NVJPEG_STATUS_SUCCESS:
return "NVJPEG_STATUS_SUCCESS";
case NVJPEG_STATUS_NOT_INITIALIZED:
return "NVJPEG_STATUS_NOT_INITIALIZED";
case NVJPEG_STATUS_INVALID_PARAMETER:
return "NVJPEG_STATUS_INVALID_PARAMETER";
case NVJPEG_STATUS_BAD_JPEG:
return "NVJPEG_STATUS_BAD_JPEG";
case NVJPEG_STATUS_JPEG_NOT_SUPPORTED:
return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED";
case NVJPEG_STATUS_ALLOCATOR_FAILURE:
return "NVJPEG_STATUS_ALLOCATOR_FAILURE";
case NVJPEG_STATUS_EXECUTION_FAILED:
return "NVJPEG_STATUS_EXECUTION_FAILED";
case NVJPEG_STATUS_ARCH_MISMATCH:
return "NVJPEG_STATUS_ARCH_MISMATCH";
case NVJPEG_STATUS_INTERNAL_ERROR:
return "NVJPEG_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NV_NPPIDEFS_H
// NPP API errors
static const char *_cudaGetErrorEnum(NppStatus error) {
switch (error) {
case NPP_NOT_SUPPORTED_MODE_ERROR:
return "NPP_NOT_SUPPORTED_MODE_ERROR";
case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR";
case NPP_RESIZE_NO_OPERATION_ERROR:
return "NPP_RESIZE_NO_OPERATION_ERROR";
case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY:
return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_BAD_ARG_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFF_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECT_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUAD_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEM_ALLOC_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTO_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_INPUT:
return "NPP_INVALID_INPUT";
case NPP_POINTER_ERROR:
return "NPP_POINTER_ERROR";
case NPP_WARNING:
return "NPP_WARNING";
case NPP_ODD_ROI_WARNING:
return "NPP_ODD_ROI_WARNING";
#else
// These are for CUDA 5.5 or higher
case NPP_BAD_ARGUMENT_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFFICIENT_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECTANGLE_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUADRANGLE_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEMORY_ALLOCATION_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_HOST_POINTER_ERROR:
return "NPP_INVALID_HOST_POINTER_ERROR";
case NPP_INVALID_DEVICE_POINTER_ERROR:
return "NPP_INVALID_DEVICE_POINTER_ERROR";
#endif
case NPP_LUT_NUMBER_OF_LEVELS_ERROR:
return "NPP_LUT_NUMBER_OF_LEVELS_ERROR";
case NPP_TEXTURE_BIND_ERROR:
return "NPP_TEXTURE_BIND_ERROR";
case NPP_WRONG_INTERSECTION_ROI_ERROR:
return "NPP_WRONG_INTERSECTION_ROI_ERROR";
case NPP_NOT_EVEN_STEP_ERROR:
return "NPP_NOT_EVEN_STEP_ERROR";
case NPP_INTERPOLATION_ERROR:
return "NPP_INTERPOLATION_ERROR";
case NPP_RESIZE_FACTOR_ERROR:
return "NPP_RESIZE_FACTOR_ERROR";
case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR:
return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_MEMFREE_ERR:
return "NPP_MEMFREE_ERR";
case NPP_MEMSET_ERR:
return "NPP_MEMSET_ERR";
case NPP_MEMCPY_ERR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERR:
return "NPP_MIRROR_FLIP_ERR";
#else
case NPP_MEMFREE_ERROR:
return "NPP_MEMFREE_ERROR";
case NPP_MEMSET_ERROR:
return "NPP_MEMSET_ERROR";
case NPP_MEMCPY_ERROR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERROR:
return "NPP_MIRROR_FLIP_ERROR";
#endif
case NPP_ALIGNMENT_ERROR:
return "NPP_ALIGNMENT_ERROR";
case NPP_STEP_ERROR:
return "NPP_STEP_ERROR";
case NPP_SIZE_ERROR:
return "NPP_SIZE_ERROR";
case NPP_NULL_POINTER_ERROR:
return "NPP_NULL_POINTER_ERROR";
case NPP_CUDA_KERNEL_EXECUTION_ERROR:
return "NPP_CUDA_KERNEL_EXECUTION_ERROR";
case NPP_NOT_IMPLEMENTED_ERROR:
return "NPP_NOT_IMPLEMENTED_ERROR";
case NPP_ERROR:
return "NPP_ERROR";
case NPP_SUCCESS:
return "NPP_SUCCESS";
case NPP_WRONG_INTERSECTION_QUAD_WARNING:
return "NPP_WRONG_INTERSECTION_QUAD_WARNING";
case NPP_MISALIGNED_DST_ROI_WARNING:
return "NPP_MISALIGNED_DST_ROI_WARNING";
case NPP_AFFINE_QUAD_INCORRECT_WARNING:
return "NPP_AFFINE_QUAD_INCORRECT_WARNING";
case NPP_DOUBLE_SIZE_WARNING:
return "NPP_DOUBLE_SIZE_WARNING";
case NPP_WRONG_INTERSECTION_ROI_WARNING:
return "NPP_WRONG_INTERSECTION_ROI_WARNING";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000
/* These are 6.0 or higher */
case NPP_LUT_PALETTE_BITSIZE_ERROR:
return "NPP_LUT_PALETTE_BITSIZE_ERROR";
case NPP_ZC_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR";
case NPP_QUALITY_INDEX_ERROR:
return "NPP_QUALITY_INDEX_ERROR";
case NPP_CHANNEL_ORDER_ERROR:
return "NPP_CHANNEL_ORDER_ERROR";
case NPP_ZERO_MASK_VALUE_ERROR:
return "NPP_ZERO_MASK_VALUE_ERROR";
case NPP_NUMBER_OF_CHANNELS_ERROR:
return "NPP_NUMBER_OF_CHANNELS_ERROR";
case NPP_COI_ERROR:
return "NPP_COI_ERROR";
case NPP_DIVISOR_ERROR:
return "NPP_DIVISOR_ERROR";
case NPP_CHANNEL_ERROR:
return "NPP_CHANNEL_ERROR";
case NPP_STRIDE_ERROR:
return "NPP_STRIDE_ERROR";
case NPP_ANCHOR_ERROR:
return "NPP_ANCHOR_ERROR";
case NPP_MASK_SIZE_ERROR:
return "NPP_MASK_SIZE_ERROR";
case NPP_MOMENT_00_ZERO_ERROR:
return "NPP_MOMENT_00_ZERO_ERROR";
case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR:
return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR";
case NPP_THRESHOLD_ERROR:
return "NPP_THRESHOLD_ERROR";
case NPP_CONTEXT_MATCH_ERROR:
return "NPP_CONTEXT_MATCH_ERROR";
case NPP_FFT_FLAG_ERROR:
return "NPP_FFT_FLAG_ERROR";
case NPP_FFT_ORDER_ERROR:
return "NPP_FFT_ORDER_ERROR";
case NPP_SCALE_RANGE_ERROR:
return "NPP_SCALE_RANGE_ERROR";
case NPP_DATA_TYPE_ERROR:
return "NPP_DATA_TYPE_ERROR";
case NPP_OUT_OFF_RANGE_ERROR:
return "NPP_OUT_OFF_RANGE_ERROR";
case NPP_DIVIDE_BY_ZERO_ERROR:
return "NPP_DIVIDE_BY_ZERO_ERROR";
case NPP_RANGE_ERROR:
return "NPP_RANGE_ERROR";
case NPP_NO_MEMORY_ERROR:
return "NPP_NO_MEMORY_ERROR";
case NPP_ERROR_RESERVED:
return "NPP_ERROR_RESERVED";
case NPP_NO_OPERATION_WARNING:
return "NPP_NO_OPERATION_WARNING";
case NPP_DIVIDE_BY_ZERO_WARNING:
return "NPP_DIVIDE_BY_ZERO_WARNING";
#endif
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000
/* These are 7.0 or higher */
case NPP_OVERFLOW_ERROR:
return "NPP_OVERFLOW_ERROR";
case NPP_CORRUPTED_DATA_ERROR:
return "NPP_CORRUPTED_DATA_ERROR";
#endif
}
return "<unknown>";
}
#endif
template <typename T>
void check(T result, char const *const func, const char *const file,
int const line) {
}
#ifdef __DPCT_HPP__
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:17: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
// This will only print the proper error string when calling cudaGetLastError
// but does not exit the program in case an error is detected.
#define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__)
inline void __printLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:19: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
#endif
#ifndef MAX
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
// Float To Int conversion
inline int ftoi(float value) {
return (value >= 0 ? static_cast<int>(value + 0.5)
: static_cast<int>(value - 0.5));
}
// Beginning of GPU Architecture definitions
inline int _ConvertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
  // the # of cores per SM)
typedef struct dpct_type_113531 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{0x80, 64},
{0x86, 128},
{0x87, 128},
{0x89, 128},
{0x90, 128},
{-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
  // If we don't find the values, we default to using the previous one
// to run properly
printf(
"MapSMtoCores for SM %d.%d is undefined."
" Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
inline const char* _ConvertSMVer2ArchName(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the GPU Arch name)
typedef struct dpct_type_281558 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
const char* name;
} sSMtoArchName;
sSMtoArchName nGpuArchNameSM[] = {
{0x30, "Kepler"},
{0x32, "Kepler"},
{0x35, "Kepler"},
{0x37, "Kepler"},
{0x50, "Maxwell"},
{0x52, "Maxwell"},
{0x53, "Maxwell"},
{0x60, "Pascal"},
{0x61, "Pascal"},
{0x62, "Pascal"},
{0x70, "Volta"},
{0x72, "Xavier"},
{0x75, "Turing"},
{0x80, "Ampere"},
{0x86, "Ampere"},
{0x87, "Ampere"},
{0x89, "Ada"},
{0x90, "Hopper"},
{-1, "Graphics Device"}};
int index = 0;
while (nGpuArchNameSM[index].SM != -1) {
if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
return nGpuArchNameSM[index].name;
}
index++;
}
  // If we don't find the values, we default to using the previous one
// to run properly
printf(
"MapSMtoArchName for SM %d.%d is undefined."
" Default to use %s\n",
major, minor, nGpuArchNameSM[index - 1].name);
return nGpuArchNameSM[index - 1].name;
}
// end of GPU Architecture definitions
#ifdef __DPCT_HPP__
// General GPU Device CUDA Initialization
inline int gpuDeviceInit(int devID) {
int device_count;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuDeviceInit() CUDA error: "
"no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
if (devID < 0) {
devID = 0;
}
if (devID > device_count - 1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n",
device_count);
fprintf(stderr,
">> gpuDeviceInit (-device=%d) is not a valid"
" GPU device. <<\n",
devID);
fprintf(stderr, "\n");
return -devID;
}
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:21: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
/*
DPCT1035:22: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
if (computeMode == 0) {
fprintf(stderr,
"Error: device is running in <Compute Mode "
"Prohibited>, no threads can use cudaSetDevice().\n");
return -1;
}
if (major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(EXIT_FAILURE);
}
/*
DPCT1093:23: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, _ConvertSMVer2ArchName(major, minor));
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
inline int gpuGetMaxGflopsDeviceId() try {
int current_device = 0, sm_per_multiproc = 0;
int max_perf_device = 0;
int device_count = 0;
int devices_prohibited = 0;
uint64_t max_compute_perf = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the best CUDA capable GPU device
current_device = 0;
while (current_device < device_count) {
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:24: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
// If this GPU is not running on Compute Mode prohibited,
// then we can add it to the list
/*
DPCT1035:25: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (computeMode != 0) {
if (major == 9999 && minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc =
_ConvertSMVer2Cores(major, minor);
}
int multiProcessorCount = 0, clockRate = 0;
checkCudaErrors(
DPCT_CHECK_ERROR(multiProcessorCount = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_compute_units()));
dpct::err0 result =
DPCT_CHECK_ERROR(clockRate = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_clock_frequency());
uint64_t compute_perf = (uint64_t)multiProcessorCount * sm_per_multiproc * clockRate;
if (compute_perf > max_compute_perf) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
devices_prohibited++;
}
++current_device;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" all devices have compute mode prohibited.\n");
exit(EXIT_FAILURE);
}
return max_perf_device;
}
catch (sycl::exception const &exc) {
std::cerr << exc.what() << "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
std::exit(1);
}
// Initialization code to find the best CUDA Device
inline int findCudaDevice(int argc, const char **argv) {
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameter\n ");
exit(EXIT_FAILURE);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_FAILURE);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
/*
DPCT1093:26: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
major =
dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor =
dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
devID, _ConvertSMVer2ArchName(major, minor), major, minor);
}
return devID;
}
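// Illustrative usage sketch (not part of the original header): selecting the
// device at the top of a sample's main(), honoring an optional "-device=N"
// command-line flag.
#if 0
int main(int argc, char **argv) {
  int devID = findCudaDevice(argc, (const char **)argv);
  // ... run the sample on the selected device ...
  return 0;
}
#endif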
inline int findIntegratedGPU() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the integrated GPU which is compute capable
while (current_device < device_count) {
int computeMode = -1, integrated = -1;
/*
DPCT1035:27: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(
DPCT_CHECK_ERROR(integrated = dpct::dev_mgr::instance()
.get_device(current_device)
.get_integrated()));
// If GPU is integrated and is not running on Compute Mode prohibited,
// then cuda can map to GLES resource
/*
DPCT1035:28: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (integrated && (computeMode != 0)) {
/*
DPCT1093:29: The "current_device" device may be not the one intended for
use. Adjust the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(current_device)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, _ConvertSMVer2ArchName(major, minor), major, minor);
return current_device;
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No GLES-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
// General check for CUDA GPU SM Capabilities
inline bool checkCudaCapabilities(int major_version, int minor_version) {
int dev;
int major = 0, minor = 0;
checkCudaErrors(dev = dpct::dev_mgr::instance().current_device_id());
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(dev).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(dev).get_minor_version()));
if ((major > major_version) ||
(major == major_version &&
minor >= minor_version)) {
printf(" Device %d: <%16s >, Compute SM %d.%d detected\n", dev,
_ConvertSMVer2ArchName(major, minor), major, minor);
return true;
} else {
printf(
" No GPU device was found that can support "
"CUDA compute capability %d.%d.\n",
major_version, minor_version);
return false;
}
}
#endif
// end of CUDA Helper Functions
#endif // COMMON_HELPER_CUDA_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Common/helper_functions.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing,
// timers, image helpers, etc)
#ifndef COMMON_HELPER_FUNCTIONS_H_
#define COMMON_HELPER_FUNCTIONS_H_
#ifdef WIN32
#pragma warning(disable : 4996)
#endif
// includes, project
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
// includes, timer, string parsing, image helpers
#include <helper_image.h> // helper functions for image compare, dump, data comparisons
#include <helper_string.h> // helper functions for string parsing
#include <helper_timer.h> // helper functions for timers
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#endif // COMMON_HELPER_FUNCTIONS_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Samples/5_Domain_Specific/HSOpticalFlow/addKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief add two vectors of size _count_
///
/// SYCL kernel (migrated from the original CUDA kernel)
/// \param[in] op1 term one
/// \param[in] op2 term two
/// \param[in] count vector size
/// \param[out] sum result
///////////////////////////////////////////////////////////////////////////////
void AddKernel(const float *op1, const float *op2, int count,
float *sum, const sycl::nd_item<3> &item_ct1) {
const int pos = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
if (pos >= count) return;
sum[pos] = op1[pos] + op2[pos];
}
///////////////////////////////////////////////////////////////////////////////
/// \brief add two vectors of size _count_
/// \param[in] op1 term one
/// \param[in] op2 term two
/// \param[in] count vector size
/// \param[out] sum result
///////////////////////////////////////////////////////////////////////////////
static void Add(const float *op1, const float *op2, int count, float *sum, sycl::queue q) {
sycl::range<3> threads(1, 1, 256);
sycl::range<3> blocks(1, 1, iDivUp(count, threads[2]));
q.parallel_for(
sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
AddKernel(op1, op2, count, sum, item_ct1);
});
}
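// Illustrative usage sketch (not part of the original source). Pointer names
// and the element count are placeholders; the operands are expected to be
// device USM allocations created on the same in-order queue.
#if 0
sycl::queue q{sycl::property::queue::in_order()};
const int count = 1 << 20;
float *op1 = sycl::malloc_device<float>(count, q);
float *op2 = sycl::malloc_device<float>(count, q);
float *sum = sycl::malloc_device<float>(count, q);
// ... initialize op1 and op2 on the device ...
Add(op1, op2, count, sum, q);
q.wait();
#endif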
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Samples/5_Domain_Specific/HSOpticalFlow/flowSYCL.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef FLOW_CUDA_H
#define FLOW_CUDA_H
void ComputeFlowCUDA(
const float *I0, // source frame
const float *I1, // tracked frame
int width, // frame width
int height, // frame height
int stride, // row access stride
float alpha, // smoothness coefficient
int nLevels, // number of levels in pyramid
int nWarpIters, // number of warping iterations per pyramid level
int nSolverIters, // number of solver iterations (for linear system)
float *u, // output horizontal flow
float *v); // output vertical flow
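// Illustrative usage sketch (not part of the original header); buffer names
// and parameter values are placeholders for a typical Horn-Schunck setup
// (alpha = smoothness weight, Jacobi iterations for the linear system).
#if 0
ComputeFlowCUDA(I0, I1, width, height, stride,
                /*alpha=*/0.2f, /*nLevels=*/5, /*nWarpIters=*/3,
                /*nSolverIters=*/500, u, v);
#endif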
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Samples/5_Domain_Specific/HSOpticalFlow/flowGold.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef FLOW_GOLD_H
#define FLOW_GOLD_H
void ComputeFlowGold(
const float *I0, // source frame
const float *I1, // tracked frame
int width, // frame width
int height, // frame height
int stride, // row access stride
float alpha, // smoothness coefficient
int nLevels, // number of levels in pyramid
int nWarpIters, // number of warping iterations per pyramid level
int nIters, // number of solver iterations (for linear system)
float *u, // output horizontal flow
float *v); // output vertical flow
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Samples/5_Domain_Specific/HSOpticalFlow/flowSYCL.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
// include kernels
#include "downscaleKernel.dp.hpp"
#include "upscaleKernel.dp.hpp"
#include "warpingKernel.dp.hpp"
#include "derivativesKernel.dp.hpp"
#include "solverKernel.dp.hpp"
#include "addKernel.dp.hpp"
///////////////////////////////////////////////////////////////////////////////
/// \brief method logic
///
/// handles memory allocations, control flow
/// \param[in] I0 source image
/// \param[in] I1 tracked image
/// \param[in] width images width
/// \param[in] height images height
/// \param[in] stride images stride
/// \param[in] alpha degree of displacement field smoothness
/// \param[in] nLevels number of levels in a pyramid
/// \param[in] nWarpIters number of warping iterations per pyramid level
/// \param[in] nSolverIters number of solver iterations (Jacobi iterations)
/// \param[out] u horizontal displacement
/// \param[out] v vertical displacement
///////////////////////////////////////////////////////////////////////////////
void ComputeFlowCUDA(const float *I0, const float *I1, int width, int height,
int stride, float alpha, int nLevels, int nWarpIters,
int nSolverIters, float *u, float *v) {
printf("Computing optical flow on Device...\n");
sycl::queue q{aspect_selector(sycl::aspect::ext_intel_legacy_image), sycl::property::queue::in_order()};
std::cout << "\nRunning on "
<< q.get_device().get_info<sycl::info::device::name>() << "\n";
// pI0 and pI1 will hold device pointers
const float **pI0 = new const float *[nLevels];
const float **pI1 = new const float *[nLevels];
int *pW = new int[nLevels];
int *pH = new int[nLevels];
int *pS = new int[nLevels];
// device memory pointers
float *d_tmp;
float *d_du0;
float *d_dv0;
float *d_du1;
float *d_dv1;
float *d_Ix;
float *d_Iy;
float *d_Iz;
float *d_u;
float *d_v;
float *d_nu;
float *d_nv;
const int dataSize = stride * height * sizeof(float);
checkCudaErrors(DPCT_CHECK_ERROR(d_tmp = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_du0 = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_dv0 = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_du1 = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_dv1 = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_Ix = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_Iy = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_Iz = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(
d_u = (float *)sycl::malloc_device(dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(
d_v = (float *)sycl::malloc_device(dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_nu = (float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_nv = (float *)sycl::malloc_device(
dataSize, q)));
// prepare pyramid
int currentLevel = nLevels - 1;
// allocate GPU memory for input images
checkCudaErrors(DPCT_CHECK_ERROR(
*(pI0 + currentLevel) = (const float *)sycl::malloc_device(
dataSize, q)));
checkCudaErrors(DPCT_CHECK_ERROR(
*(pI1 + currentLevel) = (const float *)sycl::malloc_device(
dataSize, q)));
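  // Host staging buffers (pI0_h/pI1_h, I0_h/I1_h) and device buffers
  // (src_d0/src_d1) are reused by the kernel wrappers to repack the
  // single-channel float data into the RGBA float4 layout expected by the
  // SYCL image API.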
float *pI0_h =
(float *)sycl::malloc_host(stride * height * sizeof(sycl::float4), q);
float *I0_h = (float *)sycl::malloc_host(dataSize, q);
float *pI1_h =
(float *)sycl::malloc_host(stride * height * sizeof(sycl::float4), q);
float *I1_h = (float *)sycl::malloc_host(dataSize, q);
float *src_d0 =
(float *)sycl::malloc_device(stride * height * sizeof(sycl::float4), q);
float *src_d1 =
(float *)sycl::malloc_device(stride * height * sizeof(sycl::float4), q);
q.memcpy((void *)I0_h, I0, dataSize);
q.memcpy((void *)I1_h, I1, dataSize);
q.memcpy((void *)pI0[currentLevel], I0, dataSize);
q.memcpy((void *)pI1[currentLevel], I1, dataSize);
q.wait();
pW[currentLevel] = width;
pH[currentLevel] = height;
pS[currentLevel] = stride;
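  // Build the image pyramid: level nLevels-1 holds the full-resolution images;
  // each coarser level halves the width/height and realigns the stride.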
for (; currentLevel > 0; --currentLevel) {
int nw = pW[currentLevel] / 2;
int nh = pH[currentLevel] / 2;
int ns = iAlignUp(nw);
checkCudaErrors(DPCT_CHECK_ERROR(
*(pI0 + currentLevel - 1) = (const float *)sycl::malloc_device(
ns * nh * sizeof(float), q)));
checkCudaErrors(DPCT_CHECK_ERROR(
*(pI1 + currentLevel - 1) = (const float *)sycl::malloc_device(
ns * nh * sizeof(float), q)));
Downscale(pI0[currentLevel], pI0_h, I0_h, src_d0, pW[currentLevel],
pH[currentLevel], pS[currentLevel], nw, nh, ns,
(float *)pI0[currentLevel - 1], q);
Downscale(pI1[currentLevel], pI0_h, I0_h, src_d0, pW[currentLevel],
pH[currentLevel], pS[currentLevel], nw, nh, ns,
(float *)pI1[currentLevel - 1], q);
pW[currentLevel - 1] = nw;
pH[currentLevel - 1] = nh;
pS[currentLevel - 1] = ns;
}
checkCudaErrors(
DPCT_CHECK_ERROR(q.memset(d_u, 0, stride * height * sizeof(float))));
checkCudaErrors(
DPCT_CHECK_ERROR(q.memset(d_v, 0, stride * height * sizeof(float))));
checkCudaErrors(
DPCT_CHECK_ERROR(q.wait()));
// compute flow
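  // Coarse-to-fine estimation: currentLevel is 0 (coarsest) after the pyramid
  // loop. At each level run nWarpIters warping passes, each refining the flow
  // increment with nSolverIters Jacobi sweeps, then prolongate u, v to the
  // next finer level.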
for (; currentLevel < nLevels; ++currentLevel) {
for (int warpIter = 0; warpIter < nWarpIters; ++warpIter) {
checkCudaErrors(DPCT_CHECK_ERROR(
q.memset(d_du0, 0, dataSize)));
checkCudaErrors(DPCT_CHECK_ERROR(
q.memset(d_dv0, 0, dataSize)));
checkCudaErrors(DPCT_CHECK_ERROR(
q.memset(d_du1, 0, dataSize)));
checkCudaErrors(DPCT_CHECK_ERROR(
q.memset(d_dv1, 0, dataSize)));
// on current level we compute optical flow
// between frame 0 and warped frame 1
WarpImage(pI1[currentLevel], pI0_h, I0_h, src_d0, pW[currentLevel], pH[currentLevel],
pS[currentLevel], d_u, d_v, d_tmp, q);
ComputeDerivatives(pI0[currentLevel], d_tmp, pI0_h, pI1_h, I0_h, I1_h,
src_d0, src_d1, pW[currentLevel],
pH[currentLevel], pS[currentLevel], d_Ix, d_Iy, d_Iz, q);
for (int iter = 0; iter < nSolverIters; ++iter) {
SolveForUpdate(d_du0, d_dv0, d_Ix, d_Iy, d_Iz, pW[currentLevel],
pH[currentLevel], pS[currentLevel], alpha, d_du1, d_dv1, q);
Swap(d_du0, d_du1);
Swap(d_dv0, d_dv1);
}
// update u, v
Add(d_u, d_du0, pH[currentLevel] * pS[currentLevel], d_u, q);
Add(d_v, d_dv0, pH[currentLevel] * pS[currentLevel], d_v, q);
}
if (currentLevel != nLevels - 1) {
// prolongate solution
float scaleX = (float)pW[currentLevel + 1] / (float)pW[currentLevel];
Upscale(d_u, pI0_h, I0_h, src_d0, pW[currentLevel], pH[currentLevel], pS[currentLevel],
pW[currentLevel + 1], pH[currentLevel + 1], pS[currentLevel + 1],
scaleX, d_nu, q);
float scaleY = (float)pH[currentLevel + 1] / (float)pH[currentLevel];
Upscale(d_v, pI0_h, I0_h, src_d0, pW[currentLevel], pH[currentLevel], pS[currentLevel],
pW[currentLevel + 1], pH[currentLevel + 1], pS[currentLevel + 1],
scaleY, d_nv, q);
Swap(d_u, d_nu);
Swap(d_v, d_nv);
}
}
checkCudaErrors(DPCT_CHECK_ERROR(
q.memcpy(u, d_u, dataSize)));
checkCudaErrors(DPCT_CHECK_ERROR(
q.memcpy(v, d_v, dataSize)));
checkCudaErrors(
DPCT_CHECK_ERROR(q.wait()));
// cleanup
for (int i = 0; i < nLevels; ++i) {
checkCudaErrors(DPCT_CHECK_ERROR(
sycl::free((void *)pI0[i], q)));
checkCudaErrors(DPCT_CHECK_ERROR(
sycl::free((void *)pI1[i], q)));
}
delete[] pI0;
delete[] pI1;
delete[] pW;
delete[] pH;
delete[] pS;
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_tmp, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_du0, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_dv0, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_du1, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_dv1, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_Ix, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_Iy, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_Iz, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_nu, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_nv, q)));
checkCudaErrors(DPCT_CHECK_ERROR(sycl::free(d_u, q)));
checkCudaErrors(DPCT_CHECK_ERROR(sycl::free(d_v, q)));
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Samples/5_Domain_Specific/HSOpticalFlow/common.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
///////////////////////////////////////////////////////////////////////////////
// Header for common includes and utility functions
///////////////////////////////////////////////////////////////////////////////
#ifndef COMMON_H
#define COMMON_H
///////////////////////////////////////////////////////////////////////////////
// Common includes
///////////////////////////////////////////////////////////////////////////////
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <memory.h>
#include <math.h>
#include <helper_cuda.h>
///////////////////////////////////////////////////////////////////////////////
// Common constants
///////////////////////////////////////////////////////////////////////////////
const int StrideAlignment = 8;
///////////////////////////////////////////////////////////////////////////////
// Common functions
///////////////////////////////////////////////////////////////////////////////
// Align up n to the nearest multiple of m
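// e.g. iAlignUp(30) returns 32 with the default StrideAlignment of 8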
inline int iAlignUp(int n, int m = StrideAlignment) {
int mod = n % m;
if (mod)
return n + m - mod;
else
return n;
}
// round up n/m
inline int iDivUp(int n, int m) { return (n + m - 1) / m; }
// swap two values
template <typename T>
inline void Swap(T &a, T &b) {
T t = a;
a = b;
b = t;
}
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Samples/5_Domain_Specific/HSOpticalFlow/downscaleKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief downscale image
///
/// CUDA kernel, relies heavily on texture unit
/// \param[in] width image width
/// \param[in] height image height
/// \param[in] stride image stride
/// \param[out] out result
///////////////////////////////////////////////////////////////////////////////
void DownscaleKernel(int width, int height, int stride, float *out,
sycl::accessor<sycl::float4, 2, sycl::access::mode::read,
sycl::access::target::image>
tex_acc,
sycl::sampler texDesc,
const sycl::nd_item<3> &item_ct1) {
const int ix = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int iy = item_ct1.get_local_id(1) +
item_ct1.get_group(1) * item_ct1.get_local_range(1);
if (ix >= width || iy >= height) {
return;
}
int srcx = ix * 2;
int srcy = iy * 2;
auto inputCoords1 = sycl::float2(srcx + 0, srcy + 0);
auto inputCoords2 = sycl::float2(srcx + 0, srcy + 1);
auto inputCoords3 = sycl::float2(srcx + 1, srcy + 0);
auto inputCoords4 = sycl::float2(srcx + 1, srcy + 1);
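  // average the 2x2 block of fine-level texels that maps to this coarse pixel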
out[ix + iy * stride] = 0.25f * (tex_acc.read(inputCoords1, texDesc)[0] +
tex_acc.read(inputCoords2, texDesc)[0] +
tex_acc.read(inputCoords3, texDesc)[0] +
tex_acc.read(inputCoords4, texDesc)[0]);
}
///////////////////////////////////////////////////////////////////////////////
/// \brief downscale image
///
/// \param[in] src image to downscale
/// \param[in] width image width
/// \param[in] height image height
/// \param[in] stride image stride
/// \param[out] out result
///////////////////////////////////////////////////////////////////////////////
static void Downscale(const float *src, float *pI0_h, float *I0_h, float *src_p, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float *out, sycl::queue q) {
sycl::range<3> threads(1, 8, 32);
sycl::range<3> blocks(1, iDivUp(newHeight, threads[1]),
iDivUp(newWidth, threads[2]));
int dataSize = height * stride * sizeof(float);
q.memcpy(I0_h, src, dataSize).wait();
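  // Repack the single-channel source into the .x component of an RGBA float4
  // host buffer; the SYCL image below is created with an rgba/fp32 channel
  // layout, so the remaining components are zeroed.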
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
int index = i * stride + j;
pI0_h[index * 4 + 0] = I0_h[index];
pI0_h[index * 4 + 1] = pI0_h[index * 4 + 2] = pI0_h[index * 4 + 3] = 0.f;
}
}
q.memcpy(src_p, pI0_h, height * width * sizeof(sycl::float4)).wait();
auto texDescr = sycl::sampler(
sycl::coordinate_normalization_mode::unnormalized,
sycl::addressing_mode::clamp_to_edge, sycl::filtering_mode::nearest);
auto texFine = sycl::image<2>(src_p, sycl::image_channel_order::rgba,
sycl::image_channel_type::fp32,
sycl::range<2>(width, height),
sycl::range<1>(stride * sizeof(sycl::float4)));
q.submit([&](sycl::handler &cgh) {
auto tex_acc =
texFine.template get_access<sycl::float4,
sycl::access::mode::read>(cgh);
cgh.parallel_for(sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
DownscaleKernel(newWidth, newHeight, newStride, out,
tex_acc, texDescr, item_ct1);
});
});
} | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Samples/5_Domain_Specific/HSOpticalFlow/main.cpp.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
const static char *const sSDKsample = "HSOpticalFlow";
// CPU-GPU discrepancy threshold for self-test
const float THRESHOLD = 0.05f;
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
#include "flowGold.h"
#include "flowSYCL.h"
#include <helper_functions.h>
#include <cmath>
#include <chrono>
using Time = std::chrono::steady_clock;
using ms = std::chrono::milliseconds;
using float_ms = std::chrono::duration<float, ms::period>;
///////////////////////////////////////////////////////////////////////////////
/// \brief save optical flow in format described on vision.middlebury.edu/flow
/// \param[in] name output file name
/// \param[in] w optical flow field width
/// \param[in] h optical flow field height
/// \param[in] s optical flow field row stride
/// \param[in] u horizontal displacement
/// \param[in] v vertical displacement
///////////////////////////////////////////////////////////////////////////////
void WriteFloFile(const char *name, int w, int h, int s, const float *u,
const float *v) {
FILE *stream;
stream = fopen(name, "wb");
if (stream == 0) {
printf("Could not save flow to \"%s\"\n", name);
return;
}
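  // .flo header: the magic number 202021.25f ("PIEH" when read as ASCII bytes)
  // identifies the Middlebury flow file format, followed by width and height.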
float data = 202021.25f;
fwrite(&data, sizeof(float), 1, stream);
fwrite(&w, sizeof(w), 1, stream);
fwrite(&h, sizeof(h), 1, stream);
for (int i = 0; i < h; ++i) {
for (int j = 0; j < w; ++j) {
const int pos = j + i * s;
fwrite(u + pos, sizeof(float), 1, stream);
fwrite(v + pos, sizeof(float), 1, stream);
}
}
fclose(stream);
}
///////////////////////////////////////////////////////////////////////////////
/// \brief
/// load 4-channel unsigned byte image
/// and convert it to single channel FP32 image
/// \param[out] img_data pointer to raw image data
/// \param[out] img_w image width
/// \param[out] img_h image height
/// \param[out] img_s image row stride
/// \param[in] name image file name
/// \param[in] exePath executable file path
/// \return true if image is successfully loaded or false otherwise
///////////////////////////////////////////////////////////////////////////////
bool LoadImageAsFP32(float *&img_data, int &img_w, int &img_h, int &img_s,
const char *name, const char *exePath) {
printf("Loading \"%s\" ...\n", name);
char *name_ = sdkFindFilePath(name, exePath);
if (!name_) {
printf("File not found\n");
return false;
}
unsigned char *data = 0;
unsigned int w = 0, h = 0;
bool result = sdkLoadPPM4ub(name_, &data, &w, &h);
if (result == false) {
printf("Invalid file format\n");
return false;
}
img_w = w;
img_h = h;
img_s = iAlignUp(img_w);
img_data = new float[img_s * h];
// source is 4 channel image
const int widthStep = 4 * img_w;
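  // only the first channel of each 4-channel pixel is kept, normalized to [0,1]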
for (int i = 0; i < img_h; ++i) {
for (int j = 0; j < img_w; ++j) {
img_data[j + i * img_s] = ((float)data[j * 4 + i * widthStep]) / 255.0f;
}
}
return true;
}
///////////////////////////////////////////////////////////////////////////////
/// \brief compare given flow field with gold (L1 norm)
/// \param[in] width optical flow field width
/// \param[in] height optical flow field height
/// \param[in] stride optical flow field row stride
/// \param[in] h_uGold horizontal displacement, gold
/// \param[in] h_vGold vertical displacement, gold
/// \param[in] h_u horizontal displacement
/// \param[in] h_v vertical displacement
/// \return true if discrepancy is lower than a given threshold
///////////////////////////////////////////////////////////////////////////////
bool CompareWithGold(int width, int height, int stride, const float *h_uGold,
const float *h_vGold, const float *h_u, const float *h_v) {
float error = 0.0f;
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; ++j) {
const int pos = j + i * stride;
error += fabsf(h_u[pos] - h_uGold[pos]) + fabsf(h_v[pos] - h_vGold[pos]);
}
}
error /= (float)(width * height);
printf("L1 error : %.6f\n", error);
return (error < 1.0f);
}
///////////////////////////////////////////////////////////////////////////////
/// application entry point
///////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
// welcome message
printf("%s Starting...\n\n", sSDKsample);
// find images
const char *const sourceFrameName = "frame10.ppm";
const char *const targetFrameName = "frame11.ppm";
// image dimensions
int width;
int height;
// row access stride
int stride;
// flow is computed from source image to target image
float *h_source; // source image, host memory
float *h_target; // target image, host memory
// load image from file
if (!LoadImageAsFP32(h_source, width, height, stride, sourceFrameName,
argv[0])) {
exit(EXIT_FAILURE);
}
if (!LoadImageAsFP32(h_target, width, height, stride, targetFrameName,
argv[0])) {
exit(EXIT_FAILURE);
}
// allocate host memory for CPU results
float *h_uGold = new float[stride * height];
float *h_vGold = new float[stride * height];
// allocate host memory for GPU results
float *h_u = new float[stride * height];
float *h_v = new float[stride * height];
// smoothness
// if image brightness is not within [0,1]
  // this parameter should be scaled appropriately
const float alpha = 0.2f;
// number of pyramid levels
const int nLevels = 5;
// number of solver iterations on each level
const int nSolverIters = 500;
// number of warping iterations
const int nWarpIters = 3;
// start Host Timer
auto startGoldTime = Time::now();
ComputeFlowGold(h_source, h_target, width, height, stride, alpha, nLevels,
nWarpIters, nSolverIters, h_uGold, h_vGold);
// stop Host timer
auto stopGoldTime = Time::now();
// start Device Timer
auto startSYCLTime = Time::now();
ComputeFlowCUDA(h_source, h_target, width, height, stride, alpha, nLevels,
nWarpIters, nSolverIters, h_u, h_v);
// stop Device Timer
auto stopSYCLTime = Time::now();
auto Gold_duration =
std::chrono::duration_cast<float_ms>(stopGoldTime - startGoldTime)
.count();
printf("Processing time on CPU: %f (ms)\n", Gold_duration);
auto SYCL_duration =
std::chrono::duration_cast<float_ms>(stopSYCLTime - startSYCLTime)
.count();
printf("Processing time on Device: %f (ms)\n", SYCL_duration);
// compare results (L1 norm)
bool status =
CompareWithGold(width, height, stride, h_uGold, h_vGold, h_u, h_v);
WriteFloFile("FlowGPU.flo", width, height, stride, h_u, h_v);
WriteFloFile("FlowCPU.flo", width, height, stride, h_uGold, h_vGold);
// free resources
delete[] h_uGold;
delete[] h_vGold;
delete[] h_u;
delete[] h_v;
delete[] h_source;
delete[] h_target;
// report self-test status
exit(status ? EXIT_SUCCESS : EXIT_FAILURE);
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Samples/5_Domain_Specific/HSOpticalFlow/flowGold.cpp.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "common.h"
#include "flowGold.h"
#include <cmath>
///////////////////////////////////////////////////////////////////////////////
/// \brief host texture fetch
///
/// read from arbitrary position within image using bilinear interpolation
/// out-of-range coords are clamped to the nearest valid texel
/// \param[in] t texture raw data
/// \param[in] w texture width
/// \param[in] h texture height
/// \param[in] s texture stride
/// \param[in] x x coord of the point to fetch value at
/// \param[in] y y coord of the point to fetch value at
/// \return fetched value
///////////////////////////////////////////////////////////////////////////////
inline float Tex2D(const float *t, int w, int h, int s, float x, float y) {
// integer parts in floating point format
float intPartX, intPartY;
x -= 0.5f;
y -= 0.5f;
// get fractional parts of coordinates
float dx = fabsf(modff(x, &intPartX));
float dy = fabsf(modff(y, &intPartY));
// assume pixels are squares
// one of the corners
int ix0 = (int)intPartX;
int iy0 = (int)intPartY;
  // clamp out-of-range position to the image border
if (ix0 < 0) ix0 = 0;
if (iy0 < 0) iy0 = 0;
if (ix0 >= w) ix0 = w - 1;
if (iy0 >= h) iy0 = h - 1;
// corner which is opposite to (ix0, iy0)
int ix1 = ix0 + 1;
int iy1 = iy0 + 1;
if (ix1 >= w) ix1 = w - 1;
if (iy1 >= h) iy1 = h - 1;
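  // bilinear interpolation: weight the four surrounding texels by the
  // fractional offsets (dx, dy)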
float res = t[ix0 + iy0 * s] * (1.0f - dx) * (1.0f - dy);
res += t[ix1 + iy0 * s] * dx * (1.0f - dy);
res += t[ix0 + iy1 * s] * (1.0f - dx) * dy;
res += t[ix1 + iy1 * s] * dx * dy;
return res;
}
///////////////////////////////////////////////////////////////////////////////
/// \brief host texture fetch
///
/// read specific texel value
/// out-of-range coords are clamped to the nearest valid texel
/// \param[in] t texture raw data
/// \param[in] w texture width
/// \param[in] h texture height
/// \param[in] s texture stride
/// \param[in] x x coord of the point to fetch value at
/// \param[in] y y coord of the point to fetch value at
/// \return fetched value
///////////////////////////////////////////////////////////////////////////////
inline float Tex2Di(const float *src, int w, int h, int s, int x, int y) {
if (x < 0) x = 0;
if (y < 0) y = 0;
if (x >= w) x = w - 1;
if (y >= h) y = h - 1;
return src[x + y * s];
}
///////////////////////////////////////////////////////////////////////////////
/// \brief resize image
/// \param[in] src image to downscale
/// \param[in] width image width
/// \param[in] height image height
/// \param[in] stride image stride
/// \param[in] newWidth image new width
/// \param[in] newHeight image new height
/// \param[in] newStride image new stride
/// \param[out] out downscaled image data
///////////////////////////////////////////////////////////////////////////////
static void Downscale(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float *out) {
for (int i = 0; i < newHeight; ++i) {
for (int j = 0; j < newWidth; ++j) {
const int srcX = j * 2;
const int srcY = i * 2;
// average 4 neighbouring pixels
float sum;
sum = Tex2Di(src, width, height, stride, srcX + 0, srcY + 0);
sum += Tex2Di(src, width, height, stride, srcX + 0, srcY + 1);
sum += Tex2Di(src, width, height, stride, srcX + 1, srcY + 0);
sum += Tex2Di(src, width, height, stride, srcX + 1, srcY + 1);
// normalize
sum *= 0.25f;
out[j + i * newStride] = sum;
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief upscale one component of a displacement field
/// \param[in] src field component to upscale
/// \param[in] width field current width
/// \param[in] height field current height
/// \param[in] stride field current stride
/// \param[in] newWidth field new width
/// \param[in] newHeight field new height
/// \param[in] newStride field new stride
/// \param[in] scale value scale factor (multiplier)
/// \param[out] out upscaled field component
///////////////////////////////////////////////////////////////////////////////
static void Upscale(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float scale,
float *out) {
for (int i = 0; i < newHeight; ++i) {
for (int j = 0; j < newWidth; ++j) {
// position within smaller image
float x = ((float)j - 0.5f) * 0.5f;
float y = ((float)i - 0.5f) * 0.5f;
out[j + i * newStride] = Tex2D(src, width, height, stride, x, y) * scale;
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief warp image with provided vector field
///
/// For each output pixel there is a vector which tells which pixel
/// from a source image should be mapped to this particular output
/// pixel.
/// It is assumed that images and the vector field have the same stride and
/// resolution.
/// \param[in] src source image
/// \param[in] w width
/// \param[in] h height
/// \param[in] s stride
/// \param[in] u horizontal displacement
/// \param[in] v vertical displacement
/// \param[out] out warped image
///////////////////////////////////////////////////////////////////////////////
static void WarpImage(const float *src, int w, int h, int s, const float *u,
const float *v, float *out) {
for (int i = 0; i < h; ++i) {
for (int j = 0; j < w; ++j) {
const int pos = j + i * s;
// warped coords
float x = (float)j + u[pos];
float y = (float)i + v[pos];
out[pos] = Tex2D(src, w, h, s, x, y);
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief computes image derivatives for a pair of images
/// \param[in] I0 source image
/// \param[in] I1 tracked image
/// \param[in] w images width
/// \param[in] h images height
/// \param[in] s images stride
/// \param[out] Ix x derivative
/// \param[out] Iy y derivative
/// \param[out] Iz temporal derivative
///////////////////////////////////////////////////////////////////////////////
static void ComputeDerivatives(const float *I0, const float *I1, int w, int h,
int s, float *Ix, float *Iy, float *Iz) {
for (int i = 0; i < h; ++i) {
for (int j = 0; j < w; ++j) {
const int pos = j + i * s;
float t0, t1;
// derivative filter is (1, -8, 0, 8, -1)/12
// x derivative
t0 = Tex2Di(I0, w, h, s, j - 2, i);
t0 -= Tex2Di(I0, w, h, s, j - 1, i) * 8.0f;
t0 += Tex2Di(I0, w, h, s, j + 1, i) * 8.0f;
t0 -= Tex2Di(I0, w, h, s, j + 2, i);
t0 /= 12.0f;
t1 = Tex2Di(I1, w, h, s, j - 2, i);
t1 -= Tex2Di(I1, w, h, s, j - 1, i) * 8.0f;
t1 += Tex2Di(I1, w, h, s, j + 1, i) * 8.0f;
t1 -= Tex2Di(I1, w, h, s, j + 2, i);
t1 /= 12.0f;
// spatial derivatives are averaged
Ix[pos] = (t0 + t1) * 0.5f;
// t derivative
Iz[pos] = I1[pos] - I0[pos];
// y derivative
t0 = Tex2Di(I0, w, h, s, j, i - 2);
t0 -= Tex2Di(I0, w, h, s, j, i - 1) * 8.0f;
t0 += Tex2Di(I0, w, h, s, j, i + 1) * 8.0f;
t0 -= Tex2Di(I0, w, h, s, j, i + 2);
t0 /= 12.0f;
t1 = Tex2Di(I1, w, h, s, j, i - 2);
t1 -= Tex2Di(I1, w, h, s, j, i - 1) * 8.0f;
t1 += Tex2Di(I1, w, h, s, j, i + 1) * 8.0f;
t1 -= Tex2Di(I1, w, h, s, j, i + 2);
t1 /= 12.0f;
Iy[pos] = (t0 + t1) * 0.5f;
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief one iteration of classical Horn-Schunck method
///
/// It is one iteration of Jacobi method for a corresponding linear system
/// \param[in] du0 current horizontal displacement approximation
/// \param[in] dv0 current vertical displacement approximation
/// \param[in] Ix image x derivative
/// \param[in] Iy image y derivative
/// \param[in] Iz temporal derivative
/// \param[in] w width
/// \param[in] h height
/// \param[in] s stride
/// \param[in] alpha degree of smoothness
/// \param[out] du1 new horizontal displacement approximation
/// \param[out] dv1 new vertical displacement approximation
///////////////////////////////////////////////////////////////////////////////
static void SolveForUpdate(const float *du0, const float *dv0, const float *Ix,
const float *Iy, const float *Iz, int w, int h,
int s, float alpha, float *du1, float *dv1) {
for (int i = 0; i < h; ++i) {
for (int j = 0; j < w; ++j) {
const int pos = j + i * s;
int left, right, up, down;
// handle borders
if (j != 0)
left = pos - 1;
else
left = pos;
if (j != w - 1)
right = pos + 1;
else
right = pos;
if (i != 0)
down = pos - s;
else
down = pos;
if (i != h - 1)
up = pos + s;
else
up = pos;
float sumU = (du0[left] + du0[right] + du0[up] + du0[down]) * 0.25f;
float sumV = (dv0[left] + dv0[right] + dv0[up] + dv0[down]) * 0.25f;
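      // Horn-Schunck Jacobi update: subtract the data-term correction
      // (Ix*sumU + Iy*sumV + Iz) / (Ix^2 + Iy^2 + alpha) along the image gradient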
float frac = (Ix[pos] * sumU + Iy[pos] * sumV + Iz[pos]) /
(Ix[pos] * Ix[pos] + Iy[pos] * Iy[pos] + alpha);
du1[pos] = sumU - Ix[pos] * frac;
dv1[pos] = sumV - Iy[pos] * frac;
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief method logic
///
/// handles memory allocation and control flow
/// \param[in] I0 source image
/// \param[in] I1 tracked image
/// \param[in] width images width
/// \param[in] height images height
/// \param[in] stride images stride
/// \param[in] alpha degree of displacement field smoothness
/// \param[in] nLevels number of levels in a pyramid
/// \param[in] nWarpIters number of warping iterations per pyramid level
/// \param[in] nSolverIters number of solver iterations (Jacobi iterations)
/// \param[out] u horizontal displacement
/// \param[out] v vertical displacement
///////////////////////////////////////////////////////////////////////////////
void ComputeFlowGold(const float *I0, const float *I1, int width, int height,
int stride, float alpha, int nLevels, int nWarpIters,
int nSolverIters, float *u, float *v) {
printf("Computing optical flow on CPU...\n");
float *u0 = u;
float *v0 = v;
const float **pI0 = new const float *[nLevels];
const float **pI1 = new const float *[nLevels];
int *pW = new int[nLevels];
int *pH = new int[nLevels];
int *pS = new int[nLevels];
const int pixelCountAligned = height * stride;
float *tmp = new float[pixelCountAligned];
float *du0 = new float[pixelCountAligned];
float *dv0 = new float[pixelCountAligned];
float *du1 = new float[pixelCountAligned];
float *dv1 = new float[pixelCountAligned];
float *Ix = new float[pixelCountAligned];
float *Iy = new float[pixelCountAligned];
float *Iz = new float[pixelCountAligned];
float *nu = new float[pixelCountAligned];
float *nv = new float[pixelCountAligned];
// prepare pyramid
int currentLevel = nLevels - 1;
pI0[currentLevel] = I0;
pI1[currentLevel] = I1;
pW[currentLevel] = width;
pH[currentLevel] = height;
pS[currentLevel] = stride;
for (; currentLevel > 0; --currentLevel) {
int nw = pW[currentLevel] / 2;
int nh = pH[currentLevel] / 2;
int ns = iAlignUp(nw);
pI0[currentLevel - 1] = new float[ns * nh];
pI1[currentLevel - 1] = new float[ns * nh];
Downscale(pI0[currentLevel], pW[currentLevel], pH[currentLevel],
pS[currentLevel], nw, nh, ns, (float *)pI0[currentLevel - 1]);
Downscale(pI1[currentLevel], pW[currentLevel], pH[currentLevel],
pS[currentLevel], nw, nh, ns, (float *)pI1[currentLevel - 1]);
pW[currentLevel - 1] = nw;
pH[currentLevel - 1] = nh;
pS[currentLevel - 1] = ns;
}
// initial approximation
memset(u, 0, stride * height * sizeof(float));
memset(v, 0, stride * height * sizeof(float));
// compute flow
for (; currentLevel < nLevels; ++currentLevel) {
for (int warpIter = 0; warpIter < nWarpIters; ++warpIter) {
memset(du0, 0, pixelCountAligned * sizeof(float));
memset(dv0, 0, pixelCountAligned * sizeof(float));
memset(du1, 0, pixelCountAligned * sizeof(float));
memset(dv1, 0, pixelCountAligned * sizeof(float));
WarpImage(pI1[currentLevel], pW[currentLevel], pH[currentLevel],
pS[currentLevel], u, v, tmp);
// on current level we compute optical flow
// between frame 0 and warped frame 1
ComputeDerivatives(pI0[currentLevel], tmp, pW[currentLevel],
pH[currentLevel], pS[currentLevel], Ix, Iy, Iz);
for (int iter = 0; iter < nSolverIters; ++iter) {
SolveForUpdate(du0, dv0, Ix, Iy, Iz, pW[currentLevel], pH[currentLevel],
pS[currentLevel], alpha, du1, dv1);
Swap(du0, du1);
Swap(dv0, dv1);
}
// update u, v
for (int i = 0; i < pH[currentLevel] * pS[currentLevel]; ++i) {
u[i] += du0[i];
v[i] += dv0[i];
}
} // end for (int warpIter = 0; warpIter < nWarpIters; ++warpIter)
if (currentLevel != nLevels - 1) {
// prolongate solution
float scaleX = (float)pW[currentLevel + 1] / (float)pW[currentLevel];
Upscale(u, pW[currentLevel], pH[currentLevel], pS[currentLevel],
pW[currentLevel + 1], pH[currentLevel + 1], pS[currentLevel + 1],
scaleX, nu);
float scaleY = (float)pH[currentLevel + 1] / (float)pH[currentLevel];
Upscale(v, pW[currentLevel], pH[currentLevel], pS[currentLevel],
pW[currentLevel + 1], pH[currentLevel + 1], pS[currentLevel + 1],
scaleY, nv);
Swap(u, nu);
Swap(v, nv);
}
} // end for (; currentLevel < nLevels; ++currentLevel)
if (u != u0) {
// solution is not in the specified array
// copy
memcpy(u0, u, pixelCountAligned * sizeof(float));
memcpy(v0, v, pixelCountAligned * sizeof(float));
Swap(u, nu);
Swap(v, nv);
}
// cleanup
// last level is not being freed here
// because it refers to input images
for (int i = 0; i < nLevels - 1; ++i) {
delete[] pI0[i];
delete[] pI1[i];
}
delete[] pI0;
delete[] pI1;
delete[] pW;
delete[] pH;
delete[] pS;
delete[] tmp;
delete[] du0;
delete[] dv0;
delete[] du1;
delete[] dv1;
delete[] Ix;
delete[] Iy;
delete[] Iz;
delete[] nu;
delete[] nv;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Samples/5_Domain_Specific/HSOpticalFlow/warpingKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief warp image with a given displacement field, CUDA kernel.
/// \param[in] width image width
/// \param[in] height image height
/// \param[in] stride image stride
/// \param[in] u horizontal displacement
/// \param[in] v vertical displacement
/// \param[out] out result
///////////////////////////////////////////////////////////////////////////////
void WarpingKernel(int width, int height, int stride, const float *u,
const float *v, float *out,
sycl::accessor<sycl::float4, 2, sycl::access::mode::read,
sycl::access::target::image>
texToWarp,
sycl::sampler texDesc,
const sycl::nd_item<3> &item_ct1) {
const int ix = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int iy = item_ct1.get_local_id(1) +
item_ct1.get_group(1) * item_ct1.get_local_range(1);
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float x = ((float)ix + u[pos]);
float y = ((float)iy + v[pos]);
auto inputCoord = sycl::float2(x, y);
out[pos] = texToWarp.read(inputCoord, texDesc)[0];
}
///////////////////////////////////////////////////////////////////////////////
/// \brief warp image with provided vector field, CUDA kernel wrapper.
///
/// For each output pixel there is a vector which tells which pixel
/// from a source image should be mapped to this particular output
/// pixel.
/// It is assumed that images and the vector field have the same stride and
/// resolution.
/// \param[in] src source image
/// \param[in] w width
/// \param[in] h height
/// \param[in] s stride
/// \param[in] u horizontal displacement
/// \param[in] v vertical displacement
/// \param[out] out warped image
///////////////////////////////////////////////////////////////////////////////
static void WarpImage(const float *src, float *pI0_h, float *I0_h, float *src_p, int w, int h, int s, const float *u,
const float *v, float *out, sycl::queue q) {
sycl::range<3> threads(1, 6, 32);
sycl::range<3> blocks(1, iDivUp(h, threads[1]), iDivUp(w, threads[2]));
int dataSize = s * h * sizeof(float);
q.memcpy(I0_h, src, dataSize).wait();
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
int index = i * s + j;
pI0_h[index * 4 + 0] = I0_h[index];
pI0_h[index * 4 + 1] = pI0_h[index * 4 + 2] = pI0_h[index * 4 + 3] = 0.f;
}
}
q.memcpy(src_p, pI0_h, s * h * sizeof(sycl::float4)).wait();
auto texDescr = sycl::sampler(
sycl::coordinate_normalization_mode::unnormalized,
sycl::addressing_mode::clamp_to_edge, sycl::filtering_mode::linear);
auto texToWarp =
sycl::image<2>(src_p, sycl::image_channel_order::rgba,
sycl::image_channel_type::fp32, sycl::range<2>(w, h),
sycl::range<1>(s * sizeof(sycl::float4)));
q.submit([&](sycl::handler &cgh) {
auto texToWarp_acc =
texToWarp.template get_access<sycl::float4,
sycl::access::mode::read>(cgh);
cgh.parallel_for(sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
WarpingKernel(w, h, s, u, v, out,
texToWarp_acc, texDescr, item_ct1);
});
});
} | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Samples/5_Domain_Specific/HSOpticalFlow/derivativesKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief compute image derivatives
///
/// CUDA kernel, relies heavily on texture unit
/// \param[in] width image width
/// \param[in] height image height
/// \param[in] stride image stride
/// \param[out] Ix x derivative
/// \param[out] Iy y derivative
/// \param[out] Iz temporal derivative
///////////////////////////////////////////////////////////////////////////////
void ComputeDerivativesKernel(int width, int height, int stride, float *Ix,
float *Iy, float *Iz,
sycl::accessor<sycl::float4, 2, sycl::access::mode::read,
sycl::access::target::image>
texSource,
sycl::accessor<sycl::float4, 2, sycl::access::mode::read,
sycl::access::target::image>
texTarget,
sycl::sampler texDesc,
const sycl::nd_item<3> &item_ct1) {
const int ix = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int iy = item_ct1.get_local_id(1) +
item_ct1.get_group(1) * item_ct1.get_local_range(1);
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float t0, t1;
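  // five-point derivative filter (1, -8, 0, 8, -1)/12, averaged over both frames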
auto x_inputCoords1 = sycl::float2(ix - 2.0f, iy);
auto x_inputCoords2 = sycl::float2(ix - 1.0f, iy);
auto x_inputCoords3 = sycl::float2(ix + 1.0f, iy);
auto x_inputCoords4 = sycl::float2(ix + 2.0f, iy);
t0 = texSource.read(x_inputCoords1, texDesc)[0];
t0 -= texSource.read(x_inputCoords2, texDesc)[0] * 8.0f;
t0 += texSource.read(x_inputCoords3, texDesc)[0] * 8.0f;
t0 -= texSource.read(x_inputCoords4, texDesc)[0];
t0 /= 12.0f;
t1 = texTarget.read(x_inputCoords1, texDesc)[0];
t1 -= texTarget.read(x_inputCoords2, texDesc)[0] * 8.0f;
t1 += texTarget.read(x_inputCoords3, texDesc)[0] * 8.0f;
t1 -= texTarget.read(x_inputCoords4, texDesc)[0];
t1 /= 12.0f;
Ix[pos] = (t0 + t1) * 0.5f;
// t derivative
auto inputCoord = sycl::float2(ix, iy);
Iz[pos] = texTarget.read(inputCoord, texDesc)[0] -
texSource.read(inputCoord, texDesc)[0];
// y derivative
auto y_inputCoords1 = sycl::float2(ix, iy - 2.0f);
auto y_inputCoords2 = sycl::float2(ix, iy - 1.0f);
auto y_inputCoords3 = sycl::float2(ix, iy + 1.0f);
auto y_inputCoords4 = sycl::float2(ix, iy + 2.0f);
t0 = texSource.read(y_inputCoords1, texDesc)[0];
t0 -= texSource.read(y_inputCoords2, texDesc)[0] * 8.0f;
t0 += texSource.read(y_inputCoords3, texDesc)[0] * 8.0f;
t0 -= texSource.read(y_inputCoords4, texDesc)[0];
t0 /= 12.0f;
t1 = texTarget.read(y_inputCoords1, texDesc)[0];
t1 -= texTarget.read(y_inputCoords2, texDesc)[0] * 8.0f;
t1 += texTarget.read(y_inputCoords3, texDesc)[0] * 8.0f;
t1 -= texTarget.read(y_inputCoords4, texDesc)[0];
t1 /= 12.0f;
Iy[pos] = (t0 + t1) * 0.5f;
}
///////////////////////////////////////////////////////////////////////////////
/// \brief compute image derivatives
///
/// \param[in] I0 source image
/// \param[in] I1 tracked image
/// \param[in] w image width
/// \param[in] h image height
/// \param[in] s image stride
/// \param[out] Ix x derivative
/// \param[out] Iy y derivative
/// \param[out] Iz temporal derivative
///////////////////////////////////////////////////////////////////////////////
static void ComputeDerivatives(const float *I0, const float *I1, float *pI0_h,
float *pI1_h, float *I0_h, float *I1_h,
float *src_d0, float *src_d1, int w, int h,
int s, float *Ix, float *Iy, float *Iz, sycl::queue q) {
sycl::range<3> threads(1, 6, 32);
sycl::range<3> blocks(1, iDivUp(h, threads[1]), iDivUp(w, threads[2]));
int dataSize = s * h * sizeof(float);
q.memcpy(I0_h, I0, dataSize);
q.memcpy(I1_h, I1, dataSize);
q.wait();
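  // Repack both single-channel frames into RGBA float4 host buffers; only the
  // .x component carries data, matching the rgba/fp32 images created below.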
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
int index = i * s + j;
pI0_h[index * 4 + 0] = I0_h[index];
pI0_h[index * 4 + 1] = pI0_h[index * 4 + 2] = pI0_h[index * 4 + 3] = 0.f;
}
}
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
int index = i * s + j;
pI1_h[index * 4 + 0] = I1_h[index];
pI1_h[index * 4 + 1] = pI1_h[index * 4 + 2] = pI1_h[index * 4 + 3] = 0.f;
}
}
q.memcpy(src_d0, pI0_h, s * h * sizeof(sycl::float4));
q.memcpy(src_d1, pI1_h, s * h * sizeof(sycl::float4));
q.wait();
auto texDescr = sycl::sampler(
sycl::coordinate_normalization_mode::unnormalized,
sycl::addressing_mode::clamp_to_edge, sycl::filtering_mode::nearest);
auto texSource =
sycl::image<2>(src_d0, sycl::image_channel_order::rgba,
sycl::image_channel_type::fp32, sycl::range<2>(w, h),
sycl::range<1>(s * sizeof(sycl::float4)));
auto texTarget =
sycl::image<2>(src_d1, sycl::image_channel_order::rgba,
sycl::image_channel_type::fp32, sycl::range<2>(w, h),
sycl::range<1>(s * sizeof(sycl::float4)));
q.submit([&](sycl::handler &cgh) {
auto texSource_acc =
texSource.template get_access<sycl::float4,
sycl::access::mode::read>(cgh);
auto texTarget_acc =
texTarget.template get_access<sycl::float4,
sycl::access::mode::read>(cgh);
cgh.parallel_for(
sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
ComputeDerivativesKernel(
w, h, s, Ix, Iy, Iz,
texSource_acc, texTarget_acc,
texDescr, item_ct1);
});
});
} | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Samples/5_Domain_Specific/HSOpticalFlow/solverKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief one iteration of classical Horn-Schunck method, CUDA kernel.
///
/// It is one iteration of Jacobi method for a corresponding linear system.
/// Template parameters describe the CTA size
/// \param[in] du0 current horizontal displacement approximation
/// \param[in] dv0 current vertical displacement approximation
/// \param[in] Ix image x derivative
/// \param[in] Iy image y derivative
/// \param[in] Iz temporal derivative
/// \param[in] w width
/// \param[in] h height
/// \param[in] s stride
/// \param[in] alpha degree of smoothness
/// \param[out] du1 new horizontal displacement approximation
/// \param[out] dv1 new vertical displacement approximation
///////////////////////////////////////////////////////////////////////////////
template <int bx, int by>
void JacobiIteration(const float *du0, const float *dv0,
const float *Ix, const float *Iy,
const float *Iz, int w, int h, int s,
float alpha, float *du1, float *dv1,
const sycl::nd_item<3> &item_ct1,
volatile float *du, volatile float *dv) {
// Handle to thread block group
auto cta = item_ct1.get_group();
const int ix = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int iy = item_ct1.get_local_id(1) +
item_ct1.get_group(1) * item_ct1.get_local_range(1);
// position within global memory array
const int pos = sycl::min(ix, w - 1) + sycl::min(iy, h - 1) * s;
// position within shared memory array
const int shMemPos =
item_ct1.get_local_id(2) + 1 + (item_ct1.get_local_id(1) + 1) * (bx + 2);
// Load data to shared memory.
// load tile being processed
du[shMemPos] = du0[pos];
dv[shMemPos] = dv0[pos];
// load necessary neighbouring elements
// We clamp out-of-range coordinates.
// It is equivalent to mirroring
// because we access data only one step away from borders.
if (item_ct1.get_local_id(1) == 0) {
// beginning of the tile
const int bsx = item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int bsy = item_ct1.get_group(1) * item_ct1.get_local_range(1);
// element position within matrix
int x, y;
// element position within linear array
// gm - global memory
// sm - shared memory
int gmPos, smPos;
/*
DPCT1064:30: Migrated min call is used in a macro/template definition and
may not be valid for all macro/template uses. Adjust the code.
*/
x = dpct::min((unsigned int)(bsx + item_ct1.get_local_id(2)), w - 1);
// row just below the tile
y = sycl::max(bsy - 1, 0);
gmPos = y * s + x;
smPos = item_ct1.get_local_id(2) + 1;
du[smPos] = du0[gmPos];
dv[smPos] = dv0[gmPos];
// row above the tile
y = sycl::min(bsy + by, h - 1);
smPos += (by + 1) * (bx + 2);
gmPos = y * s + x;
du[smPos] = du0[gmPos];
dv[smPos] = dv0[gmPos];
} else if (item_ct1.get_local_id(1) == 1) {
// beginning of the tile
const int bsx = item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int bsy = item_ct1.get_group(1) * item_ct1.get_local_range(1);
// element position within matrix
int x, y;
// element position within linear array
// gm - global memory
// sm - shared memory
int gmPos, smPos;
/*
DPCT1064:31: Migrated min call is used in a macro/template definition and
may not be valid for all macro/template uses. Adjust the code.
*/
y = dpct::min((unsigned int)(bsy + item_ct1.get_local_id(2)), h - 1);
// column to the left
x = sycl::max(bsx - 1, 0);
smPos = bx + 2 + item_ct1.get_local_id(2) * (bx + 2);
gmPos = x + y * s;
// check if we are within tile
if (item_ct1.get_local_id(2) < by) {
du[smPos] = du0[gmPos];
dv[smPos] = dv0[gmPos];
// column to the right
x = sycl::min(bsx + bx, w - 1);
gmPos = y * s + x;
smPos += bx + 1;
du[smPos] = du0[gmPos];
dv[smPos] = dv0[gmPos];
}
}
/*
DPCT1065:13: Consider replacing sycl::nd_item::barrier() with
sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better
performance if there is no access to global memory.
*/
item_ct1.barrier();
if (ix >= w || iy >= h) return;
// now all necessary data are loaded to shared memory
int left, right, up, down;
left = shMemPos - 1;
right = shMemPos + 1;
up = shMemPos + bx + 2;
down = shMemPos - bx - 2;
float sumU = (du[left] + du[right] + du[up] + du[down]) * 0.25f;
float sumV = (dv[left] + dv[right] + dv[up] + dv[down]) * 0.25f;
float frac = (Ix[pos] * sumU + Iy[pos] * sumV + Iz[pos]) /
(Ix[pos] * Ix[pos] + Iy[pos] * Iy[pos] + alpha);
du1[pos] = sumU - Ix[pos] * frac;
dv1[pos] = sumV - Iy[pos] * frac;
}
///////////////////////////////////////////////////////////////////////////////
/// \brief one iteration of classical Horn-Schunck method, CUDA kernel wrapper.
///
/// It is one iteration of Jacobi method for a corresponding linear system.
/// \param[in] du0 current horizontal displacement approximation
/// \param[in] dv0 current vertical displacement approximation
/// \param[in] Ix image x derivative
/// \param[in] Iy image y derivative
/// \param[in] Iz temporal derivative
/// \param[in] w width
/// \param[in] h height
/// \param[in] s stride
/// \param[in] alpha degree of smoothness
/// \param[out] du1 new horizontal displacement approximation
/// \param[out] dv1 new vertical displacement approximation
///////////////////////////////////////////////////////////////////////////////
static void SolveForUpdate(const float *du0, const float *dv0, const float *Ix,
const float *Iy, const float *Iz, int w, int h,
int s, float alpha, float *du1, float *dv1, sycl::queue q) {
// CTA size
sycl::range<3> threads(1, 6, 32);
// grid size
sycl::range<3> blocks(1, iDivUp(h, threads[1]), iDivUp(w, threads[2]));
/*
DPCT1049:14: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
q.submit([&](sycl::handler &cgh) {
sycl::local_accessor<float, 1> du_acc_ct1(
sycl::range<1>((32 + 2) * (6 + 2)), cgh);
sycl::local_accessor<float, 1> dv_acc_ct1(
sycl::range<1>((32 + 2) * (6 + 2)), cgh);
cgh.parallel_for(sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
JacobiIteration<32, 6>(du0, dv0, Ix, Iy, Iz, w, h, s,
alpha, du1, dv1, item_ct1,
du_acc_ct1.get_pointer(),
dv_acc_ct1.get_pointer());
});
});
}
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/Samples/5_Domain_Specific/HSOpticalFlow/upscaleKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief upscale one component of a displacement field, CUDA kernel
/// \param[in] width field width
/// \param[in] height field height
/// \param[in] stride field stride
/// \param[in] scale scale factor (multiplier)
/// \param[out] out result
///////////////////////////////////////////////////////////////////////////////
void UpscaleKernel(int width, int height, int stride, float scale, float *out,
sycl::accessor<sycl::float4, 2, sycl::access::mode::read,
sycl::access::target::image>
texCoarse_acc,
sycl::sampler texDesc,
const sycl::nd_item<3> &item_ct1) {
const int ix = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int iy = item_ct1.get_local_id(1) +
item_ct1.get_group(1) * item_ct1.get_local_range(1);
if (ix >= width || iy >= height) return;
float x = ((float)ix - 0.5f) * 0.5f;
float y = ((float)iy - 0.5f) * 0.5f;
auto inputCoord = sycl::float2(x, y);
// exploit hardware interpolation
// and scale interpolated vector to match next pyramid level resolution
out[ix + iy * stride] = texCoarse_acc.read(inputCoord, texDesc)[0] * scale;
}
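// Worked example of the coordinate mapping above (illustration only):
//   ix = 10 -> x = (10 - 0.5f) * 0.5f = 4.75f
//   ix = 11 -> x = (11 - 0.5f) * 0.5f = 5.25f
// i.e. two neighboring fine-grid pixels straddle one coarse texel, the
// hardware linear filter blends the surrounding coarse values, and the
// result is multiplied by `scale` so the displacement magnitude matches
// the finer pyramid level (scale is typically 2.0 for a 2x upscale).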
///////////////////////////////////////////////////////////////////////////////
/// \brief upscale one component of a displacement field, kernel wrapper
/// \param[in] src field component to upscale
/// \param[in] width field current width
/// \param[in] height field current height
/// \param[in] stride field current stride
/// \param[in] newWidth field new width
/// \param[in] newHeight field new height
/// \param[in] newStride field new stride
/// \param[in] scale value scale factor (multiplier)
/// \param[out] out upscaled field component
///////////////////////////////////////////////////////////////////////////////
static void Upscale(const float *src, float *pI0_h, float *I0_h, float *src_p, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float scale,
float *out, sycl::queue q) {
sycl::range<3> threads(1, 8, 32);
sycl::range<3> blocks(1, iDivUp(newHeight, threads[1]),
iDivUp(newWidth, threads[2]));
int dataSize = stride * height * sizeof(float);
q.memcpy(I0_h, src, dataSize).wait();
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
int index = i * stride + j;
pI0_h[index * 4 + 0] = I0_h[index];
pI0_h[index * 4 + 1] = pI0_h[index * 4 + 2] = pI0_h[index * 4 + 3] = 0.f;
}
}
q.memcpy(src_p, pI0_h, height * stride * sizeof(sycl::float4)).wait();
auto texDescr = sycl::sampler(
sycl::coordinate_normalization_mode::unnormalized,
sycl::addressing_mode::clamp_to_edge, sycl::filtering_mode::linear);
auto texCoarse = sycl::image<2>(
src_p, sycl::image_channel_order::rgba,
sycl::image_channel_type::fp32, sycl::range<2>(width, height),
sycl::range<1>(stride * sizeof(sycl::float4)));
q.submit([&](sycl::handler &cgh) {
auto texCoarse_acc =
texCoarse.template get_access<sycl::float4,
sycl::access::mode::read>(cgh);
cgh.parallel_for(sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
UpscaleKernel(newWidth, newHeight, newStride, scale, out,
texCoarse_acc, texDescr, item_ct1);
});
});
} | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/ccl_utils.hpp | //==---- ccl_utils.hpp----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_CCL_UTILS_HPP__
#define __DPCT_CCL_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/ccl.hpp>
#include <unordered_map>
#include <memory>
#include "device.hpp"
namespace dpct {
namespace ccl {
namespace detail {
/// Get stored kvs with specified kvs address.
inline std::shared_ptr<oneapi::ccl::kvs> &
get_kvs(const oneapi::ccl::kvs::address_type &addr) {
struct hash {
std::size_t operator()(const oneapi::ccl::kvs::address_type &in) const {
return std::hash<std::string_view>()(std::string_view(in.data(), in.size()));
}
};
static std::unordered_map<oneapi::ccl::kvs::address_type,
std::shared_ptr<oneapi::ccl::kvs>, hash>
kvs_map;
return kvs_map[addr];
}
/// Helper class to initialize the ccl environment.
class ccl_init_helper {
public:
ccl_init_helper() { oneapi::ccl::init(); }
};
} // namespace detail
/// Get concatenated library version as an integer.
static inline int get_version() {
oneapi::ccl::init();
auto ver = oneapi::ccl::get_library_version();
return ver.major * 10000 + ver.minor * 100 + ver.update;
}
/// Create main kvs and return its address.
static inline oneapi::ccl::kvs::address_type create_kvs_address() {
oneapi::ccl::init();
auto ptr = oneapi::ccl::create_main_kvs();
auto addr = ptr->get_address();
detail::get_kvs(addr) = ptr;
return addr;
}
/// Get the stored kvs with \p addr if it exists. Otherwise, create a kvs with \p addr.
static inline std::shared_ptr<oneapi::ccl::kvs>
create_kvs(const oneapi::ccl::kvs::address_type &addr) {
oneapi::ccl::init();
auto &ptr = detail::get_kvs(addr);
if (!ptr)
ptr = oneapi::ccl::create_kvs(addr);
return ptr;
}
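// A hypothetical multi-process handshake using the helpers above; the
// broadcast step stands for whatever out-of-band mechanism the application
// already has (e.g. MPI) and is not part of this header.
//
//   oneapi::ccl::kvs::address_type addr;
//   if (rank == 0)
//     addr = dpct::ccl::create_kvs_address();   // rank 0 creates the main kvs
//   broadcast_address_to_all_ranks(addr);       // hypothetical helper
//   auto kvs = dpct::ccl::create_kvs(addr);     // other ranks attach to it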
/// dpct communicator extension
class communicator_wrapper : public dpct::ccl::detail::ccl_init_helper {
public:
communicator_wrapper(
int size, int rank, oneapi::ccl::kvs::address_type id,
const oneapi::ccl::comm_attr &attr = oneapi::ccl::default_comm_attr)
: _device_comm(oneapi::ccl::create_device(
static_cast<sycl::device &>(dpct::get_current_device()))),
_context_comm(oneapi::ccl::create_context(dpct::get_default_context())),
_comm(oneapi::ccl::create_communicator(
size, rank, _device_comm, _context_comm, dpct::ccl::create_kvs(id),
attr)) {
_queue_init = false;
_ccl_stream_ptr = nullptr;
}
~communicator_wrapper() {
delete _ccl_stream_ptr;
};
/// Return the rank in a oneapi::ccl::communicator
/// \returns The rank corresponding to communicator object
int rank() const {
return _comm.rank();
}
/// Retrieves the number of ranks in the oneapi::ccl::communicator
/// \returns The number of the ranks
int size() const {
return _comm.size();
}
/// Return the underlying native device used by the oneapi::ccl::communicator
sycl::device get_device() const {
return _comm.get_device().get_native();
}
/// \brief allreduce is a collective communication operation that performs the global reduction operation
/// on values from all ranks of communicator and distributes the result back to all ranks.
/// \param send_buf the buffer with @c count elements of @c dtype that stores local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result, must have the same dimension as @c send_buf
/// \param count the number of elements of type @c dtype in @c send_buf and @c recv_buf
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void allreduce(const void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::allreduce(sendbuff, recvbuff, count, dtype, rtype,
_comm, stream);
},
queue_ptr);
}
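  // Minimal usage sketch, assuming a communicator_wrapper `comm`, USM float
  // buffers `send`/`recv` of `count` elements, and a sycl::queue `q` set up
  // elsewhere:
  //
  //   comm.allreduce(send, recv, count, oneapi::ccl::datatype::float32,
  //                  oneapi::ccl::reduction::sum, &q);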
/// \brief reduce is a collective communication operation that performs the
/// global reduction operation on values from all ranks of the communicator
/// and returns the result to the root rank.
/// \param send_buf the buffer with @c count elements of @c dtype that stores
/// local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result,
/// must have the same dimension as @c send_buf
/// \param count the number of elements of type @c dtype in @c send_buf and @c recv_buf
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param root the rank that gets the result of reduction
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void reduce(const void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
int root, sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::reduce(sendbuff, recvbuff, count, dtype, rtype,
root, _comm, stream);
},
queue_ptr);
}
/// \brief broadcast is a collective communication operation that broadcasts data
/// from one rank of communicator (denoted as root) to all other ranks.
/// Only in-place operation is supported.
/// \param send_buf the buffer with @c count elements of @c dtype that stores
/// the local data to be broadcast
/// \param recv_buf [out] the buffer to store the broadcast result
/// \param count the number of elements of type @c dtype in @c buf
/// \param dtype the datatype of elements in @c buf
/// \param root the rank that broadcasts @c buf
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void broadcast(void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, int root,
sycl::queue *queue_ptr) {
if (sendbuff != recvbuff) {
throw std::runtime_error(
"oneCCL broadcast only support in-place operation. "
"send_buf and recv_buf must be same.");
return;
}
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::broadcast(recvbuff, count, dtype, root, _comm,
stream);
},
queue_ptr);
}
/// \brief reduce_scatter is a collective communication operation that performs the global reduction operation
/// on values from all ranks of the communicator and scatters the result in blocks back to all ranks.
/// \param send_buf the buffer with @c count elements of @c dtype that stores local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result, must have the same dimension as @c send_buf
/// \param recv_count the number of elements of type @c dtype in receive block
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void reduce_scatter(const void *sendbuff, void *recvbuff, size_t recv_count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::reduce_scatter(sendbuff, recvbuff, recv_count,
dtype, rtype, _comm, stream);
},
queue_ptr);
}
private:
oneapi::ccl::device _device_comm;
oneapi::ccl::context _context_comm;
oneapi::ccl::communicator _comm;
sycl::queue _queue;
bool _queue_init;
oneapi::ccl::stream *_ccl_stream_ptr;
template <class Fn>
void call_func_wrapper(Fn func, sycl::queue *qptr) {
if (_queue_init && *qptr != _queue) {
call_func_async(func, qptr);
} else {
if(!_queue_init) {
_queue = *qptr;
_queue_init = true;
_ccl_stream_ptr = new oneapi::ccl::stream(oneapi::ccl::create_stream(_queue));
}
std::invoke(func, *_ccl_stream_ptr);
}
}
class call_func_async {
sycl::queue *_q_ptr;
struct call_async_impl {
oneapi::ccl::stream _ccl_stream_impl;
oneapi::ccl::event _ccl_event_impl;
template <class Fn>
explicit call_async_impl(Fn func, sycl::queue *qptr)
: _ccl_stream_impl(oneapi::ccl::create_stream(*qptr)),
_ccl_event_impl(std::invoke(func, _ccl_stream_impl)) {}
};
call_async_impl *_imp;
public:
template <class Fn>
explicit call_func_async(Fn func, sycl::queue *qptr)
: _q_ptr(qptr),
_imp(new call_async_impl(func, qptr)) {}
~call_func_async() {
_q_ptr->submit([&](sycl::handler &cgh)
{ cgh.host_task([=]
{
_imp->_ccl_event_impl.wait();
delete _imp; }); });
}
};
};
typedef dpct::ccl::communicator_wrapper *comm_ptr;
} // namespace ccl
} // namespace dpct
#endif // __DPCT_CCL_UTILS_HPP__ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/util.hpp | //==---- util.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_UTIL_HPP__
#define __DPCT_UTIL_HPP__
#include <sycl/sycl.hpp>
#include <complex>
#include <type_traits>
#include <cassert>
#include <cstdint>
// TODO: Remove these function definitions once they exist in the DPC++ compiler
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate))
T __spirv_GroupNonUniformShuffle(__spv::Scope::Flag, T, unsigned) noexcept;
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate))
T __spirv_GroupNonUniformShuffleDown(__spv::Scope::Flag, T, unsigned) noexcept;
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate))
T __spirv_GroupNonUniformShuffleUp(__spv::Scope::Flag, T, unsigned) noexcept;
#endif
namespace dpct {
namespace detail {
template <typename tag, typename T> class generic_error_type {
public:
generic_error_type() = default;
generic_error_type(T value) : value{value} {}
operator T() const { return value; }
private:
T value;
};
} // namespace detail
using err0 = detail::generic_error_type<struct err0_tag, int>;
using err1 = detail::generic_error_type<struct err1_tag, int>;
template <int... Ints> struct integer_sequence {};
template <int Size, int... Ints>
struct make_index_sequence
: public make_index_sequence<Size - 1, Size - 1, Ints...> {};
template <int... Ints>
struct make_index_sequence<0, Ints...> : public integer_sequence<Ints...> {};
template <typename T> struct DataType { using T2 = T; };
template <typename T> struct DataType<sycl::vec<T, 2>> {
using T2 = std::complex<T>;
};
inline void matrix_mem_copy(void *to_ptr, const void *from_ptr, int to_ld,
int from_ld, int rows, int cols, int elem_size,
memcpy_direction direction = automatic,
sycl::queue &queue = dpct::get_default_queue(),
bool async = false) {
if (to_ptr == from_ptr && to_ld == from_ld) {
return;
}
if (to_ld == from_ld) {
size_t copy_size = elem_size * ((cols - 1) * (size_t)to_ld + rows);
if (async)
detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr,
copy_size, direction);
else
detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr,
copy_size, direction).wait();
} else {
if (async)
detail::dpct_memcpy(queue, to_ptr, from_ptr, elem_size * to_ld,
elem_size * from_ld, elem_size * rows, cols,
direction);
else
sycl::event::wait(detail::dpct_memcpy(
queue, to_ptr, from_ptr, elem_size * to_ld, elem_size * from_ld,
elem_size * rows, cols, direction));
}
}
/// Copy matrix data. The default leading dimension is column.
/// \param [out] to_ptr A pointer points to the destination location.
/// \param [in] from_ptr A pointer points to the source location.
/// \param [in] to_ld The leading dimension the destination matrix.
/// \param [in] from_ld The leading dimension the source matrix.
/// \param [in] rows The number of rows of the source matrix.
/// \param [in] cols The number of columns of the source matrix.
/// \param [in] direction The direction of the data copy.
/// \param [in] queue The queue where the routine should be executed.
/// \param [in] async If this argument is true, the return of the function
/// does NOT guarantee the copy is completed.
template <typename T>
inline void matrix_mem_copy(T *to_ptr, const T *from_ptr, int to_ld,
int from_ld, int rows, int cols,
memcpy_direction direction = automatic,
sycl::queue &queue = dpct::get_default_queue(),
bool async = false) {
using Ty = typename DataType<T>::T2;
matrix_mem_copy((void *)to_ptr, (void *)from_ptr, to_ld, from_ld, rows, cols,
sizeof(Ty), direction, queue, async);
}
/// Cast the high or low 32 bits of a double to an integer.
/// \param [in] d The double value.
/// \param [in] use_high32 Cast the high 32 bits of the double if true;
/// otherwise cast the low 32 bits.
inline int cast_double_to_int(double d, bool use_high32 = true) {
sycl::vec<double, 1> v0{d};
auto v1 = v0.as<sycl::int2>();
if (use_high32)
return v1[1];
return v1[0];
}
/// Combine two integers, the first as the high 32 bits and the second
/// as the low 32 bits, into a double.
/// \param [in] high32 The integer as the high 32 bits
/// \param [in] low32 The integer as the low 32 bits
inline double cast_ints_to_double(int high32, int low32) {
sycl::int2 v0{low32, high32};
auto v1 = v0.as<sycl::vec<double, 1>>();
return v1;
}
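// Round-trip illustration for the two casts above (example values only):
//   double d  = 1.0;                                // bits 0x3FF0000000000000
//   int hi    = dpct::cast_double_to_int(d);        // 0x3FF00000 (high 32 bits)
//   int lo    = dpct::cast_double_to_int(d, false); // 0x00000000 (low 32 bits)
//   double d2 = dpct::cast_ints_to_double(hi, lo);  // d2 == 1.0 again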
/// Reverse the bit order of an unsigned integer
/// \param [in] a Input unsigned integer value
/// \returns Value of a with the bit order reversed
template <typename T> inline T reverse_bits(T a) {
static_assert(std::is_unsigned<T>::value && std::is_integral<T>::value,
"unsigned integer required");
if (!a)
return 0;
T mask = 0;
size_t count = 4 * sizeof(T);
mask = ~mask >> count;
while (count) {
a = ((a & mask) << count) | ((a & ~mask) >> count);
count = count >> 1;
mask = mask ^ (mask << count);
}
return a;
}
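// A few concrete values, for illustration only:
//   dpct::reverse_bits<uint8_t>(0b00000001) == 0b10000000
//   dpct::reverse_bits<uint8_t>(0b11010000) == 0b00001011
//   dpct::reverse_bits<uint32_t>(1u)        == 0x80000000u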
/// \param [in] a The first value contains 4 bytes
/// \param [in] b The second value contains 4 bytes
/// \param [in] s The selector value; only the lower 16 bits are used
/// \returns the permutation result of 4 bytes selected in the way
/// specified by \p s from \p a and \p b
inline unsigned int byte_level_permute(unsigned int a, unsigned int b,
unsigned int s) {
unsigned int ret;
ret =
((((std::uint64_t)b << 32 | a) >> (s & 0x7) * 8) & 0xff) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 4) & 0x7) * 8) & 0xff) << 8) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 8) & 0x7) * 8) & 0xff) << 16) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 12) & 0x7) * 8) & 0xff) << 24);
return ret;
}
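// Example of the selector encoding (each low hex digit of \p s picks one
// byte of the 8-byte value b:a, least significant digit first):
//   a = 0x03020100, b = 0x07060504
//   byte_level_permute(a, b, 0x3210) == 0x03020100   // bytes 0..3, i.e. a
//   byte_level_permute(a, b, 0x7654) == 0x07060504   // bytes 4..7, i.e. b
//   byte_level_permute(a, b, 0x0123) == 0x00010203   // a with bytes reversed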
/// Find the position of the least significant set bit in an integer.
/// ffs(0) returns 0.
///
/// \param [in] a Input integer value
/// \returns The position
template <typename T> inline int ffs(T a) {
static_assert(std::is_integral<T>::value, "integer required");
return (sycl::ctz(a) + 1) % (sizeof(T) * 8 + 1);
}
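// Example values (positions are 1-based; 0 means no bit is set):
//   dpct::ffs(0)  == 0
//   dpct::ffs(1)  == 1   // bit 0 is the lowest set bit
//   dpct::ffs(8)  == 4   // bit 3 is the lowest set bit
//   dpct::ffs(12) == 3   // 0b1100, lowest set bit is bit 2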
/// select_from_sub_group allows work-items to obtain a copy of a value held by
/// any other work-item in the sub_group. The input sub_group will be divided
/// into several logical sub_groups with id range [0, \p logical_sub_group_size
/// - 1]. Each work-item in logical sub_group gets value from another work-item
/// whose id is \p remote_local_id. If \p remote_local_id is outside the
/// logical sub_group id range, \p remote_local_id will modulo with \p
/// logical_sub_group_size. The \p logical_sub_group_size must be a power of 2
/// and not exceed input sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] remote_local_id Input source work item id
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T select_from_sub_group(sycl::sub_group g, T x, int remote_local_id,
int logical_sub_group_size = 32) {
unsigned int start_index =
g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size;
return sycl::select_from_group(
g, x, start_index + remote_local_id % logical_sub_group_size);
}
/// shift_sub_group_left moves values held by the work-items in a sub_group
/// directly to another work-item in the sub_group, by shifting values a fixed
/// number of work-items to the left. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in the logical sub_group gets the value from another
/// work-item whose id is the caller's id plus \p delta. If the calculated id is
/// outside the logical sub_group id range, the work-item keeps its own value. The \p
/// logical_sub_group_size must be a power of 2 and not exceed input sub_group
/// size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_left(sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int end_index =
(id / logical_sub_group_size + 1) * logical_sub_group_size;
T result = sycl::shift_group_left(g, x, delta);
if ((id + delta) >= end_index) {
result = x;
}
return result;
}
/// shift_sub_group_right moves values held by the work-items in a sub_group
/// directly to another work-item in the sub_group, by shifting values a fixed
/// number of work-items to the right. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in the logical sub_group gets the value from another
/// work-item whose id is the caller's id minus \p delta. If the calculated id is
/// outside the logical sub_group id range, the work-item keeps its own value. The \p
/// logical_sub_group_size must be a power of 2 and not exceed input sub_group
/// size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_right(sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
T result = sycl::shift_group_right(g, x, delta);
if ((id - start_index) < delta) {
result = x;
}
return result;
}
/// permute_sub_group_by_xor permutes values by exchanging values held by pairs
/// of work-items identified by computing the bitwise exclusive OR of the
/// work-item id and some fixed mask. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in the logical sub_group gets the value from another
/// work-item whose id is the bitwise exclusive OR of the caller's id and \p
/// mask. If the calculated id is outside the logical sub_group id range, the
/// work-item keeps its own value. The \p logical_sub_group_size must be a power of 2 and not exceed
/// input sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] mask Input mask
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T permute_sub_group_by_xor(sycl::sub_group g, T x, unsigned int mask,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
return sycl::select_from_group(g, x,
target_offset < logical_sub_group_size
? start_index + target_offset
: id);
}
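// Sketch of a classic butterfly reduction built on the helper above,
// assuming it runs inside a kernel where `sg` is the current
// sycl::sub_group (width 32) and `v` is a per-work-item float:
//
//   for (unsigned int mask = 16; mask > 0; mask >>= 1)
//     v += dpct::permute_sub_group_by_xor(sg, v, mask);
//   // afterwards every work-item in the sub-group holds the full sum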
namespace experimental {
/// Masked version of select_from_sub_group, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit is set to 1 if the work-item with id n
/// participates. All work-items named in member_mask must be executed with the
/// same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] remote_local_id Input source work item id
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T select_from_sub_group(unsigned int member_mask,
sycl::sub_group g, T x, int remote_local_id,
int logical_sub_group_size = 32) {
unsigned int start_index =
g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size;
unsigned logical_remote_id =
start_index + remote_local_id % logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x, logical_remote_id);
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)remote_local_id;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not "
"supported on host device and none intel compiler.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of shift_sub_group_left, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit is set to 1 if the work-item with id n
/// participates. All work-items named in member_mask must be executed with the
/// same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_left(unsigned int member_mask,
sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int end_index =
(id / logical_sub_group_size + 1) * logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
T result = __spirv_GroupNonUniformShuffleDown(__spv::Scope::Subgroup, x, delta);
if ((id + delta) >= end_index) {
result = x;
}
return result;
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_left "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)delta;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not "
"supported on host device and none intel compiler.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of shift_sub_group_right, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit is set to 1 if the work-item with id n
/// participates. All work-items named in member_mask must be executed with the
/// same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_right(unsigned int member_mask,
sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
T result = __spirv_GroupNonUniformShuffleUp(__spv::Scope::Subgroup, x, delta);
if ((id - start_index) < delta) {
result = x;
}
return result;
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_right "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)delta;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not "
"supported on host device and none intel compiler.");
#endif // __SYCL_DEVICE_ONLY && __INTEL_LLVM_COMPILER
}
/// Masked version of permute_sub_group_by_xor, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit is set to 1 if the work-item with id n
/// participates. All work-items named in member_mask must be executed with the
/// same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] mask Input mask
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T permute_sub_group_by_xor(unsigned int member_mask,
sycl::sub_group g, T x, unsigned int mask,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
unsigned logical_remote_id = (target_offset < logical_sub_group_size) ? start_index + target_offset : id;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x, logical_remote_id);
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of permute_sub_group_by_xor "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)mask;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not "
"supported on host device and none intel compiler.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
} // namespace experimental
/// Computes the multiplication of two complex numbers.
/// \tparam T Complex element type
/// \param [in] x The first input complex number
/// \param [in] y The second input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> cmul(sycl::vec<T, 2> x, sycl::vec<T, 2> y) {
std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]);
t1 = t1 * t2;
return sycl::vec<T, 2>(t1.real(), t1.imag());
}
/// Computes the division of two complex numbers.
/// \tparam T Complex element type
/// \param [in] x The first input complex number
/// \param [in] y The second input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> cdiv(sycl::vec<T, 2> x, sycl::vec<T, 2> y) {
std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]);
t1 = t1 / t2;
return sycl::vec<T, 2>(t1.real(), t1.imag());
}
/// Computes the magnitude of a complex number.
/// \tparam T Complex element type
/// \param [in] x The input complex number
/// \returns The result
template <typename T>
T cabs(sycl::vec<T, 2> x) {
std::complex<T> t(x[0], x[1]);
return std::abs(t);
}
/// Computes the complex conjugate of a complex number.
/// \tparam T Complex element type
/// \param [in] x The input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> conj(sycl::vec<T, 2> x) {
std::complex<T> t(x[0], x[1]);
t = std::conj(t);
return sycl::vec<T, 2>(t.real(), t.imag());
}
inline int get_sycl_language_version() {
#ifdef SYCL_LANGUAGE_VERSION
return SYCL_LANGUAGE_VERSION;
#else
return 202000;
#endif
}
namespace experimental {
/// Synchronize work items from all work groups within a SYCL kernel.
/// \param [in] item: Represents a work group.
/// \param [in] counter: An atomic object defined on a device memory which can
/// be accessed by work items in all work groups. The initial value of the
/// counter should be zero.
/// Note: Please make sure that all the work items of all work groups within
/// a SYCL kernel can be scheduled actively at the same time on a device.
template <int dimensions = 3>
inline void
nd_range_barrier(const sycl::nd_item<dimensions> &item,
sycl::atomic_ref<
unsigned int, sycl::memory_order::seq_cst,
sycl::memory_scope::device,
sycl::access::address_space::global_space> &counter) {
static_assert(dimensions == 3, "dimensions must be 3.");
unsigned int num_groups = item.get_group_range(2) * item.get_group_range(1) *
item.get_group_range(0);
item.barrier();
if (item.get_local_linear_id() == 0) {
unsigned int inc = 1;
unsigned int old_arrive = 0;
bool is_group0 =
(item.get_group(2) + item.get_group(1) + item.get_group(0) == 0);
if (is_group0) {
inc = 0x80000000 - (num_groups - 1);
}
old_arrive = counter.fetch_add(inc);
// Synchronize all the work groups
while (((old_arrive ^ counter.load()) & 0x80000000) == 0)
;
}
item.barrier();
}
/// Synchronize work items from all work groups within a SYCL kernel.
/// \param [in] item: Represents a work group.
/// \param [in] counter: An atomic object defined on a device memory which can
/// be accessed by work items in all work groups. The initial value of the
/// counter should be zero.
/// Note: Please make sure that all the work items of all work groups within
/// a SYCL kernel can be scheduled actively at the same time on a device.
template <>
inline void
nd_range_barrier(const sycl::nd_item<1> &item,
sycl::atomic_ref<
unsigned int, sycl::memory_order::seq_cst,
sycl::memory_scope::device,
sycl::access::address_space::global_space> &counter) {
unsigned int num_groups = item.get_group_range(0);
item.barrier();
if (item.get_local_linear_id() == 0) {
unsigned int inc = 1;
unsigned int old_arrive = 0;
bool is_group0 = (item.get_group(0) == 0);
if (is_group0) {
inc = 0x80000000 - (num_groups - 1);
}
old_arrive = counter.fetch_add(inc);
// Synchronize all the work groups
while (((old_arrive ^ counter.load()) & 0x80000000) == 0)
;
}
item.barrier();
}
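// Usage sketch, subject to the scheduling note above: `counter` is a
// zero-initialized `unsigned int *` in device USM and `grid`/`block` are
// chosen so all work-groups can be resident at once.
//
//   q.parallel_for(sycl::nd_range<3>(grid * block, block),
//                  [=](sycl::nd_item<3> item) {
//     sycl::atomic_ref<unsigned int, sycl::memory_order::seq_cst,
//                      sycl::memory_scope::device,
//                      sycl::access::address_space::global_space>
//         counter_ref(*counter);
//     // ... phase 1 ...
//     dpct::experimental::nd_range_barrier(item, counter_ref);
//     // ... phase 2 can rely on every work-group having finished phase 1 ...
//   });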
/// The logical-group is a logical collection of some work-items within a
/// work-group.
/// Note: Please make sure that the logical-group size is a power of 2 in the
/// range [1, current_sub_group_size].
class logical_group {
sycl::nd_item<3> _item;
sycl::group<3> _g;
uint32_t _logical_group_size;
uint32_t _group_linear_range_in_parent;
public:
/// Dividing \p parent_group into several logical-groups.
/// \param [in] item Current work-item.
/// \param [in] parent_group The group to be divided.
/// \param [in] size The logical-group size.
logical_group(sycl::nd_item<3> item, sycl::group<3> parent_group,
uint32_t size)
: _item(item), _g(parent_group), _logical_group_size(size) {
_group_linear_range_in_parent =
(_g.get_local_linear_range() - 1) / _logical_group_size + 1;
}
/// Returns the index of the work-item within the logical-group.
uint32_t get_local_linear_id() const {
return _item.get_local_linear_id() % _logical_group_size;
}
/// Returns the index of the logical-group in the parent group.
uint32_t get_group_linear_id() const {
return _item.get_local_linear_id() / _logical_group_size;
}
/// Returns the number of work-items in the logical-group.
uint32_t get_local_linear_range() const {
if (_g.get_local_linear_range() % _logical_group_size == 0) {
return _logical_group_size;
}
uint32_t last_item_group_id =
_g.get_local_linear_range() / _logical_group_size;
uint32_t first_of_last_group = last_item_group_id * _logical_group_size;
if (_item.get_local_linear_id() >= first_of_last_group) {
return _g.get_local_linear_range() - first_of_last_group;
} else {
return _logical_group_size;
}
}
/// Returns the number of logical-group in the parent group.
uint32_t get_group_linear_range() const {
return _group_linear_range_in_parent;
}
};
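// Usage sketch, assuming this runs inside a kernel with an nd_item<3> `item`:
//
//   auto tile = dpct::experimental::logical_group(item, item.get_group(), 16);
//   uint32_t lane    = tile.get_local_linear_id();  // 0..15 within the tile
//   uint32_t tile_id = tile.get_group_linear_id();  // which tile in the group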
// The original source of the function calculate_max_active_wg_per_xecore was
// under the license below:
//
// Copyright Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
/// This function is used for occupancy calculation; it computes the maximum
/// number of active work-groups per Xe-Core. Refer to
/// https://github.com/oneapi-src/oneAPI-samples/tree/master/Tools/GPU-Occupancy-Calculator
/// \param [out] num_wg Active work-group number.
/// \param [in] wg_size Work-group size.
/// \param [in] slm_size Shared local memory size in bytes.
/// \param [in] sg_size Sub-group size.
/// \param [in] used_barrier Whether barrier is used.
/// \param [in] used_large_grf Whether large General Register File is used.
/// \return If no error, returns 0.
/// If \p wg_size exceeds the max work-group size, the max work-group size will
/// be used instead of \p wg_size and returns -1.
inline int calculate_max_active_wg_per_xecore(int *num_wg, int wg_size,
int slm_size = 0,
int sg_size = 32,
bool used_barrier = false,
bool used_large_grf = false) {
int ret = 0;
const int slm_size_per_xe_core = 64 * 1024;
const int max_barrier_registers = 32;
dpct::device_ext &dev = dpct::get_current_device();
size_t max_wg_size = dev.get_info<sycl::info::device::max_work_group_size>();
if (wg_size > max_wg_size) {
wg_size = max_wg_size;
ret = -1;
}
int num_threads_ss = 56;
int max_num_wg = 56;
if (dev.has(sycl::aspect::ext_intel_gpu_eu_count_per_subslice) &&
dev.has(sycl::aspect::ext_intel_gpu_hw_threads_per_eu)) {
auto eu_count =
dev.get_info<sycl::info::device::ext_intel_gpu_eu_count_per_subslice>();
auto threads_count =
dev.get_info<sycl::ext::intel::info::device::gpu_hw_threads_per_eu>();
num_threads_ss = eu_count * threads_count;
max_num_wg = eu_count * threads_count;
}
if (used_barrier) {
max_num_wg = max_barrier_registers;
}
// Calculate num_wg_slm
int num_wg_slm = 0;
if (slm_size == 0) {
num_wg_slm = max_num_wg;
} else {
num_wg_slm = std::floor((float)slm_size_per_xe_core / slm_size);
}
// Calculate num_wg_threads
if (used_large_grf)
num_threads_ss = num_threads_ss / 2;
int num_threads = std::ceil((float)wg_size / sg_size);
int num_wg_threads = std::floor((float)num_threads_ss / num_threads);
// Calculate num_wg
*num_wg = std::min(num_wg_slm, num_wg_threads);
*num_wg = std::min(*num_wg, max_num_wg);
return ret;
}
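// Usage sketch with example numbers (256 work-items per group, 8 KiB SLM,
// sub-group size 32, a barrier in use):
//
//   int max_wg = 0;
//   int err = dpct::experimental::calculate_max_active_wg_per_xecore(
//       &max_wg, /*wg_size=*/256, /*slm_size=*/8 * 1024, /*sg_size=*/32,
//       /*used_barrier=*/true);
//   // err == -1 indicates wg_size was clamped to the device maximum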
} // namespace experimental
/// If x <= 2, then return a pointer to the default queue;
/// otherwise, return x reinterpreted as a dpct::queue_ptr.
inline queue_ptr int_as_queue_ptr(uintptr_t x) {
return x <= 2 ?
&get_default_queue()
: reinterpret_cast<queue_ptr>(x);
}
template <int n_nondefault_params, int n_default_params, typename T>
class args_selector;
/// args_selector is a helper class for extracting arguments from an
/// array of pointers to arguments or buffer of arguments to pass to a
/// kernel function.
///
/// \param R(Ts...) The type of the kernel
/// \param n_nondefault_params The number of nondefault parameters of the kernel
/// (excluding parameters like sycl::nd_item, etc.)
/// \param n_default_params The number of default parameters of the kernel
///
/// Example usage:
/// With the following kernel:
/// void foo(sycl::float2 *x, int n, sycl::nd_item<3> item_ct1, float f=.1) {}
/// and with the declaration:
/// args_selector<2, 1, decltype(foo)> selector(kernelParams, extra);
/// we have:
/// selector.get<0>() returns a reference to sycl::float2 *,
/// selector.get<1>() returns a reference to int,
/// selector.get<2>() returns a reference to float
template <int n_nondefault_params, int n_default_params,
typename R, typename... Ts>
class args_selector<n_nondefault_params, n_default_params, R(Ts...)> {
private:
void **kernel_params;
char *args_buffer;
template <int i>
static constexpr int account_for_default_params() {
constexpr int n_total_params = sizeof...(Ts);
if constexpr (i >= n_nondefault_params) {
return n_total_params - n_default_params + (i - n_nondefault_params);
} else {
return i;
}
}
public:
/// Get the type of the ith argument of R(Ts...)
/// \param [in] i Index of parameter to get
/// \returns Type of ith parameter
template <int i>
using arg_type = std::tuple_element_t<account_for_default_params<i>(),
std::tuple<Ts...>>;
private:
template <int i>
static constexpr int get_offset() {
if constexpr (i == 0) {
// we can assume args_buffer is properly aligned to the
// first argument
return 0;
} else {
constexpr int prev_off = get_offset<i-1>();
constexpr int prev_past_end = prev_off + sizeof(arg_type<i-1>);
using T = arg_type<i>;
// is the past-the-end of the i-1st element properly aligned
// with the ith element's alignment?
if constexpr (prev_past_end % alignof(T) == 0) {
return prev_past_end;
}
// otherwise bump prev_past_end to match alignment
else {
return prev_past_end + (alignof(T) - (prev_past_end % alignof(T)));
}
}
}
static char *get_args_buffer(void **extra) {
if (!extra)
return nullptr;
for (; (std::size_t) *extra != 0; ++extra) {
if ((std::size_t) *extra == 1) {
return static_cast<char*>(*(extra+1));
}
}
return nullptr;
}
public:
/// If kernel_params is nonnull, then args_selector will
/// extract arguments from kernel_params. Otherwise, it
/// will extract them from extra.
/// \param [in] kernel_params Array of pointers to arguments
/// or a null pointer.
/// \param [in] extra Array containing pointer to argument buffer.
args_selector(void **kernel_params, void **extra)
: kernel_params(kernel_params),
args_buffer(get_args_buffer(extra))
{}
/// Get a reference to the ith argument extracted from kernel_params
/// or extra.
/// \param [in] i Index of argument to get
/// \returns Reference to the ith argument
template <int i>
arg_type<i> &get() {
if (kernel_params) {
return *static_cast<arg_type<i>*>(kernel_params[i]);
} else {
return *reinterpret_cast<arg_type<i>*>(args_buffer + get_offset<i>());
}
}
};
#ifdef _WIN32
#define DPCT_EXPORT __declspec(dllexport)
#else
#define DPCT_EXPORT
#endif
} // namespace dpct
#endif // __DPCT_UTIL_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/image.hpp | //==---- image.hpp --------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_IMAGE_HPP__
#define __DPCT_IMAGE_HPP__
#include <sycl/sycl.hpp>
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
enum class image_channel_data_type {
signed_int,
unsigned_int,
fp,
};
class image_channel;
class image_wrapper_base;
namespace detail {
/// Image object type traits, with accessor type and sampled data type defined.
/// The data type of an image accessor must be one of sycl::int4, sycl::uint4,
/// sycl::float4 and sycl::half4. The data type of accessors with 8bits/16bits
/// channel width will be 32 bits. sycl::half is an exception.
template <class T> struct image_trait {
using acc_data_t = sycl::vec<T, 4>;
template <int dimensions>
using accessor_t =
sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read,
sycl::access::target::image>;
template <int dimensions>
using array_accessor_t =
sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read,
sycl::access::target::image_array>;
using data_t = T;
using elem_t = T;
static constexpr image_channel_data_type data_type =
std::is_integral<T>::value
? (std::is_signed<T>::value ? image_channel_data_type::signed_int
: image_channel_data_type::unsigned_int)
: image_channel_data_type::fp;
static constexpr int channel_num = 1;
};
template <>
struct image_trait<std::uint8_t> : public image_trait<std::uint32_t> {
using data_t = std::uint8_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::uint16_t>
: public image_trait<std::uint32_t> {
using data_t = std::uint16_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::int8_t> : public image_trait<std::int32_t> {
using data_t = std::int8_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::int16_t> : public image_trait<std::int32_t> {
using data_t = std::int16_t;
using elem_t = data_t;
};
template <>
struct image_trait<char>
: public image_trait<typename std::conditional<
std::is_signed<char>::value, signed char, unsigned char>::type> {};
template <class T>
struct image_trait<sycl::vec<T, 1>> : public image_trait<T> {};
template <class T>
struct image_trait<sycl::vec<T, 2>> : public image_trait<T> {
using data_t = sycl::vec<T, 2>;
static constexpr int channel_num = 2;
};
template <class T>
struct image_trait<sycl::vec<T, 3>>
: public image_trait<sycl::vec<T, 4>> {
static constexpr int channel_num = 3;
};
template <class T>
struct image_trait<sycl::vec<T, 4>> : public image_trait<T> {
using data_t = sycl::vec<T, 4>;
static constexpr int channel_num = 4;
};
/// Functor to fetch data from read result of an image accessor.
template <class T> struct fetch_data {
using return_t = typename image_trait<T>::data_t;
using acc_data_t = typename image_trait<T>::acc_data_t;
return_t operator()(acc_data_t &&original_data) {
return (return_t)original_data.r();
}
};
template <class T>
struct fetch_data<sycl::vec<T, 1>> : public fetch_data<T> {};
template <class T> struct fetch_data<sycl::vec<T, 2>> {
using return_t = typename image_trait<sycl::vec<T, 2>>::data_t;
using acc_data_t = typename image_trait<sycl::vec<T, 2>>::acc_data_t;
return_t operator()(acc_data_t &&origin_data) {
return return_t(origin_data.r(), origin_data.g());
}
};
template <class T>
struct fetch_data<sycl::vec<T, 3>>
: public fetch_data<sycl::vec<T, 4>> {};
template <class T> struct fetch_data<sycl::vec<T, 4>> {
using return_t = typename image_trait<sycl::vec<T, 4>>::data_t;
using acc_data_t = typename image_trait<sycl::vec<T, 4>>::acc_data_t;
return_t operator()(acc_data_t &&origin_data) {
return return_t(origin_data.r(), origin_data.g(), origin_data.b(),
origin_data.a());
}
};
/// Create an image according to the given type \p T and \p dims.
template <class T> static image_wrapper_base *create_image_wrapper(int dims);
/// Create image with given data type \p T, channel order and dims
template <class T>
static image_wrapper_base *create_image_wrapper(unsigned channel_num, int dims);
/// Create image with channel info and specified dimensions.
static image_wrapper_base *create_image_wrapper(image_channel channel, int dims);
} // namespace detail
/// Image channel info, include channel number, order, data width and type
class image_channel {
image_channel_data_type _type = image_channel_data_type::signed_int;
/// Number of channels.
unsigned _channel_num = 0;
/// Total size of all channels in bytes.
unsigned _total_size = 0;
/// Size of each channel in bytes.
unsigned _channel_size = 0;
public:
/// Create image channel info according to template argument \p T.
template <class T> static image_channel create() {
image_channel channel;
channel.set_channel_size(detail::image_trait<T>::channel_num,
sizeof(typename detail::image_trait<T>::elem_t) *
8);
channel.set_channel_data_type(detail::image_trait<T>::data_type);
return channel;
}
image_channel() = default;
image_channel_data_type get_channel_data_type() { return _type; }
void set_channel_data_type(image_channel_data_type type) { _type = type; }
unsigned get_total_size() { return _total_size; }
unsigned get_channel_num() { return _channel_num; }
void set_channel_num(unsigned channel_num) {
_channel_num = channel_num;
_total_size = _channel_size * _channel_num;
}
/// image_channel constructor.
/// \param r Channel r width in bits.
/// \param g Channel g width in bits. Should be same with \p r, or zero.
/// \param b Channel b width in bits. Should be same with \p g, or zero.
/// \param a Channel a width in bits. Should be same with \p b, or zero.
/// \param data_type Image channel data type: signed_int, unsigned_int or fp.
image_channel(int r, int g, int b, int a, image_channel_data_type data_type) {
_type = data_type;
if (a) {
assert(r == a && "SYCL doesn't support different channel size");
assert(r == b && "SYCL doesn't support different channel size");
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(4, a);
} else if (b) {
assert(r == b && "SYCL doesn't support different channel size");
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(3, b);
} else if (g) {
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(2, g);
} else {
set_channel_size(1, r);
}
}
sycl::image_channel_type get_channel_type() const {
if (_channel_size == 4) {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int32;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int32;
else if (_type == image_channel_data_type::fp)
return sycl::image_channel_type::fp32;
} else if (_channel_size == 2) {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int16;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int16;
else if (_type == image_channel_data_type::fp)
return sycl::image_channel_type::fp16;
} else {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int8;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int8;
}
assert(false && "unexpected channel data kind and channel size");
return sycl::image_channel_type::signed_int32;
}
void set_channel_type(sycl::image_channel_type type) {
switch (type) {
case sycl::image_channel_type::unsigned_int8:
_type = image_channel_data_type::unsigned_int;
_channel_size = 1;
break;
case sycl::image_channel_type::unsigned_int16:
_type = image_channel_data_type::unsigned_int;
_channel_size = 2;
break;
case sycl::image_channel_type::unsigned_int32:
_type = image_channel_data_type::unsigned_int;
_channel_size = 4;
break;
case sycl::image_channel_type::signed_int8:
_type = image_channel_data_type::signed_int;
_channel_size = 1;
break;
case sycl::image_channel_type::signed_int16:
_type = image_channel_data_type::signed_int;
_channel_size = 2;
break;
case sycl::image_channel_type::signed_int32:
_type = image_channel_data_type::signed_int;
_channel_size = 4;
break;
case sycl::image_channel_type::fp16:
_type = image_channel_data_type::fp;
_channel_size = 2;
break;
case sycl::image_channel_type::fp32:
_type = image_channel_data_type::fp;
_channel_size = 4;
break;
default:
break;
}
_total_size = _channel_size * _channel_num;
}
sycl::image_channel_order get_channel_order() const {
switch (_channel_num) {
case 1:
return sycl::image_channel_order::r;
case 2:
return sycl::image_channel_order::rg;
case 3:
return sycl::image_channel_order::rgb;
case 4:
return sycl::image_channel_order::rgba;
default:
return sycl::image_channel_order::r;
}
}
/// Get the size for each channel in bits.
unsigned get_channel_size() const { return _channel_size * 8; }
/// Set channel size.
/// \param in_channel_num Channels number to set.
/// \param channel_size Size for each channel in bits.
void set_channel_size(unsigned in_channel_num,
unsigned channel_size) {
if (in_channel_num < _channel_num)
return;
_channel_num = in_channel_num;
_channel_size = channel_size / 8;
_total_size = _channel_size * _channel_num;
}
};
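// Sketch: two equivalent ways to describe a single-channel 32-bit float
// image (as the optical flow textures effectively are), either from a C++
// type or from explicit channel widths:
//
//   auto ch1 = dpct::image_channel::create<float>();
//   dpct::image_channel ch2(32, 0, 0, 0, dpct::image_channel_data_type::fp);
//   // both map to sycl::image_channel_type::fp32 with channel order r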
/// 2D or 3D matrix data for image.
class image_matrix {
image_channel _channel;
int _range[3] = {1, 1, 1};
int _dims = 0;
void *_host_data = nullptr;
/// Set range of each dimension.
template <int dimensions> void set_range(sycl::range<dimensions> range) {
for (int i = 0; i < dimensions; ++i)
_range[i] = range[i];
_dims = dimensions;
}
template <int... DimIdx>
sycl::range<sizeof...(DimIdx)> get_range(integer_sequence<DimIdx...>) {
return sycl::range<sizeof...(DimIdx)>(_range[DimIdx]...);
}
public:
/// Constructor with channel info and dimension size info.
template <int dimensions>
image_matrix(image_channel channel, sycl::range<dimensions> range)
: _channel(channel) {
set_range(range);
_host_data = std::malloc(range.size() * _channel.get_total_size());
}
image_matrix(sycl::image_channel_type channel_type, unsigned channel_num,
size_t x, size_t y) {
_channel.set_channel_type(channel_type);
_channel.set_channel_num(channel_num);
_dims = 1;
_range[0] = x;
if (y) {
_dims = 2;
_range[1] = y;
}
_host_data = std::malloc(_range[0] * _range[1] * _channel.get_total_size());
}
/// Construct a new image class with the matrix data.
template <int dimensions> sycl::image<dimensions> *create_image() {
return create_image<dimensions>(_channel);
}
/// Construct a new image class with the matrix data.
template <int dimensions>
sycl::image<dimensions> *create_image(image_channel channel) {
return new sycl::image<dimensions>(
_host_data, channel.get_channel_order(), channel.get_channel_type(),
get_range(make_index_sequence<dimensions>()),
sycl::property::image::use_host_ptr());
}
/// Get channel info.
inline image_channel get_channel() { return _channel; }
/// Get range of the image.
sycl::range<3> get_range() {
return sycl::range<3>(_range[0], _range[1], _range[2]);
}
/// Get matrix dims.
inline int get_dims() { return _dims; }
/// Convert to pitched data.
pitched_data to_pitched_data() {
return pitched_data(_host_data, _range[0], _range[0], _range[1]);
}
~image_matrix() {
if (_host_data)
std::free(_host_data);
_host_data = nullptr;
}
};
using image_matrix_p = image_matrix *;
enum class image_data_type { matrix, linear, pitch, unsupport };
/// Image data info.
class image_data {
public:
image_data() { _type = image_data_type::unsupport; }
image_data(image_matrix_p matrix_data) { set_data(matrix_data); }
image_data(void *data_ptr, size_t x_size, image_channel channel) {
set_data(data_ptr, x_size, channel);
}
image_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size,
image_channel channel) {
set_data(data_ptr, x_size, y_size, pitch_size, channel);
}
void set_data(image_matrix_p matrix_data) {
_type = image_data_type::matrix;
_data = matrix_data;
_channel = matrix_data->get_channel();
}
void set_data(void *data_ptr, size_t x_size, image_channel channel) {
_type = image_data_type::linear;
_data = data_ptr;
_x = x_size;
_channel = channel;
}
void set_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size,
image_channel channel) {
_type = image_data_type::pitch;
_data = data_ptr;
_x = x_size;
_y = y_size;
_pitch = pitch_size;
_channel = channel;
}
image_data_type get_data_type() const { return _type; }
void set_data_type(image_data_type type) { _type = type; }
void *get_data_ptr() const { return _data; }
void set_data_ptr(void *data) { _data = data; }
size_t get_x() const { return _x; }
void set_x(size_t x) { _x = x; }
size_t get_y() const { return _y; }
void set_y(size_t y) { _y = y; }
size_t get_pitch() const { return _pitch; }
void set_pitch(size_t pitch) { _pitch = pitch; }
image_channel get_channel() const { return _channel; }
void set_channel(image_channel channel) { _channel = channel; }
image_channel_data_type get_channel_data_type() {
return _channel.get_channel_data_type();
}
void set_channel_data_type(image_channel_data_type type) {
_channel.set_channel_data_type(type);
}
unsigned get_channel_size() { return _channel.get_channel_size(); }
void set_channel_size(unsigned channel_num, unsigned channel_size) {
return _channel.set_channel_size(channel_num, channel_size);
}
unsigned get_channel_num() { return _channel.get_channel_num(); }
void set_channel_num(unsigned num) {
return _channel.set_channel_num(num);
}
sycl::image_channel_type get_channel_type() {
return _channel.get_channel_type();
}
void set_channel_type(sycl::image_channel_type type) {
return _channel.set_channel_type(type);
}
private:
image_data_type _type;
void *_data = nullptr;
size_t _x, _y, _pitch;
image_channel _channel;
};
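// Usage sketch (illustrative only): describe a pitched 2D fp32 buffer.
// `dev_ptr`, `width`, `height` and `pitch_bytes` are placeholder values
// supplied by the caller.
//   dpct::image_channel chan = dpct::image_channel::create<float>();
//   dpct::image_data data(dev_ptr, width * sizeof(float), height, pitch_bytes,
//                         chan);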
/// Image sampling info, including addressing mode, filtering mode and
/// coordinate normalization info.
class sampling_info {
sycl::addressing_mode _addressing_mode =
sycl::addressing_mode::clamp_to_edge;
sycl::filtering_mode _filtering_mode = sycl::filtering_mode::nearest;
sycl::coordinate_normalization_mode _coordinate_normalization_mode =
sycl::coordinate_normalization_mode::unnormalized;
public:
sycl::addressing_mode get_addressing_mode() { return _addressing_mode; }
void set(sycl::addressing_mode addressing_mode) { _addressing_mode = addressing_mode; }
sycl::filtering_mode get_filtering_mode() { return _filtering_mode; }
void set(sycl::filtering_mode filtering_mode) { _filtering_mode = filtering_mode; }
sycl::coordinate_normalization_mode get_coordinate_normalization_mode() {
return _coordinate_normalization_mode;
}
void set(sycl::coordinate_normalization_mode coordinate_normalization_mode) {
_coordinate_normalization_mode = coordinate_normalization_mode;
}
bool is_coordinate_normalized() {
return _coordinate_normalization_mode ==
sycl::coordinate_normalization_mode::normalized;
}
void set_coordinate_normalization_mode(int is_normalized) {
_coordinate_normalization_mode =
is_normalized ? sycl::coordinate_normalization_mode::normalized
: sycl::coordinate_normalization_mode::unnormalized;
}
void
set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode,
sycl::coordinate_normalization_mode coordinate_normalization_mode) {
set(addressing_mode);
set(filtering_mode);
set(coordinate_normalization_mode);
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode, int is_normalized) {
set(addressing_mode);
set(filtering_mode);
set_coordinate_normalization_mode(is_normalized);
}
sycl::sampler get_sampler() {
return sycl::sampler(_coordinate_normalization_mode, _addressing_mode,
_filtering_mode);
}
};
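// Usage sketch (illustrative only): request clamp-to-edge addressing, linear
// filtering and normalized coordinates, then obtain the matching sycl::sampler.
//   dpct::sampling_info info;
//   info.set(sycl::addressing_mode::clamp_to_edge,
//            sycl::filtering_mode::linear, /*is_normalized=*/1);
//   sycl::sampler smpl = info.get_sampler();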
/// Image base class.
class image_wrapper_base {
sampling_info _sampling_info;
image_data _data;
public:
virtual ~image_wrapper_base() = 0;
void attach(image_data data) { set_data(data); }
/// Attach matrix data to this class.
void attach(image_matrix *matrix) {
detach();
image_wrapper_base::set_data(image_data(matrix));
}
/// Attach matrix data to this class.
void attach(image_matrix *matrix, image_channel channel) {
attach(matrix);
image_wrapper_base::set_channel(channel);
}
/// Attach linear data to this class.
void attach(const void *ptr, size_t count) {
attach(ptr, count, get_channel());
}
/// Attach linear data to this class.
void attach(const void *ptr, size_t count, image_channel channel) {
detach();
image_wrapper_base::set_data(image_data(const_cast<void *>(ptr), count, channel));
}
/// Attach 2D data to this class.
void attach(const void *data, size_t x, size_t y, size_t pitch) {
attach(data, x, y, pitch, get_channel());
}
/// Attach 2D data to this class.
void attach(const void *data, size_t x, size_t y, size_t pitch,
image_channel channel) {
detach();
image_wrapper_base::set_data(
image_data(const_cast<void *>(data), x, y, pitch, channel));
}
/// Detach data.
virtual void detach() {}
sampling_info get_sampling_info() { return _sampling_info; }
void set_sampling_info(sampling_info info) {
_sampling_info = info;
}
const image_data &get_data() { return _data; }
void set_data(image_data data) { _data = data; }
image_channel get_channel() { return _data.get_channel(); }
void set_channel(image_channel channel) { _data.set_channel(channel); }
image_channel_data_type get_channel_data_type() {
return _data.get_channel_data_type();
}
void set_channel_data_type(image_channel_data_type type) {
_data.set_channel_data_type(type);
}
unsigned get_channel_size() { return _data.get_channel_size(); }
void set_channel_size(unsigned channel_num, unsigned channel_size) {
return _data.set_channel_size(channel_num, channel_size);
}
sycl::addressing_mode get_addressing_mode() {
return _sampling_info.get_addressing_mode();
}
void set(sycl::addressing_mode addressing_mode) {
_sampling_info.set(addressing_mode);
}
sycl::filtering_mode get_filtering_mode() {
return _sampling_info.get_filtering_mode();
}
void set(sycl::filtering_mode filtering_mode) {
_sampling_info.set(filtering_mode);
}
sycl::coordinate_normalization_mode get_coordinate_normalization_mode() {
return _sampling_info.get_coordinate_normalization_mode();
}
void
set(sycl::coordinate_normalization_mode coordinate_normalization_mode) {
_sampling_info.set(coordinate_normalization_mode);
}
bool is_coordinate_normalized() {
return _sampling_info.is_coordinate_normalized();
}
void set_coordinate_normalization_mode(int is_normalized) {
_sampling_info.set_coordinate_normalization_mode(is_normalized);
}
void
set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode,
sycl::coordinate_normalization_mode coordinate_normalization_mode) {
set(addressing_mode);
set(filtering_mode);
set(coordinate_normalization_mode);
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode, int is_normalized) {
set(addressing_mode);
set(filtering_mode);
set_coordinate_normalization_mode(is_normalized);
}
unsigned get_channel_num() { return _data.get_channel_num(); }
void set_channel_num(unsigned num) {
return _data.set_channel_num(num);
}
sycl::image_channel_type get_channel_type() {
return _data.get_channel_type();
}
void set_channel_type(sycl::image_channel_type type) {
return _data.set_channel_type(type);
}
sycl::sampler get_sampler() { return _sampling_info.get_sampler(); }
};
inline image_wrapper_base::~image_wrapper_base() {}
using image_wrapper_base_p = image_wrapper_base *;
template <class T, int dimensions, bool IsImageArray> class image_accessor_ext;
/// Image class, wrapper of sycl::image.
template <class T, int dimensions, bool IsImageArray = false> class image_wrapper : public image_wrapper_base {
sycl::image<dimensions> *_image = nullptr;
#ifndef DPCT_USM_LEVEL_NONE
std::vector<char> _host_buffer;
#endif
void create_image(sycl::queue q) {
auto &data = get_data();
if (data.get_data_type() == image_data_type::matrix) {
_image = static_cast<image_matrix_p>(data.get_data_ptr())
->create_image<dimensions>(data.get_channel());
return;
}
auto ptr = data.get_data_ptr();
auto channel = data.get_channel();
if (detail::get_pointer_attribute(q, ptr) == detail::pointer_access_attribute::device_only) {
#ifdef DPCT_USM_LEVEL_NONE
ptr = get_buffer(ptr)
.template get_access<sycl::access_mode::read_write>()
.get_pointer();
#else
auto sz = data.get_x();
if (data.get_data_type() == image_data_type::pitch)
sz *= channel.get_total_size() * data.get_y();
_host_buffer.resize(sz);
q.memcpy(_host_buffer.data(), ptr, sz).wait();
ptr = _host_buffer.data();
#endif
}
if constexpr (dimensions == 1) {
assert(data.get_data_type() == image_data_type::linear);
_image = new sycl::image<1>(
ptr, channel.get_channel_order(), channel.get_channel_type(),
sycl::range<1>(data.get_x() / channel.get_total_size()));
} else if constexpr (dimensions == 2) {
assert(data.get_data_type() == image_data_type::pitch);
_image = new sycl::image<2>(ptr, channel.get_channel_order(),
channel.get_channel_type(),
sycl::range<2>(data.get_x(), data.get_y()),
sycl::range<1>(data.get_pitch()));
} else {
throw std::runtime_error("3D image only support matrix data");
}
return;
}
public:
using acc_data_t = typename detail::image_trait<T>::acc_data_t;
using accessor_t =
typename image_accessor_ext<T, IsImageArray ? (dimensions - 1) : dimensions,
IsImageArray>::accessor_t;
image_wrapper() { set_channel(image_channel::create<T>()); }
~image_wrapper() { detach(); }
/// Get image accessor.
accessor_t get_access(sycl::handler &cgh, sycl::queue &q = get_default_queue()) {
if (!_image)
create_image(q);
return accessor_t(*_image, cgh);
}
/// Detach data.
void detach() override {
if (_image)
delete _image;
_image = nullptr;
}
};
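// Usage sketch (illustrative only): attach linear data to a 1D fp32 wrapper and
// read it through an image_accessor_ext inside a kernel. `q`, `dev_ptr`,
// `size_in_bytes` and `n` are placeholder values supplied by the caller.
//   dpct::image_wrapper<float, 1> tex;
//   tex.attach(dev_ptr, size_in_bytes);
//   q.submit([&](sycl::handler &cgh) {
//     dpct::image_accessor_ext<float, 1> tex_acc(tex.get_sampler(),
//                                                tex.get_access(cgh, q));
//     cgh.parallel_for(sycl::range<1>(n), [=](sycl::id<1> i) {
//       float v = tex_acc.read(float(i[0]));
//       (void)v;
//     });
//   });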
/// Wrap sampler and image accessor together.
template <class T, int dimensions, bool IsImageArray = false>
class image_accessor_ext {
public:
using accessor_t =
typename detail::image_trait<T>::template accessor_t<dimensions>;
using data_t = typename detail::image_trait<T>::data_t;
sycl::sampler _sampler;
accessor_t _img_acc;
public:
image_accessor_ext(sycl::sampler sampler, accessor_t acc)
: _sampler(sampler), _img_acc(acc) {}
/// Read data from accessor.
template <bool Available = dimensions == 3>
typename std::enable_if<Available, data_t>::type read(float x, float y,
float z) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::float4(x, y, z, 0), _sampler));
}
/// Read data from accessor.
template <class Coord0, class Coord1, class Coord2,
bool Available = dimensions == 3 &&
std::is_integral<Coord0>::value
&&std::is_integral<Coord1>::value
&&std::is_integral<Coord2>::value>
typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y,
Coord2 z) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::int4(x, y, z, 0), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(float x, float y) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::float2(x, y), _sampler));
}
/// Read data from accessor.
template <class Coord0, class Coord1,
bool Available = dimensions == 2 &&
std::is_integral<Coord0>::value
&&std::is_integral<Coord1>::value>
typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::int2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(float x) {
return detail::fetch_data<T>()(_img_acc.read(x, _sampler));
}
/// Read data from accessor.
template <class CoordT,
bool Available = dimensions == 1 && std::is_integral<CoordT>::value>
typename std::enable_if<Available, data_t>::type read(CoordT x) {
return detail::fetch_data<T>()(_img_acc.read(x, _sampler));
}
};
template <class T, int dimensions> class image_accessor_ext<T, dimensions, true> {
public:
using accessor_t =
typename detail::image_trait<T>::template array_accessor_t<dimensions>;
using data_t = typename detail::image_trait<T>::data_t;
sycl::sampler _sampler;
accessor_t _img_acc;
public:
image_accessor_ext(sycl::sampler sampler, accessor_t acc)
: _sampler(sampler), _img_acc(acc) {}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(int index, float x,
float y) {
return detail::fetch_data<T>()(
_img_acc[index].read(sycl::float2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(int index, int x, int y) {
return detail::fetch_data<T>()(
_img_acc[index].read(sycl::int2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(int index, float x) {
return detail::fetch_data<T>()(
_img_acc[index].read(x, _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(int index, int x) {
return detail::fetch_data<T>()(
_img_acc[index].read(x, _sampler));
}
};
/// Create image wrapper according to image data and sampling info.
/// \param data Image data used to create image wrapper.
/// \param info Image sampling info used to create image wrapper.
/// \returns Pointer to base class of created image wrapper object.
static inline image_wrapper_base *create_image_wrapper(image_data data,
sampling_info info) {
image_channel channel;
int dims = 1;
if (data.get_data_type() == image_data_type::matrix) {
auto matrix = (image_matrix_p)data.get_data_ptr();
channel = matrix->get_channel();
dims = matrix->get_dims();
} else {
if (data.get_data_type() == image_data_type::pitch) {
dims = 2;
}
channel = data.get_channel();
}
if (auto ret = detail::create_image_wrapper(channel, dims)) {
ret->set_sampling_info(info);
ret->set_data(data);
return ret;
}
return nullptr;
}
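// Usage sketch (illustrative only): build a type-erased wrapper from previously
// populated image_data and sampling_info objects (`data` and `info` are
// placeholders). The factory allocates with new, so the caller releases it.
//   dpct::image_wrapper_base_p wrapper = dpct::create_image_wrapper(data, info);
//   if (wrapper) {
//     // ... bind the wrapper to a kernel ...
//     delete wrapper;
//   }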
namespace detail {
/// Create image wrapper with the given type \p T and \p dims.
template <class T> static image_wrapper_base *create_image_wrapper(int dims) {
switch (dims) {
case 1:
return new image_wrapper<T, 1>();
case 2:
return new image_wrapper<T, 2>();
case 3:
return new image_wrapper<T, 3>();
default:
return nullptr;
}
}
/// Create image wrapper with the given data type \p T, channel number and dims.
template <class T>
static image_wrapper_base *create_image_wrapper(unsigned channel_num, int dims) {
switch (channel_num) {
case 1:
return create_image_wrapper<T>(dims);
case 2:
return create_image_wrapper<sycl::vec<T, 2>>(dims);
case 3:
return create_image_wrapper<sycl::vec<T, 3>>(dims);
case 4:
return create_image_wrapper<sycl::vec<T, 4>>(dims);
default:
return nullptr;
}
}
/// Create image wrapper with channel info and specified dimensions.
static image_wrapper_base *create_image_wrapper(image_channel channel, int dims) {
switch (channel.get_channel_type()) {
case sycl::image_channel_type::fp16:
return create_image_wrapper<sycl::half>(channel.get_channel_num(), dims);
case sycl::image_channel_type::fp32:
return create_image_wrapper<float>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int8:
return create_image_wrapper<std::int8_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int16:
return create_image_wrapper<std::int16_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int32:
return create_image_wrapper<std::int32_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::unsigned_int8:
return create_image_wrapper<std::uint8_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::unsigned_int16:
return create_image_wrapper<std::uint16_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::unsigned_int32:
return create_image_wrapper<std::uint32_t>(channel.get_channel_num(), dims);
default:
return nullptr;
}
}
} // namespace detail
} // namespace dpct
#endif // !__DPCT_IMAGE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/kernel.hpp | //==---- kernel.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_KERNEL_HPP__
#define __DPCT_KERNEL_HPP__
#include <sycl/sycl.hpp>
#ifdef _WIN32
#include <unordered_set>
#include <windows.h>
#else
#include <dlfcn.h>
#endif
#if defined(__has_include) && __has_include(<filesystem>)
#include <filesystem>
#elif defined(__has_include) && __has_include(<experimental/filesystem>)
#include <experimental/filesystem>
#else
#error "SYCLomatic runtime requires C++ filesystem support"
#endif
#include <random>
#include <image.hpp>
#include <fstream>
namespace dpct {
typedef void (*kernel_functor)(sycl::queue &, const sycl::nd_range<3> &,
unsigned int, void **, void **);
struct kernel_function_info {
int max_work_group_size = 0;
};
static inline void get_kernel_function_info(kernel_function_info *kernel_info,
const void *function) {
kernel_info->max_work_group_size =
dpct::dev_mgr::instance()
.current_device()
.get_info<sycl::info::device::max_work_group_size>();
}
static inline kernel_function_info
get_kernel_function_info(const void *function) {
kernel_function_info kernel_info;
kernel_info.max_work_group_size =
dpct::dev_mgr::instance()
.current_device()
.get_info<sycl::info::device::max_work_group_size>();
return kernel_info;
}
namespace detail {
#if defined(__has_include) && __has_include(<filesystem>)
namespace fs = std::filesystem;
#else
namespace fs = std::experimental::filesystem;
#endif
/// Write data to temporary file and return absolute path to temporary file.
/// Temporary file is created in a temporary directory both of which have random
/// names with only the user having access permissions. Only one temporary file
/// will be created in the temporary directory.
static inline fs::path write_data_to_file(char const *const data, size_t size) {
std::error_code ec;
if (sizeof(size_t) >= sizeof(std::streamsize) &&
size > (std::numeric_limits<std::streamsize>::max)())
throw std::runtime_error("data file too large");
// random number generator
std::random_device dev;
std::mt19937 prng(dev());
std::uniform_int_distribution<uint64_t> rand(0);
// find temporary directory
auto tmp_dir = fs::temp_directory_path(ec);
if (ec)
throw std::runtime_error("could not find temporary directory");
// create private directory
std::stringstream directory;
fs::path directory_path;
constexpr int max_attempts = 5;
int i;
for (i = 0; i < max_attempts; i++) {
directory << std::hex << rand(prng);
directory_path = tmp_dir / directory.str();
if (fs::create_directory(directory_path)) {
break;
}
}
if (i == max_attempts)
throw std::runtime_error("could not create directory");
// only allow owner permissions to private directory
fs::permissions(directory_path, fs::perms::owner_all, ec);
if (ec)
throw std::runtime_error("could not set directory permissions");
// random filename in private directory
std::stringstream filename;
filename << std::hex << rand(prng);
#ifdef _WIN32
auto filepath = directory_path / (filename.str() + ".dll");
#else
auto filepath = directory_path / filename.str();
#endif
// write data to temporary file
auto outfile = std::ofstream(filepath, std::ios::out | std::ios::binary);
if (outfile) {
// only allow program to write file
fs::permissions(filepath, fs::perms::owner_write, ec);
if (ec)
throw std::runtime_error("could not set permissions");
outfile.write(data, size);
if (!outfile.good())
throw std::runtime_error("could not write data");
outfile.close();
// only allow program to read/execute file
fs::permissions(filepath, fs::perms::owner_read | fs::perms::owner_exec,
ec);
if (ec)
throw std::runtime_error("could not set permissions");
} else
throw std::runtime_error("could not write data");
// check temporary file contents
auto infile = std::ifstream(filepath, std::ios::in | std::ios::binary);
if (infile) {
bool mismatch = false;
size_t cnt = 0;
while (1) {
char c;
infile.get(c);
if (infile.eof())
break;
if (c != data[cnt++])
mismatch = true;
}
if (cnt != size || mismatch)
throw std::runtime_error("file contents not written correctly");
} else
throw std::runtime_error("could not validate file");
if (!filepath.is_absolute())
throw std::runtime_error("temporary filepath is not absolute");
return filepath;
}
static inline uint16_t extract16(unsigned char const *const ptr) {
uint16_t ret = 0;
ret |= static_cast<uint16_t>(ptr[0]) << 0;
ret |= static_cast<uint16_t>(ptr[1]) << 8;
return (ret);
}
static inline uint32_t extract32(unsigned char const *const ptr) {
uint32_t ret = 0;
ret |= static_cast<uint32_t>(ptr[0]) << 0;
ret |= static_cast<uint32_t>(ptr[1]) << 8;
ret |= static_cast<uint32_t>(ptr[2]) << 16;
ret |= static_cast<uint32_t>(ptr[3]) << 24;
return (ret);
}
static inline uint64_t extract64(unsigned char const *const ptr) {
uint64_t ret = 0;
ret |= static_cast<uint64_t>(ptr[0]) << 0;
ret |= static_cast<uint64_t>(ptr[1]) << 8;
ret |= static_cast<uint64_t>(ptr[2]) << 16;
ret |= static_cast<uint64_t>(ptr[3]) << 24;
ret |= static_cast<uint64_t>(ptr[4]) << 32;
ret |= static_cast<uint64_t>(ptr[5]) << 40;
ret |= static_cast<uint64_t>(ptr[6]) << 48;
ret |= static_cast<uint64_t>(ptr[7]) << 56;
return (ret);
}
static inline uint64_t get_lib_size(char const *const blob) {
#ifdef _WIN32
///////////////////////////////////////////////////////////////////////
// Analyze DOS stub
unsigned char const *const ublob =
reinterpret_cast<unsigned char const *const>(blob);
if (ublob[0] != 0x4d || ublob[1] != 0x5a) {
throw std::runtime_error("Blob is not a Windows DLL.");
}
uint32_t pe_header_offset = extract32(ublob + 0x3c);
///////////////////////////////////////////////////////////////////////
  // Analyze PE-header
unsigned char const *const pe_header = ublob + pe_header_offset;
// signature
uint32_t pe_signature = extract32(pe_header + 0);
if (pe_signature != 0x00004550) {
throw std::runtime_error("PE-header signature is not 0x00004550");
}
// machine
uint16_t machine = extract16(pe_header + 4);
if (machine != 0x8664) {
throw std::runtime_error("Only DLLs for x64 supported");
}
// number of sections
uint16_t number_of_sections = extract16(pe_header + 6);
// sizeof optional header
uint16_t sizeof_optional_header = extract16(pe_header + 20);
// magic
uint16_t magic = extract16(pe_header + 24);
if (magic != 0x10b && magic != 0x20b) {
throw std::runtime_error("MAGIC is not 0x010b or 0x020b");
}
///////////////////////////////////////////////////////////////////////
// Analyze tail of optional header
constexpr int coff_header_size = 24;
unsigned char const *const tail_of_optional_header =
pe_header + coff_header_size + sizeof_optional_header;
if (extract64(tail_of_optional_header - 8) != 0) {
throw std::runtime_error("Optional header not zero-padded");
}
///////////////////////////////////////////////////////////////////////
// Analyze last section header
constexpr int section_header_size = 40;
unsigned char const *const last_section_header =
tail_of_optional_header + section_header_size * (number_of_sections - 1);
uint32_t sizeof_raw_data = extract32(last_section_header + 16);
uint32_t pointer_to_raw_data = extract32(last_section_header + 20);
return sizeof_raw_data + pointer_to_raw_data;
#else
if (blob[0] != 0x7F || blob[1] != 'E' || blob[2] != 'L' || blob[3] != 'F')
throw std::runtime_error("Blob is not in ELF format");
if (blob[4] != 0x02)
throw std::runtime_error("Only 64-bit headers are supported");
if (blob[5] != 0x01)
throw std::runtime_error("Only little-endian headers are supported");
unsigned char const *const ublob =
reinterpret_cast<unsigned char const *const>(blob);
uint64_t e_shoff = extract64(ublob + 0x28);
uint16_t e_shentsize = extract16(ublob + 0x3A);
uint16_t e_shnum = extract16(ublob + 0x3C);
return e_shoff + (e_shentsize * e_shnum);
#endif
}
#ifdef _WIN32
class path_lib_record {
public:
void operator=(const path_lib_record &) = delete;
~path_lib_record() {
for (auto entry : lib_to_path) {
FreeLibrary(static_cast<HMODULE>(entry.first));
fs::permissions(entry.second, fs::perms::owner_all);
fs::remove_all(entry.second.remove_filename());
}
}
static void record_lib_path(fs::path path, void *library) {
lib_to_path[library] = path;
}
static void remove_lib(void *library) {
auto path = lib_to_path[library];
std::error_code ec;
FreeLibrary(static_cast<HMODULE>(library));
fs::permissions(path, fs::perms::owner_all);
if (fs::remove_all(path.remove_filename(), ec) != 2 || ec)
// one directory and one temporary file should have been deleted
throw std::runtime_error("Directory delete failed");
lib_to_path.erase(library);
}
private:
static inline std::unordered_map<void *, fs::path> lib_to_path;
};
#endif
} // namespace detail
class kernel_library {
public:
kernel_library() : ptr{nullptr} {}
kernel_library(void *ptr) : ptr{ptr} {}
operator void *() const { return ptr; }
private:
void *ptr;
#ifdef _WIN32
static inline detail::path_lib_record single_instance_to_trigger_destructor;
#endif
};
namespace detail {
static inline kernel_library load_dl_from_data(char const *const data,
size_t size) {
fs::path filename = write_data_to_file(data, size);
#ifdef _WIN32
void *so = LoadLibraryW(filename.wstring().c_str());
#else
void *so = dlopen(filename.c_str(), RTLD_LAZY);
#endif
if (so == nullptr)
throw std::runtime_error("Failed to load kernel library");
#ifdef _WIN32
detail::path_lib_record::record_lib_path(filename, so);
#else
std::error_code ec;
// Windows DLL cannot be deleted while in use
if (fs::remove_all(filename.remove_filename(), ec) != 2 || ec)
// one directory and one temporary file should have been deleted
throw std::runtime_error("Directory delete failed");
#endif
return so;
}
} // namespace detail
/// Load kernel library and return a handle to use the library.
/// \param [in] name The name of the library.
static inline kernel_library load_kernel_library(const std::string &name) {
std::ifstream ifs;
ifs.open(name, std::ios::in | std::ios::binary);
std::stringstream buffer;
buffer << ifs.rdbuf();
const std::string buffer_string = buffer.str();
return detail::load_dl_from_data(buffer_string.c_str(), buffer_string.size());
}
/// Load kernel library whose image is already in memory and return a handle to
/// use the library.
/// \param [in] image A pointer to the image in memory.
static inline kernel_library load_kernel_library_mem(char const *const image) {
const size_t size = detail::get_lib_size(image);
return detail::load_dl_from_data(image, size);
}
/// Unload kernel library.
/// \param [in,out] library Handle to the library to be closed.
static inline void unload_kernel_library(const kernel_library &library) {
#ifdef _WIN32
detail::path_lib_record::remove_lib(library);
#else
dlclose(library);
#endif
}
class kernel_function {
public:
kernel_function() : ptr{nullptr} {}
kernel_function(dpct::kernel_functor ptr) : ptr{ptr} {}
operator void *() const { return ((void *)ptr); }
void operator()(sycl::queue &q, const sycl::nd_range<3> &range,
unsigned int a, void **args, void **extra) {
ptr(q, range, a, args, extra);
}
private:
dpct::kernel_functor ptr;
};
/// Find kernel function in a kernel library and return its address.
/// \param [in] library Handle to the kernel library.
/// \param [in] name Name of the kernel function.
static inline dpct::kernel_function
get_kernel_function(kernel_library &library, const std::string &name) {
#ifdef _WIN32
dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>(
GetProcAddress(static_cast<HMODULE>(static_cast<void *>(library)),
(name + std::string("_wrapper")).c_str()));
#else
dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>(
dlsym(library, (name + std::string("_wrapper")).c_str()));
#endif
if (fn == nullptr)
throw std::runtime_error("Failed to get function");
return fn;
}
/// Invoke a kernel function.
/// \param [in] function kernel function.
/// \param [in] queue SYCL queue used to execute kernel
/// \param [in] groupRange SYCL group range
/// \param [in] localRange SYCL local range
/// \param [in] localMemSize The size of local memory required by the kernel
/// function.
/// \param [in] kernelParams Array of pointers to kernel arguments.
/// \param [in] extra Extra arguments.
static inline void invoke_kernel_function(dpct::kernel_function &function,
sycl::queue &queue,
sycl::range<3> groupRange,
sycl::range<3> localRange,
unsigned int localMemSize,
void **kernelParams, void **extra) {
function(queue, sycl::nd_range<3>(groupRange * localRange, localRange),
localMemSize, kernelParams, extra);
}
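// Usage sketch (illustrative only): load a kernel library from disk, resolve a
// kernel by name, launch it, then unload the library. The file name, kernel
// name, queue `q` and argument list are placeholders.
//   dpct::kernel_library lib = dpct::load_kernel_library("my_kernels.so");
//   dpct::kernel_function fn = dpct::get_kernel_function(lib, "my_kernel");
//   void *args[] = {&arg0, &arg1};
//   dpct::invoke_kernel_function(fn, q, sycl::range<3>(1, 1, 16),
//                                sycl::range<3>(1, 1, 64),
//                                /*localMemSize=*/0, args, nullptr);
//   dpct::unload_kernel_library(lib);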
/// Find image wrapper in a kernel library and return its address.
/// \param [in] library Handle to the kernel library.
/// \param [in] name Name of the target image wrapper.
static inline dpct::image_wrapper_base_p
get_image_wrapper(dpct::kernel_library &library, const std::string &name) {
#ifdef _WIN32
dpct::image_wrapper_base_p fn =
reinterpret_cast<dpct::image_wrapper_base_p>(GetProcAddress(
static_cast<HMODULE>(static_cast<void *>(library)), name.c_str()));
#else
dpct::image_wrapper_base_p fn = reinterpret_cast<dpct::image_wrapper_base_p>(
dlsym(library, name.c_str()));
#endif
if (fn == nullptr)
throw std::runtime_error("Failed to get image");
return fn;
}
} // namespace dpct
#endif // __DPCT_KERNEL_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/dpct.hpp | //==---- dpct.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_HPP__
#define __DPCT_HPP__
#include <sycl/sycl.hpp>
#include <iostream>
#include <limits.h>
#include <math.h>
template <class... Args> class dpct_kernel_name;
template <int Arg> class dpct_kernel_scalar;
#include "atomic.hpp"
#include "device.hpp"
#include "image.hpp"
#include "kernel.hpp"
#include "math.hpp"
#include "memory.hpp"
#include "util.hpp"
#if defined(_MSC_VER)
#define __dpct_align__(n) __declspec(align(n))
#define __dpct_inline__ __forceinline
#else
#define __dpct_align__(n) __attribute__((aligned(n)))
#define __dpct_inline__ __inline__ __attribute__((always_inline))
#endif
#if defined(_MSC_VER)
#define __dpct_noinline__ __declspec(noinline)
#else
#define __dpct_noinline__ __attribute__((noinline))
#endif
#define DPCT_COMPATIBILITY_TEMP (600)
namespace dpct{
enum error_code { success = 0, default_error = 999 };
}
#define DPCT_CHECK_ERROR(expr) \
[&]() { \
try { \
expr; \
return dpct::success; \
} catch (std::exception const &e) { \
std::cerr << e.what() << std::endl; \
return dpct::default_error; \
} \
}()
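// Usage sketch (illustrative only): wrap an expression that may throw and turn
// the outcome into an error code. `q`, `dst`, `src` and `size` are placeholders.
//   dpct::error_code ec = DPCT_CHECK_ERROR(q.memcpy(dst, src, size).wait());
//   if (ec != dpct::success) {
//     // handle the failure
//   }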
#define DPCT_PI_F (3.14159274101257f)
#define DPCT_PI (3.141592653589793115998)
#endif // __DPCT_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/dnnl_utils.hpp | //==---- dnnl_utils.hpp ---------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DNNL_UTILS_HPP__
#define __DPCT_DNNL_UTILS_HPP__
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/numeric>
#include <oneapi/mkl.hpp>
#include <oneapi/mkl/rng/device.hpp>
#include <sycl/sycl.hpp>
#include <oneapi/dnnl/dnnl.hpp>
#include <oneapi/dnnl/dnnl_sycl.hpp>
#include <unordered_map>
#include <algorithm>
#include <list>
#include "memory.hpp"
#include "device.hpp"
#include "lib_common_utils.hpp"
namespace dpct {
namespace dnnl {
/// Get concatenated library version as an integer.
static inline size_t get_version() {
const ::dnnl::version_t *ver = ::dnnl::version();
return ver->major * 1000 + ver->minor * 100 + ver->patch;
}
class engine_ext;
typedef oneapi::mkl::rng::philox4x32x10 rng_engine_t;
/// An enum class representing memory layout. Used by
/// memory_desc_ext to create a memory with pre-defined layout.
enum class memory_format_tag { nchw, nhwc, nchw_blocked };
/// An enum class representing RNN data memory layout. Used by
/// memory_desc_ext to create a memory with pre-defined layout.
enum class rnn_memory_format_tag { tnc, ntc };
/// A class holding the description of an N-dimensions memory.
class memory_desc_ext {
::dnnl::memory::desc _desc;
public:
/// Convert dpct::library_data_t to dnnl::memory::data_type.
static ::dnnl::memory::data_type to_dnnl_data_type(dpct::library_data_t dt);
/// Convert dnnl::memory::data_type to dpct::library_data_t.
static dpct::library_data_t
to_dpct_library_data_t(::dnnl::memory::data_type dt, unsigned block_size);
/// Convert dpct::dnnl::memory_format_tag to dnnl::memory::format_tag.
static ::dnnl::memory::format_tag to_dnnl_format_tag(dpct::library_data_t dt,
memory_format_tag tag);
memory_desc_ext() = default;
memory_desc_ext(::dnnl::memory::desc &desc) : _desc(desc) {}
memory_desc_ext(::dnnl::memory::desc &&desc) : _desc(std::move(desc)) {}
/// Setting a 4D memory with given parameters.
/// \param [in] tag Format tag.
/// \param [in] dt Data type.
/// \param [in] n Number of images.
/// \param [in] c Number of channels.
/// \param [in] h Height of images.
/// \param [in] w Width of images.
void set(memory_format_tag tag, dpct::library_data_t dt, int n, int c, int h,
int w);
/// Setting a 3D RNN data memory with given parameters.
/// \param [in] tag RNN data format tag.
/// \param [in] dt Data type.
/// \param [in] t Number of sequence length.
/// \param [in] n Number of batch.
/// \param [in] c Height of input channel.
void set(rnn_memory_format_tag tag, dpct::library_data_t dt, int t, int n, int c);
/// Setting a 4D memory with given parameters.
/// \param [in] dt Data type.
/// \param [in] n Number of images.
/// \param [in] c Number of channels.
/// \param [in] h Height of images.
/// \param [in] w Width of images.
/// \param [in] n_stride Stride between two continuous images.
/// \param [in] c_stride Stride between two continuous channels.
/// \param [in] h_stride Stride between two continuous rows.
/// \param [in] w_stride Stride between two continuous columns.
void set(dpct::library_data_t dt, int n, int c, int h, int w, int n_stride,
int c_stride, int h_stride, int w_stride);
/// Setting a ND memory with given parameters.
/// \param [in] dt Data type.
/// \param [in] ndims Dimension of the memory.
  /// \param [in] dims Array of dimension ndims that contain the size of each
  /// memory dimension.
  /// \param [in] strides Array of dimension ndims that contain the stride of
  /// each memory dimension.
void set(dpct::library_data_t dt, int ndims, const int dims[],
const int strides[]);
/// Setting a ND memory with given parameters.
/// \param [in] tag Format tag.
/// \param [in] dt Data type.
/// \param [in] ndims Dimension of the memory.
/// \param [in] dims Array of dimension ndims that contain the size of each
/// memory dimension.
void set(memory_format_tag tag, dpct::library_data_t dt, int ndims,
const int dims[]);
/// Getting a ::dnnl::memory::desc from a memory_desc_ext.
/// \returns The ::dnnl::memory::desc.
const ::dnnl::memory::desc &get_desc() const { return _desc; }
/// Setting holding desc with given dnnl memory descriptor.
void set_desc(::dnnl::memory::desc desc) { _desc = desc; }
/// Getting a size of a memory_desc_ext in bytes.
/// \returns The size.
size_t get_size() const { return _desc.get_size(); }
/// Getting parameters from a 4D memory.
/// \param [out] dt Data type.
/// \param [out] n Number of images.
/// \param [out] c Number of channels.
/// \param [out] h Height of images.
/// \param [out] w Width of images.
/// \param [out] n_stride Stride between two continuous images.
/// \param [out] c_stride Stride between two continuous channels.
/// \param [out] h_stride Stride between two continuous rows.
/// \param [out] w_stride Stride between two continuous columns.
void get(dpct::library_data_t *dt, int *n, int *c, int *h, int *w,
int *n_stride, int *c_stride, int *h_stride, int *w_stride) const;
/// Getting parameters from a 4D memory.
/// \param [out] dt Data type.
/// \param [out] tag Format tag.
/// \param [out] n Number of images.
/// \param [out] c Number of channels.
/// \param [out] h Height of images.
/// \param [out] w Width of images.
void get(dpct::library_data_t *dt, memory_format_tag *tag, int *n, int *c,
int *h, int *w) const;
/// Getting parameters from a 3D RNN data memory.
/// \param [out] dt Data type.
/// \param [out] tag RNN data format tag.
/// \param [out] t Number of sequence length.
/// \param [out] n Number of batch.
/// \param [out] c Height of input channel.
void get(dpct::library_data_t *dt, rnn_memory_format_tag *tag, int *t, int *n,
int *c) const;
/// Getting parameters from a ND memory.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given memory descriptor.
/// \param [out] dt Data type.
/// \param [out] ndims Dimension of the memory.
/// \param [out] dims Array of dimension requested_ndims that contain the
/// size of each memory dimension.
/// \param [out] strides Array of dimension requested_ndims that contain the
/// stride of each memory dimension.
void get(int requested_ndims, dpct::library_data_t *dt, int *ndims,
int dims[], int strides[]) const;
/// Getting parameters from a ND memory.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given memory descriptor.
/// \param [out] dt Data type.
/// \param [out] tag Format tag.
/// \param [out] ndims Dimension of the memory.
/// \param [out] dims Array of dimension requested_ndims that contain the
/// size of each memory dimension.
void get(int requested_ndims, dpct::library_data_t *dt,
memory_format_tag *tag, int *ndims, int dims[]) const;
/// Getting dims from a ND memory.
/// \return The dims.
std::vector<int64_t> get_dims() const { return _desc.get_dims(); }
/// Getting strides from a ND memory.
/// \return The strides.
std::vector<int64_t> get_strides() const {
return _desc.get_strides();
}
/// Getting element num from a ND memory.
/// \return The element number.
size_t get_element_num() const {
auto dims = _desc.get_dims();
if (dims.empty()) {
return 0;
}
size_t result = 1;
for (auto &dim : dims) {
result *= dim;
}
return result;
}
operator bool() const {
return bool(_desc);
}
memory_desc_ext &operator=(std::nullptr_t) {
_desc.reset(nullptr);
return *this;
}
};
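// Usage sketch (illustrative only): describe a 1x3x224x224 NCHW tensor. The
// enumerator dpct::library_data_t::real_float is assumed to denote fp32 data.
//   dpct::dnnl::memory_desc_ext desc;
//   desc.set(dpct::dnnl::memory_format_tag::nchw,
//            dpct::library_data_t::real_float, /*n=*/1, /*c=*/3, /*h=*/224,
//            /*w=*/224);
//   size_t bytes = desc.get_size();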
/// A class holding description for an activation operation.
class activation_desc {
::dnnl::algorithm _alg;
float _alpha;
float _beta;
public:
/// Setting an activation descriptor with given parameters.
/// \param [in] alg Activation algorithm.
/// \param [in] alpha Value of alpha parameter.
void set(::dnnl::algorithm alg, float alpha) {
_alg = alg;
if(alg == ::dnnl::algorithm::eltwise_clip) {
_alpha = 0;
_beta = alpha;
} else {
_alpha = alpha;
}
}
  /// Getting parameters from an activation descriptor.
/// \param [out] alg Activation algorithm.
/// \param [out] alpha Value of alpha parameter.
void get(::dnnl::algorithm *alg, float *alpha) const {
*alg = _alg;
if(_alg == ::dnnl::algorithm::eltwise_clip) {
*alpha = _beta;
} else {
*alpha = _alpha;
}
}
/// Setting the alpha parameter of an activation descriptor.
/// \param [in] alpha Value of alpha parameter.
void set_alpha(float alpha) { _alpha = alpha; }
/// Setting the beta parameter of an activation descriptor.
/// \param [in] beta Value of beta parameter.
void set_beta(float beta) { _beta = beta; }
/// Setting the algorithm parameter of an activation descriptor.
/// \param [in] alg Activation algorithm.
void set_algorithm(::dnnl::algorithm alg) { _alg = alg; }
/// Getting the alpha parameter from an activation descriptor.
/// \param [out] alpha Value of alpha parameter.
float get_alpha() const { return _alpha; }
/// Getting the beta parameter from an activation descriptor.
/// \param [out] beta Value of beta parameter.
float get_beta() const { return _beta; }
/// Getting the algorithm parameter from an activation descriptor.
/// \param [out] alg Activation algorithm.
::dnnl::algorithm get_algorithm() const { return _alg; }
};
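// Usage sketch (illustrative only): describe a ReLU activation. For
// eltwise_clip the same alpha argument is interpreted as the upper clip bound,
// as handled in set() above.
//   dpct::dnnl::activation_desc ad;
//   ad.set(::dnnl::algorithm::eltwise_relu, /*alpha=*/0.0f);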
/// A class holding description for a local response normalization operation.
class lrn_desc {
unsigned int _local_size;
float _alpha;
float _beta;
float _k;
public:
/// Setting a local response normalization descriptor with given parameters.
/// \param [in] local_size Value of local_size parameter.
/// \param [in] alpha Value of alpha parameter.
/// \param [in] beta Value of beta parameter.
/// \param [in] k Value of k parameter.
void set(unsigned int local_size, float alpha, float beta, float k) {
_local_size = local_size;
_alpha = alpha;
_beta = beta;
_k = k;
}
  /// Getting parameters from a local response normalization descriptor.
/// \param [out] local_size Value of local_size parameter.
/// \param [out] alpha Value of alpha parameter.
/// \param [out] beta Value of beta parameter.
/// \param [out] k Value of k parameter.
void get(unsigned int *local_size, float *alpha, float *beta,
float *k) const {
*local_size = _local_size;
*alpha = _alpha;
*beta = _beta;
*k = _k;
}
/// Setting the local size parameter of a local response normalization
/// descriptor.
/// \param [in] local_size Value of local_size parameter.
void set_local_size(unsigned int local_size) { _local_size = local_size; }
/// Setting the alpha parameter of a local response normalization descriptor.
/// \param [in] alpha Value of alpha parameter.
void set_alpha(float alpha) { _alpha = alpha; }
/// Setting the beta parameter of a local response normalization descriptor.
/// \param [in] beta Value of beta parameter.
void set_beta(float beta) { _beta = beta; }
/// Setting the k parameter of a local response normalization descriptor.
/// \param [in] k Value of k parameter.
void set_k(float k) { _k = k; }
/// Getting the local size parameter from a local response normalization
/// descriptor.
/// \param [out] local_size Value of local_size parameter.
unsigned int get_local_size() const { return _local_size; }
/// Getting the alpha parameter from a local response normalization
/// descriptor.
/// \param [out] alpha Value of alpha parameter.
float get_alpha() const { return _alpha; }
/// Getting the beta parameter from a local response normalization descriptor.
/// \param [out] beta Value of beta parameter.
float get_beta() const { return _beta; }
/// Getting the k parameter from a local response normalization descriptor.
/// \param [out] k Value of k parameter.
float get_k() const { return _k; }
};
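// Usage sketch (illustrative only): AlexNet-style LRN parameters; the concrete
// values are just an example.
//   dpct::dnnl::lrn_desc ld;
//   ld.set(/*local_size=*/5, /*alpha=*/1e-4f, /*beta=*/0.75f, /*k=*/2.0f);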
/// An enum class representing softmax algorithm.
enum class softmax_algorithm { normal, log };
/// An enum class representing softmax mode.
enum class softmax_mode { instance, channel };
/// A class holding description for a pooling operation.
class pooling_desc {
::dnnl::algorithm _alg;
std::vector<int64_t> _stride;
std::vector<int64_t> _kernel;
std::vector<int64_t> _padding;
public:
/// Setting a 2D pooling descriptor with given parameters.
/// \param [in] alg Pooling algorithm.
/// \param [in] kernel_h Value of height of kernel.
/// \param [in] kernel_w Value of width of kernel.
/// \param [in] padding_h Value of height of padding.
/// \param [in] padding_w Value of width of padding.
/// \param [in] stride_h Value of height of stride.
/// \param [in] stride_w Value of width of stride.
void set(::dnnl::algorithm alg, int kernel_h, int kernel_w, int padding_h,
int padding_w, int stride_h, int stride_w) {
_alg = alg;
_stride = {stride_h, stride_w};
_kernel = {kernel_h, kernel_w};
_padding = {padding_h, padding_w};
}
/// Setting a ND pooling descriptor with given parameters.
/// \param [in] alg Pooling algorithm.
/// \param [in] ndims Dimension of the pooling operation.
/// \param [in] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
/// \param [in] padding Array of dimension ndims containing the padding size of
/// each dimension.
/// \param [in] stride Array of dimension ndims containing the stride size of
/// each dimension.
void set(::dnnl::algorithm alg, int ndims, int kernel[], int padding[],
int stride[]) {
_alg = alg;
_stride = std::vector<int64_t>(stride, stride + ndims);
_kernel = std::vector<int64_t>(kernel, kernel + ndims);
_padding = std::vector<int64_t>(padding, padding + ndims);
}
/// Getting parameters from a 2D pooling descriptor.
/// \param [out] alg Pooling algorithm.
/// \param [out] kernel_h Value of height of kernel.
/// \param [out] kernel_w Value of width of kernel.
/// \param [out] padding_h Value of height of padding.
/// \param [out] padding_w Value of width of padding.
/// \param [out] stride_h Value of height of stride.
/// \param [out] stride_w Value of width of stride.
void get(::dnnl::algorithm *alg, int *kernel_h, int *kernel_w, int *padding_h,
int *padding_w, int *stride_h, int *stride_w) const {
*alg = _alg;
*kernel_h = _kernel[0];
*kernel_w = _kernel[1];
*padding_h = _padding[0];
*padding_w = _padding[1];
*stride_h = _stride[0];
*stride_w = _stride[1];
}
/// Getting parameters from a ND pooling descriptor.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given pooling descriptor.
/// \param [out] alg Pooling algorithm.
/// \param [out] ndims Dimension of the pooling operation.
/// \param [out] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
/// \param [out] padding Array of dimension ndims containing the padding size
/// of each dimension.
/// \param [out] stride Array of dimension ndims containing the stride size of
/// each dimension.
void get(int requested_ndims, ::dnnl::algorithm *alg, int *ndims,
int kernel[], int padding[], int stride[]) const {
*alg = _alg;
*ndims = _stride.size();
for (int i = 0; i < requested_ndims; i++) {
kernel[i] = _kernel[i];
padding[i] = _padding[i];
stride[i] = _stride[i];
}
}
/// Setting the algorithm parameter of a pooling descriptor.
/// \param [in] alg Pooling algorithm.
void set_algorithm(::dnnl::algorithm alg) { _alg = alg; }
/// Setting the stride parameter of a pooling descriptor.
/// \param [in] stride Array of dimension ndims containing the stride size of
/// each dimension.
void set_stride(const std::vector<int64_t> &stride) { _stride = stride; }
/// Setting the kernel parameter of a pooling descriptor.
/// \param [in] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
void set_kernel(const std::vector<int64_t> &kernel) { _kernel = kernel; }
/// Setting the padding parameter of a pooling descriptor.
/// \param [in] padding Array of dimension ndims containing the padding size
/// of each dimension.
void set_padding(const std::vector<int64_t> &padding) { _padding = padding; }
/// Getting the algorithm parameter from a pooling descriptor.
/// \param [out] alg Pooling algorithm.
::dnnl::algorithm get_algorithm() const { return _alg; }
/// Getting the stride parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the stride size of each
/// dimension.
const std::vector<int64_t> &get_stride() const { return _stride; }
/// Getting the kernel parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the kernel size of each
/// dimension.
const std::vector<int64_t> &get_kernel() const { return _kernel; }
/// Getting the padding parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the padding size of each
/// dimension.
const std::vector<int64_t> &get_padding() const { return _padding; }
/// Getting the output dimensions of a memory after 2D pooling has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [out] out_n Number of images.
/// \param [out] out_c Number of channels.
/// \param [out] out_h Height of images.
/// \param [out] out_w Width of images.
void get_forward_output_dim(const memory_desc_ext &desc, int *out_n,
int *out_c, int *out_h, int *out_w) const {
auto dims = desc.get_dims();
*out_n = dims[0];
*out_c = dims[1];
*out_h = 1 + (dims[2] + 2 * _padding[0] - _kernel[0]) / _stride[0];
*out_w = 1 + (dims[3] + 2 * _padding[1] - _kernel[1]) / _stride[1];
}
/// Getting the output dimensions of a memory after ND pooling has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [out] ndims Dimension of the memory.
/// \param [out] out_dims Array of dimension requested_ndims that contain
/// the size of each memory dimension.
void get_forward_output_dim(const memory_desc_ext &desc, int ndims,
int out_dims[]) const {
assert(ndims >= 4 && "ndims is at least 4.");
auto dims = desc.get_dims();
out_dims[0] = dims[0];
out_dims[1] = dims[1];
for (int i = 2; i < ndims; i++) {
out_dims[i] =
1 + (dims[i] + 2 * _padding[i - 2] - _kernel[i - 2]) / _stride[i - 2];
}
}
};
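// Usage sketch (illustrative only): 2x2 max pooling with stride 2 and no
// padding, then query the output shape for a 1x64x56x56 input. `in_desc` is a
// placeholder memory_desc_ext describing that tensor.
//   dpct::dnnl::pooling_desc pd;
//   pd.set(::dnnl::algorithm::pooling_max, /*kernel_h=*/2, /*kernel_w=*/2,
//          /*padding_h=*/0, /*padding_w=*/0, /*stride_h=*/2, /*stride_w=*/2);
//   int n, c, h, w;
//   pd.get_forward_output_dim(in_desc, &n, &c, &h, &w); // yields 1x64x28x28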
/// An enum class representing reduction operations.
enum class reduction_op {
max,
min,
sum,
mul,
mean,
amax,
mul_no_zeros,
norm1,
norm2
};
/// An enum class representing batch normalization mode.
enum class batch_normalization_mode { per_activation, spatial };
/// An enum class representing batch normalization operations.
enum class batch_normalization_ops { none, activation, add_activation };
/// An enum class representing binary operations.
enum class binary_op { add, sub, mul, div, min, max, sqrt, neg };
/// A struct representing convolution algorithm information.
struct convolution_algorithm_info {
::dnnl::algorithm algo = ::dnnl::algorithm::convolution_auto;
int status = 0;
};
/// A class holding description for a convolution operation.
class convolution_desc {
std::vector<int64_t> _strides;
std::vector<int64_t> _dilates;
std::vector<int64_t> _paddings;
int _group_count = 1;
::dnnl::fpmath_mode _math_mode = ::dnnl::fpmath_mode::strict;
public:
/// Setting a group count to be used in the convolution.
/// \param [in] group_count Value of group count.
void set_group_count(int group_count) { _group_count = group_count; }
/// Getting a group count specified in the given convolution descriptor.
/// \returns Value of group count.
int get_group_count() { return _group_count; }
/// Setting floating point math mode to be used in the convolution.
/// \param [in] math_mode Value of math_mode.
void set_math_mode(::dnnl::fpmath_mode math_mode) { _math_mode = math_mode; }
/// Getting floating point math mode specified in the given convolution descriptor.
/// \returns Value of math mode.
::dnnl::fpmath_mode get_math_mode() { return _math_mode; }
/// Setting a 2D convolution descriptor with given parameters.
/// \param [in] padding_h Value of height of padding.
/// \param [in] padding_w Value of width of padding.
/// \param [in] stride_h Value of height of stride.
/// \param [in] stride_w Value of width of stride.
/// \param [in] dilate_h Value of height of dilate.
/// \param [in] dilate_w Value of width of dilate.
void set(int padding_h, int padding_w, int stride_h, int stride_w,
int dilate_h, int dilate_w) {
_strides = {stride_h, stride_w};
_dilates = {dilate_h - 1, dilate_w - 1};
_paddings = {padding_h, padding_w};
}
/// Setting a ND convolution descriptor with given parameters.
/// \param [in] ndims Dimension of the convolution operation.
/// \param [in] paddings Array of dimension ndims containing the padding size of
/// each dimension.
/// \param [in] strides Array of dimension ndims containing the stride size of
/// each dimension.
  /// \param [in] dilates Array of dimension ndims containing the dilation size
  /// of each dimension.
void set(int ndims, int paddings[], int strides[], int dilates[]) {
_strides = std::vector<int64_t>(strides, strides + ndims);
_paddings = std::vector<int64_t>(paddings, paddings + ndims);
_dilates = std::vector<int64_t>(dilates, dilates + ndims);
for (auto &dilate : _dilates) {
dilate--;
}
}
/// Getting parameters from a 2D convolution descriptor.
/// \param [out] padding_h Value of height of padding.
/// \param [out] padding_w Value of width of padding.
/// \param [out] stride_h Value of height of stride.
/// \param [out] stride_w Value of width of stride.
/// \param [out] dilate_h Value of height of dilate.
/// \param [out] dilate_w Value of width of dilate.
void get(int *padding_h, int *padding_w, int *stride_h, int *stride_w,
int *dilate_h, int *dilate_w) const {
*dilate_h = _dilates[0];
*dilate_w = _dilates[1];
*padding_h = _paddings[0];
*padding_w = _paddings[1];
*stride_h = _strides[0];
*stride_w = _strides[1];
}
/// Getting parameters from a ND convolution descriptor.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given convolution descriptor.
/// \param [out] ndims Dimension of the pooling operation.
/// \param [out] paddings Array of dimension ndims containing the padding size
/// of each dimension.
/// \param [out] strides Array of dimension ndims containing the stride size of
/// each dimension.
/// \param [out] dilates Array of dimension ndims containing the dilate size of
/// each dimension.
void get(int requested_ndims, int *ndims, int paddings[], int strides[],
int dilates[]) const {
*ndims = _strides.size();
for (int i = 0; i < requested_ndims; i++) {
dilates[i] = _dilates[i];
paddings[i] = _paddings[i];
strides[i] = _strides[i];
}
}
/// Getting the stride parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the stride size of each
/// dimension.
const std::vector<int64_t> &get_stride() const { return _strides; }
  /// Getting the dilate parameter from a convolution descriptor.
  /// \returns Array of dimension ndims containing the dilate size of each
  /// dimension.
const std::vector<int64_t> &get_dilate() const { return _dilates; }
/// Getting the padding parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the padding size of each
/// dimension.
const std::vector<int64_t> &get_padding() const { return _paddings; }
/// Getting the output dimensions of a memory after 2D convolution has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [in] weight_desc Input weight memory descriptor.
/// \param [out] out_n Number of images.
/// \param [out] out_c Number of channels.
/// \param [out] out_h Height of images.
/// \param [out] out_w Width of images.
void get_forward_output_dim(const memory_desc_ext &desc,
const memory_desc_ext &weight_desc, int *out_n,
int *out_c, int *out_h, int *out_w) const {
auto dims = desc.get_dims();
auto weight_dims = weight_desc.get_dims();
*out_n = dims[0];
*out_c = weight_dims[0];
*out_h = 1 + (dims[2] + 2 * _paddings[0] -
(1 + (_dilates[0] * (weight_dims[2] - 1)))) /
_strides[0];
*out_w = 1 + (dims[3] + 2 * _paddings[1] -
(1 + (_dilates[1] * (weight_dims[3] - 1)))) /
_strides[1];
}
/// Getting the output dimensions of a memory after ND convolution has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [in] weight_desc Input weight memory descriptor.
/// \param [out] ndims Dimension of the memory.
/// \param [out] out_dims Array of dimension requested_ndims that contain
/// the size of each memory dimension.
void get_forward_output_dim(const memory_desc_ext &desc,
const memory_desc_ext &weight_desc, int ndims,
int out_dims[]) const {
assert(ndims >= 4 && "ndims is at least 4.");
auto dims = desc.get_dims();
auto weight_dims = weight_desc.get_dims();
out_dims[0] = dims[0];
out_dims[1] = weight_dims[1];
for (int i = 2; i < ndims; i++) {
out_dims[i] = 1 + (dims[i] + 2 * _paddings[i - 2] -
(1 + (_dilates[i - 2] * (weight_dims[i] - 1)))) /
_strides[i - 2];
}
}
convolution_desc &operator=(std::nullptr_t) {
return *this = convolution_desc();
}
operator bool() const {
return !(_strides.size() == 0
&& _dilates.size() == 0
&& _paddings.size() == 0);
}
};
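// Usage sketch (illustrative only): a 3x3 convolution with stride 1, padding 1
// and dilation 1, then query the resulting output shape. `in_desc` and `w_desc`
// are placeholder memory_desc_ext objects for the input and weight tensors.
//   dpct::dnnl::convolution_desc cd;
//   cd.set(/*padding_h=*/1, /*padding_w=*/1, /*stride_h=*/1, /*stride_w=*/1,
//          /*dilate_h=*/1, /*dilate_w=*/1);
//   int n, c, h, w;
//   cd.get_forward_output_dim(in_desc, w_desc, &n, &c, &h, &w);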
/// An enum class representing rnn mode.
enum class rnn_mode { vanilla_relu, vanilla_tanh, lstm, gru };
/// An enum class representing rnn bias mode.
enum class rnn_bias_mode { none, single };
/// An enum class representing rnn direction.
enum class rnn_direction {unidirectional, bidirectional};
/// A class holding description for a RNN operation.
class rnn_desc {
rnn_mode _mode;
rnn_bias_mode _bias_mode;
rnn_direction _direction;
dpct::library_data_t _dt;
int _input_size;
int _hidden_size;
int _projection_size;
int _layer_size;
public:
void set(rnn_mode mode, rnn_bias_mode bias_mode, rnn_direction direction,
dpct::library_data_t dt, int input_size, int hidden_size,
int projection_size, int layer_size) {
_mode = mode;
_bias_mode = bias_mode;
_direction = direction;
_input_size = input_size;
_hidden_size = hidden_size;
_projection_size = projection_size;
_layer_size = layer_size;
_dt = dt;
}
void get(rnn_mode *mode, rnn_bias_mode *bias_mode, rnn_direction *direction,
dpct::library_data_t *dt, int *input_size, int *hidden_size,
int *projection_size, int *layer_size) const {
*mode = _mode;
*bias_mode = _bias_mode;
*direction = _direction;
*input_size = _input_size;
*hidden_size = _hidden_size;
*projection_size = _projection_size;
*layer_size = _layer_size;
*dt = _dt;
}
};
/// A class holding description for a Dropout operation.
class dropout_desc {
struct dropout_desc_imp {
float _p = 0.5f;
unsigned long long _seed = 1;
void *_state = nullptr;
std::vector<std::uint8_t> _host_state;
rng_engine_t _rng_engine;
dropout_desc_imp() : _rng_engine(dpct::get_default_queue(), 1) {}
};
std::shared_ptr<dropout_desc_imp> _imp;
void generate(sycl::queue *q, std::int64_t required_state_size,
std::int64_t num, void *buffer) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
sycl::event e_gen = oneapi::mkl::rng::generate(
oneapi::mkl::rng::bernoulli<std::int32_t>(1.f - _imp->_p),
_imp->_rng_engine, num, (std::int32_t *)buffer);
sycl::event e_save = q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e_gen);
cgh.host_task([=] {
oneapi::mkl::rng::save_state(_imp->_rng_engine,
_imp->_host_state.data());
});
});
q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size,
e_save);
#endif
}
public:
operator bool() const {
return bool(_imp);
}
dropout_desc &operator=(std::nullptr_t) {
_imp.reset();
return *this;
}
/// Initializing a dropout descriptor.
void init(){
_imp = std::make_shared<dropout_desc_imp>();
}
/// Setting a dropout descriptor with given parameters.
  /// \param [in] engine Engine of the dropout operation.
  /// \param [in] p Probability of a value being set to zero.
  /// \param [in] state Memory that stores the random generator state.
  /// \param [in] state_size Required size to store the random generator state.
  /// \param [in] seed Seed used to initialize the generator state.
void set(engine_ext &engine, float p, void *state, size_t state_size,
unsigned long long seed);
  /// Getting parameters from a dropout descriptor.
  /// \param [out] p Probability of a value being set to zero.
  /// \param [out] states Memory that stores the random generator state.
  /// \param [out] seed Seed used to initialize the generator state.
void get(float *p, void **states, unsigned long long *seed) const noexcept {
*seed = _imp->_seed;
*states = _imp->_state;
*p = _imp->_p;
}
  /// Getting the probability of a value being set to zero.
/// \returns Probability.
float get_probability() const noexcept { return _imp->_p; }
  /// Restoring a dropout descriptor from a stored state.
  /// \param [in] engine Engine of the dropout operation.
  /// \param [in] p Probability of a value being set to zero.
  /// \param [in] state Memory that stores the random generator state.
  /// \param [in] state_size Required size to store the random generator state.
  /// \param [in] seed Seed used to initialize the generator state.
void restore(engine_ext &engine, float p, void *state, size_t state_size,
unsigned long long seed);
friend class engine_ext;
};
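// Example (hypothetical sketch): typical dropout descriptor lifecycle,
// assuming `eng` is an initialized engine_ext and `state`/`state_size`
// describe device memory reserved for the random generator state.
//
//   dropout_desc ddesc;
//   ddesc.init();
//   ddesc.set(eng, /*p=*/0.5f, state, state_size, /*seed=*/1234ULL);
//   float p = ddesc.get_probability(); // 0.5f
//   // ... run dropout through the engine; later, recreate the descriptor:
//   ddesc.restore(eng, p, state, state_size, /*seed=*/1234ULL);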
namespace detail {
typedef std::string primitive_cache_key_type;
typedef std::list<primitive_cache_key_type> usage_list_type;
typedef struct {
::dnnl::primitive *primitive;
usage_list_type::iterator usage_it;
std::function<void(::dnnl::primitive *)> destructor;
sycl::event e;
} primitive_cache_value_type;
typedef std::unordered_map<primitive_cache_key_type, primitive_cache_value_type>
cache_map_type;
// The primitive cache uses an LRU replacement policy; the default cache
// capacity is 1024 entries.
class primitive_cache {
int _capacity = 1024;
usage_list_type usage;
cache_map_type cache_map;
void touch(cache_map_type::iterator it, sycl::event e = {},
bool update_event = false) {
if (it->second.usage_it != usage.begin()) {
const primitive_cache_key_type &key = it->first;
usage.erase(it->second.usage_it);
usage.push_front(key);
it->second.usage_it = usage.begin();
}
if (update_event) {
it->second.e = e;
}
}
void async_destruct_primitive(const primitive_cache_value_type &value) {
dpct::get_current_device().default_queue().submit([&](sycl::handler &cgh) {
cgh.depends_on(value.e);
cgh.host_task([=] { value.destructor(value.primitive); });
});
}
public:
::dnnl::primitive *get(const primitive_cache_key_type &key) {
auto it = cache_map.find(key);
if (it == cache_map.end()) {
return nullptr;
}
touch(it);
return it->second.primitive;
}
void put(const primitive_cache_key_type &key, ::dnnl::primitive *value,
std::function<void(::dnnl::primitive *)> destructor, sycl::event e) {
auto it = cache_map.find(key);
if (it != cache_map.end()) {
touch(it, e, true);
} else {
if (cache_map.size() == _capacity) {
auto last_primitive = cache_map.find(usage.back());
async_destruct_primitive(last_primitive->second);
cache_map.erase(usage.back());
usage.pop_back();
}
usage.push_front(key);
cache_map[key] = {value, usage.begin(), destructor, e};
}
}
~primitive_cache() {
for (auto &v : cache_map) {
async_destruct_primitive(v.second);
}
}
};
} // namespace detail
/// A class holding the oneDNN engine.
class engine_ext {
struct output_argument_info {
float _alpha;
float _beta;
int _name;
memory_desc_ext _desc;
void *_data;
output_argument_info(float alpha, float beta, int name,
memory_desc_ext desc, void *data)
: _alpha(alpha), _beta(beta), _name(name), _desc(desc), _data(data) {}
output_argument_info(float alpha, float beta, memory_desc_ext desc,
void *data)
: _alpha(alpha), _beta(beta), _name(0), _desc(desc), _data(data) {}
};
::dnnl::engine _eng;
::dnnl::stream _s;
sycl::queue *_q = nullptr;
std::map<void *, ::dnnl::memory> workspace_map;
std::int64_t _random_engine_state_size = -1;
detail::primitive_cache _primitive_cache;
::dnnl::memory &get_workspace(void *key) { return workspace_map[key]; }
void insert_workspace(void *key, ::dnnl::memory workspace) {
workspace_map[key] = workspace;
}
const ::dnnl::stream &get_stream() const { return _s; }
const ::dnnl::engine &get_engine() const { return _eng; }
void *allocate(const memory_desc_ext &desc, int count = 1) const;
::dnnl::memory::desc
compress_spatial_dimensions_to_channel(const ::dnnl::memory::desc &desc);
::dnnl::memory::desc
get_bn_scale_bias_mean_var_desc(const ::dnnl::memory::desc &desc,
batch_normalization_mode mode);
sycl::event batch_normalization_backward_internal(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias,
float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var);
sycl::event batch_normalization_forward_internal(
bool is_infer, batch_normalization_mode mode, float epsilon, float factor,
float alpha, const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
void *running_mean, void *running_var);
::dnnl::memory::desc
transfer_memory_desc_to_channel_major_format(const ::dnnl::memory::desc &desc);
::dnnl::memory::desc
bn_reorder_memory_to_channel_major_format(bool is_input, ::dnnl::memory::desc &desc,
void *src, void **cache,
std::vector<void *> &caches);
::dnnl::memory::desc
transfer_memory_desc_to_format_tag_any(const ::dnnl::memory::desc &desc){
return ::dnnl::memory::desc(desc.get_dims(), desc.get_data_type(),
::dnnl::memory::format_tag::any);
}
void allocate_and_reorder_memory_to_optimal(::dnnl::memory::desc &from_desc,
void *&from,
::dnnl::memory::desc &to_desc,
void *&to,
std::vector<void *> &caches) {
if (from_desc != to_desc) {
to = allocate(to_desc);
caches.push_back(to);
async_reorder(1.f, from_desc, from, 0.f, to_desc, to);
}
}
template <typename primitive_type, typename... args_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
create_primitive(args_type &&...args);
template <typename primitive_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
create_primitive_with_pd(const typename primitive_type::primitive_desc &pd);
template <typename primitive_type, typename... args_type>
typename primitive_type::primitive_desc
create_primitive_desc(args_type &&...args);
template <typename primitive_desc_type>
std::string generate_cache_key(const primitive_desc_type &pd);
void serialize_dims(std::stringstream &ss, const std::vector<int64_t> &dims) {
ss.write((char *)dims.data(), dims.size() * sizeof(int64_t));
};
void serialize_mem_desc(std::stringstream &ss,
const ::dnnl::memory::desc &desc) {
if (desc.is_zero()) {
return;
}
auto format_kind = desc.get_format_kind();
ss << desc.get_ndims() << (std::uint8_t)desc.get_data_type()
<< (std::uint8_t)format_kind;
serialize_dims(ss, desc.get_dims());
serialize_dims(ss, desc.get_strides());
if (format_kind == ::dnnl::memory::format_kind::blocked) {
ss << desc.get_inner_nblks();
serialize_dims(ss, desc.get_inner_blks());
serialize_dims(ss, desc.get_inner_idxs());
}
};
sycl::event execute_rnn_forward_primitive(
rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction,
rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt,
::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c,
int dst_c, int layer_size, int direction_num, int hidden_size,
int gate_num, int projection_size, std::vector<void *> &data,
std::vector<int> &offset, int iter_num, size_t *weight_size = nullptr,
size_t *workspace_size = nullptr, size_t *scratchpad_size = nullptr);
sycl::event rnn_forward_internal(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t workspace_size, void *workspace,
size_t scratchpad_size, void *scratchpad, bool is_get_execution_args,
size_t *weight_size_query, size_t *workspace_size_query,
size_t *scratchpad_size_query);
sycl::event execute_rnn_backward_primitive(
rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode,
::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag,
int seq_length, int batch_size, int src_c, int dst_c, int layer_size,
int direction_num, int hidden_size, int gate_num, int projection_size,
std::vector<void *> &data, std::vector<int> &offset, int iter_num);
void async_free(sycl::queue *q, sycl::event e,
std::unordered_map<int, ::dnnl::memory> *args,
std::vector<void *> device_ptrs = {}) {
q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
if (args) {
delete args;
}
for (auto ptr : device_ptrs) {
if (ptr) {
sycl::free(ptr, *_q);
}
}
});
});
};
bool
scale_parameter_preprocess(const std::vector<output_argument_info> &args);
template <typename primitive_type>
sycl::event
execute_primitive(const std::pair<detail::primitive_cache_key_type,
primitive_type *> &primitive,
std::unordered_map<int, ::dnnl::memory> *args,
const std::vector<output_argument_info> &extra_args = {},
const std::vector<void *> &device_ptrs = {});
template <typename T>
sycl::event fill_with_type(sycl::queue *q, void *src, const void *value,
size_t size_with_byte) {
return q->fill<T>(static_cast<T *>(src), *static_cast<const T *>(value),
size_with_byte / sizeof(T));
}
template <typename T> struct no_zero_op {
T operator()(T e) {
if (!e) {
return 1;
}
return e;
}
};
template <typename T>
void transform_no_zero_with_type(sycl::queue *q, void *src, void *dst,
size_t num) {
std::transform(oneapi::dpl::execution::make_device_policy(*q),
static_cast<T *>(src), static_cast<T *>(src) + num,
static_cast<T *>(dst), no_zero_op<T>());
}
void transform_no_zero(const memory_desc_ext &desc, void *src, void *dst);
::dnnl::memory::desc get_group_weight_desc(int group_count,
const memory_desc_ext &weight_desc);
void get_rnn_configuration(const ::dnnl::memory::desc &desc,
rnn_direction direction, rnn_mode mode,
dpct::library_data_t dt, int hidden_size,
::dnnl::memory::data_type *dnnl_dt,
::dnnl::memory::format_tag *tag,
int *projection_size, int *output_size,
int *seq_length, int *batch_size,
int *direction_num, int *gate_num);
public:
engine_ext() {}
operator bool() const {
return bool(_eng) && bool(_s) && bool(_q);
}
engine_ext &operator=(std::nullptr_t) {
_eng.reset(nullptr);
_s.reset(nullptr);
_q = nullptr;
return *this;
}
  /// Creating the oneDNN engine.
void create_engine() {
_eng = ::dnnl::sycl_interop::make_engine(
dpct::get_current_device(), dpct::get_current_device().get_context());
_s = ::dnnl::sycl_interop::make_stream(
_eng, dpct::get_current_device().default_queue());
_q = &dpct::get_current_device().default_queue();
}
  /// Setting the user's SYCL queue for a oneDNN engine.
/// \param [in] q Pointer to the SYCL queue.
void set_queue(sycl::queue *q) {
if (!q) {
throw std::runtime_error("set_queue: pointer must not be nullptr.");
}
if (!_eng) {
throw std::runtime_error("set_queue: current engine is invalid.");
}
if (q->get_context() != ::dnnl::sycl_interop::get_context(_eng)) {
throw std::runtime_error(
"set_queue: queue is mismatch with current engine context.");
}
_q = q;
_s = ::dnnl::sycl_interop::make_stream(_eng, *q);
}
/// Retrieving the user's SYCL queue set in the oneDNN engine.
/// \returns Pointer to the SYCL queue.
sycl::queue *get_queue() const { return _q; }
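  // Example (hypothetical sketch): creating an engine on the current dpct
  // device. set_queue() requires a queue whose context matches the engine's
  // context, so the queue used below is assumed to live in the same
  // device/context as the engine.
  //
  //   engine_ext eng;
  //   eng.create_engine();              // binds to the device's default queue
  //   sycl::queue *q = eng.get_queue(); // inspect or reuse that queue
  //   eng.set_queue(q);                 // rebind (same context, so it is valid)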
/// Setting all elements of a memory to a given value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] valuePtr Pointer to a single value.
void fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr);
  /// Copying the scaled data from one memory to another memory with a
  /// different description.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void reorder(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Scaling all the elements of a memory by a given factor.
/// \param [in] alpha Value to scaling factors.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] src Pointer to source data.
void scale(float alpha, const memory_desc_ext &src_desc, void *src);
/// Adding the scaled values of a memory to another memory.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
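  // Example (hypothetical sketch) of the alpha/beta convention shared by
  // reorder/scale/sum and the primitives below: conceptually
  // dst = alpha * op(src) + beta * dst, so beta = 0 overwrites the
  // destination and beta = 1 accumulates into it.
  //
  //   // assuming eng, the descriptors and the USM pointers are set up:
  //   eng.sum(/*alpha=*/1.f, src_desc, src, /*beta=*/1.f, dst_desc, dst);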
/// Computing a specified activation function value.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst);
/// Computing the gradient of a specified activation function.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
void
activation_backward(activation_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing a specified pooling function value.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward propagation.
void pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified pooling function.
  /// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential
/// source data.
/// \param [in] workspace Pointer to workspace used for backward
/// propagation.
void pooling_backward(pooling_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src,
::dnnl::memory *workspace = nullptr);
/// Computing a specified softmax function value.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void softmax_forward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the gradient of a specified softmax function.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
void softmax_backward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src);
/// Computing a specified local response normalization function value.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
void lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified local response normalization
/// function.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace used for backward propagation.
void lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &diff_src_desc,
void *diff_src, ::dnnl::memory *workspace = nullptr);
/// Setting all elements of a memory to a given value asynchronously.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] valuePtr Pointer to a single value.
/// \returns An event representing the fill operations.
sycl::event async_fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr);
  /// Copying the scaled data from one memory to another memory with a
  /// different description asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the reorder operations.
sycl::event async_reorder(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Scaling all the elements of a memory by a given factor asynchronously.
/// \param [in] alpha Value to scaling factors.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] src Pointer to source data.
/// \returns An event representing the scale operations.
sycl::event async_scale(float alpha, const memory_desc_ext &src_desc, void *src);
/// Adding the scaled values of a memory to another memory asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the sum operations.
sycl::event async_sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
  /// Performing the specified binary operation asynchronously.
/// \param [in] op Specified binary operation.
/// \param [in] alpha_0 Value to scaling factors used to scale the src_0
/// value.
/// \param [in] src_desc_0 Source 0 memory descriptor.
/// \param [in] src_0 Pointer to source 0 data.
/// \param [in] alpha_1 Value to scaling factors used to scale the src_1
/// value.
/// \param [in] src_desc_1 Source 1 memory descriptor.
/// \param [in] src_1 Pointer to source 1 data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the binary operations.
sycl::event async_binary(binary_op op, float alpha_0,
const memory_desc_ext &src_desc_0, void *src_0,
float alpha_1, const memory_desc_ext &src_desc_1,
void *src_1, float beta, const memory_desc_ext &dst_desc,
void *dst);
  /// Performing the specified reduction operation asynchronously.
/// \param [in] op Specified reduction operation.
/// \param [in] alpha Value to scaling factors used to scale the data
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the reduction operations.
sycl::event async_reduction(reduction_op op, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing a specified activation function value asynchronously.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the activation forward operations.
sycl::event async_activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst);
/// Computing the gradient of a specified activation function asynchronously.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the activation backward operations.
sycl::event
async_activation_backward(activation_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing a specified pooling function value asynchronously.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward propagation.
/// \returns An event representing the pooling forward operations.
sycl::event async_pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified pooling function asynchronously.
  /// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential
/// source data.
/// \param [in] workspace Pointer to workspace used for backward
/// propagation.
/// \returns An event representing the pooling backward operations.
sycl::event async_pooling_backward(pooling_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src,
::dnnl::memory *workspace = nullptr);
/// Computing a specified softmax function value asynchronously.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the softmax forward operations.
sycl::event async_softmax_forward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the gradient of a specified softmax function asynchronously.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the softmax backward operations.
sycl::event async_softmax_backward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src);
/// Computing a specified local response normalization function value
/// asynchronously.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the lrn forward operations.
sycl::event async_lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified local response normalization
/// function asynchronously.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the lrn backward operations.
sycl::event async_lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &diff_src_desc,
void *diff_src, ::dnnl::memory *workspace = nullptr);
/// Derives a memory descriptor for the batch normalization scale, bias, mean,
/// variance from the source memory descriptor and batch normalization mode.
/// \param [out] desc Derived memory descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] mode Batch normalization mode.
static void derive_batch_normalization_memory_desc(memory_desc_ext &desc,
const memory_desc_ext &src_desc,
batch_normalization_mode mode);
/// Derives a memory descriptor for the batch normalization scale, bias, mean,
/// variance from the source memory descriptor and batch normalization mode.
/// \param [out] scale_bias_desc Derived scale and bias memory descriptor.
/// \param [out] mean_var_desc Derived mean and var memory descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] mode Batch normalization mode.
static void derive_batch_normalization_memory_desc(memory_desc_ext &scale_bias_desc,
memory_desc_ext &mean_var_desc,
const memory_desc_ext &src_desc,
batch_normalization_mode mode);
  /// Getting the size of the workspace needed by batch normalization. The
  /// data stored in the workspace must be preserved between forward and
  /// backward.
  /// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] src_desc Source memory descriptor.
/// \returns Size of workspace.
size_t get_batch_normalization_workspace_size(
batch_normalization_ops ops, const memory_desc_ext &src_desc);
/// Computing a specified batch normalization inference stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean Pointer to mean data.
/// \param [in] var Pointer to variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_inference(
batch_normalization_mode mode, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *mean, void *var);
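  // Example (hypothetical sketch): per-channel batch normalization at
  // inference time. derive_batch_normalization_memory_desc() produces the
  // scale/bias/mean/variance descriptor from the data descriptor;
  // batch_normalization_mode::spatial is assumed to name the per-channel
  // mode defined earlier in this header.
  //
  //   memory_desc_ext sbmv_desc;
  //   engine_ext::derive_batch_normalization_memory_desc(
  //       sbmv_desc, src_desc, batch_normalization_mode::spatial);
  //   eng.async_batch_normalization_forward_inference(
  //       batch_normalization_mode::spatial, /*epsilon=*/1e-5f,
  //       /*alpha=*/1.f, src_desc, src, /*beta=*/0.f, dst_desc, dst,
  //       sbmv_desc, scale, bias, mean, var);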
/// Computing a specified batch normalization inference stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
  /// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_desc Scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [in] mean Pointer to mean data.
/// \param [in] var Pointer to variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_inference(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *mean, void *var);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
  /// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \param [in] workspace_size Size of workspace.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
  /// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_desc Scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \param [in] workspace_size Size of workspace.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *running_mean, void *running_var,
void *saved_mean, void *saved_var, size_t workspace_size, void *workspace);
/// Computing the gradient of a specified batch normalization function asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Value to scaling factors used to scale the computed
/// data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Value to scaling factors used to scale the prior value
/// in the data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] alpha_param Value to scaling factors used to scale the computed
/// parameter value.
/// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean,
/// variance memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] beta_param Value to scaling factors used to scale the prior value
/// in the parameter memory.
/// \param [in] diff_scale Pointer to differential scale data.
/// \param [in] diff_bias Pointer to differential bias data.
/// \param [in] saved_mean Pointer to optional cache saved mean data in forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in forward.
/// \returns An event representing the batch normalization backward operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale,
float beta_param, void *diff_scale, void *diff_bias, void *saved_mean,
void *saved_var);
/// Computing the gradient of a specified batch normalization function
/// asynchronously.
/// \param [in] mode Batch normalization mode.
  /// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Value to scaling factors used to scale the computed
/// data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Value to scaling factors used to scale the prior value
/// in the data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] diff_summand_desc Differential summand memory descriptor.
/// \param [out] diff_summand Pointer to differential summand data.
/// \param [in] alpha_param Value to scaling factors used to scale the computed
/// parameter value.
/// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean,
/// variance memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] beta_param Value to scaling factors used to scale the prior value
/// in the parameter memory.
/// \param [out] diff_scale Pointer to differential scale data.
/// \param [out] diff_bias Pointer to differential bias data.
/// \param [in] saved_mean Pointer to optional cache saved mean data in forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in forward.
/// \param [in] workspace_size Size of workspace.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the batch normalization backward operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc,
void *scale, void *bias, float beta_param, void *diff_scale,
void *diff_bias, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing the gradient of a specified batch normalization function
/// asynchronously.
/// \param [in] mode Batch normalization mode.
  /// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Value to scaling factors used to scale the computed
/// data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Value to scaling factors used to scale the prior value
/// in the data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] diff_summand_desc Differential summand memory descriptor.
/// \param [out] diff_summand Pointer to differential summand data.
/// \param [in] alpha_param Value to scaling factors used to scale the computed
/// parameter value.
/// \param [in] diff_scale_bias_desc Differential scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] beta_param Value to scaling factors used to scale the prior value
/// in the parameter memory.
/// \param [out] diff_scale Pointer to differential scale data.
/// \param [out] diff_bias Pointer to differential bias data.
  /// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [in] saved_mean Pointer to optional cache saved mean data in forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in forward.
/// \param [in] workspace_size Size of workspace.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the batch normalization backward operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale,
void *bias, float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing a specified convolution function value asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the convolution forward operations.
sycl::event async_convolution_forward(convolution_desc &desc, ::dnnl::algorithm alg,
float alpha, const memory_desc_ext &src_desc,
void *src, const memory_desc_ext &weight_desc,
void *weight, float beta,
const memory_desc_ext &dst_desc, void *dst);
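  // Example (hypothetical sketch): forward convolution using the descriptor
  // classes above. ::dnnl::algorithm::convolution_auto is assumed to be an
  // acceptable algorithm choice here.
  //
  //   int out_dims[4];
  //   conv_desc.get_forward_output_dim(src_desc, weight_desc, 4, out_dims);
  //   // ... build dst_desc from out_dims and allocate dst accordingly ...
  //   sycl::event e = eng.async_convolution_forward(
  //       conv_desc, ::dnnl::algorithm::convolution_auto, /*alpha=*/1.f,
  //       src_desc, src, weight_desc, weight, /*beta=*/0.f, dst_desc, dst);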
/// Computing a specified convolution function value asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] alpha_0 Value to scaling factors used to scale the data
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] alpha_1 Value to scaling factors used to scale the summand
/// value.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] bias_desc Bias memory descriptor.
/// \param [in] bias Pointer to bias data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the convolution forward operations.
sycl::event async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc,
float alpha_0, const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float alpha_1,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &bias_desc, void *bias,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the data gradient of a specified convolution function asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the convolution backward data operations.
sycl::event async_convolution_backward_data(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &weight_desc, void *weight,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing the weight gradient of a specified convolution function
/// asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] diff_weight_desc Differential weight memory descriptor.
/// \param [out] diff_weight Pointer to differential weight data.
/// \returns An event representing the convolution backward weight operations.
sycl::event async_convolution_backward_weight(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_weight_desc, void *diff_weight);
/// Computing the bias gradient of a specified convolution function
/// asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] diff_bias_desc Differential bias memory descriptor.
/// \param [out] diff_bias Pointer to differential bias data.
/// \returns An event representing the convolution backward bias operations.
sycl::event async_convolution_backward_bias(float alpha,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_bias_desc,
void *diff_bias);
  /// Getting the required weight space size for the specified RNN operation.
/// \param [in] desc RNN descriptor.
/// \param [out] weight_space_size Size of required weight space.
void rnn_get_weight_space_size(const rnn_desc &desc,
size_t *weight_space_size);
/// Getting the required scratchpad size and workspace size for a specified
/// rnn operation.
/// \param [in] desc RNN descriptor.
/// \param [in] kind Propagation kind.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] scratchpad_size Size of required scratchpad.
/// \param [out] workspace_size Size of required workspace.
void rnn_get_scratchpad_workspace_size(const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc,
size_t *scratchpad_size, size_t *workspace_size);
/// Computing a specified rnn function value asynchronously.
/// \param [in] desc RNN descriptor.
/// \param [in] kind Propagation kind.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] iter_desc Recurrent hidden state data memory descriptor.
/// \param [in] src_iter Pointer to input recurrent hidden state data.
/// \param [out] dst_iter Pointer to output recurrent hidden state data.
/// \param [in] iter_c_desc Recurrent cell state data memory descriptor.
/// \param [in] src_iter_c Pointer to input recurrent cell state data.
/// \param [out] dst_iter_c Pointer to output recurrent cell state data.
/// \param [in] weight_size Size of weight memory.
/// \param [in] weight Pointer to weight data.
/// \param [in] scratchpad_size Size of scratchpad memory.
/// \param [in] scratchpad Pointer to scratchpad data.
/// \param [in] workspace_size Size of workspace memory.
/// \param [in] workspace Pointer to workspace data.
/// \returns An event representing the status of rnn forward operations.
sycl::event async_rnn_forward(const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &iter_desc, void *src_iter,
void *dst_iter,
const memory_desc_ext &iter_c_desc,
void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight,
size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace);
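// Usage sketch (hypothetical names, engine and descriptor setup omitted):
// the weight, scratchpad and workspace sizes are queried first and the
// backing device allocations are provided by the caller.
//
//   size_t weight_size = 0, scratchpad_size = 0, workspace_size = 0;
//   handle.rnn_get_weight_space_size(desc, &weight_size);
//   handle.rnn_get_scratchpad_workspace_size(
//       desc, ::dnnl::prop_kind::forward_training, src_desc,
//       &scratchpad_size, &workspace_size);
//   // ... allocate weight/scratchpad/workspace with sycl::malloc_device ...
//   handle.async_rnn_forward(desc, ::dnnl::prop_kind::forward_training,
//                            src_desc, src, dst_desc, dst, iter_desc,
//                            src_iter, dst_iter, iter_c_desc, src_iter_c,
//                            dst_iter_c, weight_size, weight, scratchpad_size,
//                            scratchpad, workspace_size, workspace);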
/// Computing the data and weight gradient of a specified rnn function
/// asynchronously.
/// \param [in] desc RNN descriptor.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] iter_desc Recurrent hidden state data memory descriptor.
/// \param [in] src_iter Pointer to input recurrent hidden state data.
/// \param [in] diff_dst_iter Pointer to differential output recurrent hidden state data.
/// \param [out] diff_src_iter Pointer to differential input recurrent hidden state data.
/// \param [in] iter_c_desc Recurrent cell state data memory descriptor.
/// \param [in] src_iter_c Pointer to input recurrent cell state data.
/// \param [in] diff_dst_iter_c Pointer to differential output recurrent cell state data.
/// \param [out] diff_src_iter_c Pointer to differential input recurrent cell state data.
/// \param [in] weight_size Size of weight memory.
/// \param [in] weight Pointer to weight data.
/// \param [out] diff_weight Pointer to differential weight data.
/// \param [in] scratchpad_size Size of scratchpad memory.
/// \param [in] scratchpad Pointer to scratchpad data.
/// \param [in] workspace_size Size of workspace memory.
/// \param [in] workspace Pointer to workspace data.
/// \returns An event representing the status of rnn backward operations.
sycl::event async_rnn_backward(
const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst,
void *diff_dst, const memory_desc_ext &src_desc, void *src,
void *diff_src, const memory_desc_ext &iter_desc, void *src_iter,
void *diff_dst_iter, void *diff_src_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c,
void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size,
void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace);
/// Getting the required state size for the dropout operation.
/// \returns Required size of state.
size_t get_dropout_state_size();
/// Getting the required workspace size for a dropout operation.
/// \param [in] src_desc Source memory descriptor.
/// \returns Required size of workspace.
static size_t get_dropout_workspace_size(const memory_desc_ext &src_desc);
/// Computing a specified dropout function value asynchronously.
/// \param [in] desc Dropout descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] workspace Pointer to workspace data.
/// \param [in] workspace_size Size of workspace memory.
/// \returns An event representing the dropout forward operations.
sycl::event async_dropout_forward(dropout_desc &desc,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
void *workspace, size_t workspace_size);
/// Computing the gradient of a specified dropout function asynchronously.
/// \param [in] desc Dropout descriptor.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace data.
/// \param [in] workspace_size Size of workspace memory.
/// \returns An event representing the dropout backward operations.
sycl::event async_dropout_backward(dropout_desc &desc,
const memory_desc_ext &diff_dst_desc,
void *diff_dst,
const memory_desc_ext &diff_src_desc,
void *diff_src, void *workspace,
size_t workspace_size);
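// Usage sketch (hypothetical names, dropout_desc setup omitted): the per-call
// workspace is sized from the source descriptor via the static helper above,
// and the same workspace must be passed to both the forward and backward
// calls.
//
//   size_t ws_size = engine_ext::get_dropout_workspace_size(src_desc);
//   // ... allocate `workspace` of ws_size bytes with sycl::malloc_device ...
//   handle.async_dropout_forward(drop_desc, src_desc, src, dst_desc, dst,
//                                workspace, ws_size);
//   handle.async_dropout_backward(drop_desc, diff_dst_desc, diff_dst,
//                                 diff_src_desc, diff_src, workspace,
//                                 ws_size);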
};
inline
void dropout_desc::restore(engine_ext &engine, float p, void *state,
size_t state_size, unsigned long long seed) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
if (state) {
std::int64_t required_state_size = engine.get_dropout_state_size();
if (state_size < required_state_size) {
throw std::runtime_error("restore: state_size less than required state size.");
}
sycl::queue *q = engine.get_queue();
_imp->_p = p;
_imp->_seed = seed;
_imp->_state = state;
_imp->_host_state = std::vector<std::uint8_t>(required_state_size);
q->memcpy(_imp->_host_state.data(), _imp->_state, required_state_size).wait();
_imp->_rng_engine =
oneapi::mkl::rng::load_state<rng_engine_t>(
*q, _imp->_host_state.data());
}
#endif
}
inline
void dropout_desc::set(engine_ext &engine, float p, void *state,
size_t state_size, unsigned long long seed) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
_imp->_p = p;
if (state) {
std::int64_t required_state_size = engine.get_dropout_state_size();
if (state_size < required_state_size) {
throw std::runtime_error("set: no sufficient memory to save states.");
}
sycl::queue *q = engine.get_queue();
_imp->_seed = seed;
_imp->_state = state;
_imp->_host_state = std::vector<std::uint8_t>(required_state_size);
_imp->_rng_engine = rng_engine_t(*q, seed);
oneapi::mkl::rng::save_state(_imp->_rng_engine, _imp->_host_state.data());
q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size).wait();
}
#endif
}
inline
::dnnl::memory::data_type
memory_desc_ext::to_dnnl_data_type(dpct::library_data_t dt) {
using dnnl_dt = ::dnnl::memory::data_type;
switch (dt) {
case dpct::library_data_t::real_half:
return dnnl_dt::f16;
case dpct::library_data_t::real_bfloat16:
return dnnl_dt::bf16;
case dpct::library_data_t::real_float:
return dnnl_dt::f32;
case dpct::library_data_t::real_int32:
return dnnl_dt::s32;
case dpct::library_data_t::real_int8:
return dnnl_dt::s8;
case dpct::library_data_t::real_uint8:
return dnnl_dt::u8;
case dpct::library_data_t::real_int8_4:
return dnnl_dt::s8;
case dpct::library_data_t::real_int8_32:
return dnnl_dt::s8;
case dpct::library_data_t::real_uint8_4:
return dnnl_dt::u8;
default:
throw std::runtime_error("to_dnnl_data_type: unsupported data type.");
}
}
inline
dpct::library_data_t
memory_desc_ext::to_dpct_library_data_t(::dnnl::memory::data_type dt,
unsigned block_size) {
using dpct_dt = dpct::library_data_t;
using dnnl_dt = ::dnnl::memory::data_type;
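  // block_size is the innermost channel-blocking factor of the source memory
  // descriptor (1 for plain layouts); it selects the vectorized int8/uint8
  // library data types.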
switch (dt) {
case dnnl_dt::f16:
return dpct_dt::real_half;
case dnnl_dt::bf16:
return dpct_dt::real_bfloat16;
case dnnl_dt::f32:
return dpct_dt::real_float;
case dnnl_dt::s32:
return dpct_dt::real_int32;
case dnnl_dt::s8:
if (block_size == 4) {
return dpct_dt::real_int8_4;
} else if (block_size == 32) {
return dpct_dt::real_int8_32;
} else {
return dpct_dt::real_int8;
}
case dnnl_dt::u8:
if (block_size == 4) {
return dpct_dt::real_uint8_4;
} else {
return dpct_dt::real_uint8;
}
default:
throw std::runtime_error("to_dpct_library_data_t: unsupported data type "
"dnnl::memory::data_type::undef.");
}
}
inline
::dnnl::memory::format_tag
memory_desc_ext::to_dnnl_format_tag(dpct::library_data_t dt,
memory_format_tag tag) {
using dpct_dt = dpct::library_data_t;
using dpct_tag = memory_format_tag;
using dnnl_tag = ::dnnl::memory::format_tag;
switch (tag) {
case dpct_tag::nchw:
return dnnl_tag::nchw;
case dpct_tag::nhwc:
return dnnl_tag::nhwc;
default:
if (dt == dpct_dt::real_int8_32) {
return dnnl_tag::nChw32c;
} else {
return dnnl_tag::nChw4c;
}
}
}
inline
void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt, int n,
int c, int h, int w) {
_desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt),
to_dnnl_format_tag(dt, tag));
}
inline
void memory_desc_ext::set(dpct::library_data_t dt, int n, int c, int h, int w,
int n_stride, int c_stride, int h_stride,
int w_stride) {
_desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt),
{n_stride, c_stride, h_stride, w_stride});
}
inline
void memory_desc_ext::set(dpct::library_data_t dt, int ndims, const int dims[],
const int strides[]) {
_desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt),
{strides, strides + ndims});
}
inline
void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt,
int ndims, const int dims[]) {
_desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt),
to_dnnl_format_tag(dt, tag));
}
inline
void memory_desc_ext::set(rnn_memory_format_tag tag, dpct::library_data_t dt,
int t, int n, int c) {
if (tag == rnn_memory_format_tag::tnc) {
_desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt),
::dnnl::memory::format_tag::tnc);
} else if(tag == rnn_memory_format_tag::ntc) {
_desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt),
::dnnl::memory::format_tag::ntc);
} else {
throw std::runtime_error("set: unsupported memory format tag.");
}
}
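// A minimal sketch (hypothetical values, names written as they appear inside
// this header's namespace) of building descriptors with the setters above:
// a 4-D NCHW image tensor and a 3-D TNC sequence tensor.
//
//   memory_desc_ext img_desc, seq_desc;
//   img_desc.set(memory_format_tag::nchw, dpct::library_data_t::real_float,
//                /*n=*/8, /*c=*/3, /*h=*/224, /*w=*/224);
//   seq_desc.set(rnn_memory_format_tag::tnc,
//                dpct::library_data_t::real_float, /*t=*/16, /*n=*/8,
//                /*c=*/512);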
inline
void memory_desc_ext::get(dpct::library_data_t *dt, int *n, int *c, int *h,
int *w, int *n_stride, int *c_stride, int *h_stride,
int *w_stride) const {
unsigned block_size = 1;
auto dims = _desc.get_dims();
auto inner_blks = _desc.get_inner_blks();
auto strides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*n = dims[0];
*c = dims[1];
*h = dims[2];
*w = dims[3];
*n_stride = strides[0] / block_size;
*c_stride = strides[1] / block_size;
*h_stride = strides[2] / block_size;
*w_stride = strides[3] / block_size;
}
inline
void memory_desc_ext::get(dpct::library_data_t *dt, memory_format_tag *tag,
int *n, int *c, int *h, int *w) const {
unsigned block_size = 1;
*tag = memory_format_tag::nchw;
auto dims = _desc.get_dims();
auto strides = _desc.get_strides();
auto inner_blks = _desc.get_inner_blks();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
*tag = memory_format_tag::nchw_blocked;
}
if (strides[1] == 1 && dims[1] != 1) {
*tag = memory_format_tag::nhwc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*n = dims[0];
*c = dims[1];
*h = dims[2];
*w = dims[3];
}
inline
void memory_desc_ext::get(dpct::library_data_t *dt, rnn_memory_format_tag *tag,
int *t, int *n, int *c) const {
auto dims = _desc.get_dims();
auto strides = _desc.get_strides();
if (strides[0] >= strides[1]) {
*tag = rnn_memory_format_tag::tnc;
} else {
*tag = rnn_memory_format_tag::ntc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), 1);
*t = dims[0];
*n = dims[1];
*c = dims[2];
}
inline
void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt,
int *ndims, int dims[], int strides[]) const {
unsigned block_size = 1;
auto inner_blks = _desc.get_inner_blks();
auto adims = _desc.get_dims();
auto astrides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*ndims = _desc.get_ndims();
for (int index = 0; index < requested_ndims; index++) {
dims[index] = adims[index];
strides[index] =
astrides[index] / block_size;
}
}
inline
void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt,
memory_format_tag *tag, int *ndims,
int dims[]) const {
unsigned block_size = 1;
*tag = memory_format_tag::nchw;
auto inner_blks = _desc.get_inner_blks();
auto adims = _desc.get_dims();
auto astrides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
*tag = memory_format_tag::nchw_blocked;
}
if (astrides[1] == 1 &&
adims[1] != 1) {
*tag = memory_format_tag::nhwc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*ndims = _desc.get_ndims();
for (int index = 0; index < requested_ndims; index++) {
dims[index] = adims[index];
}
}
inline
void engine_ext::get_rnn_configuration(const ::dnnl::memory::desc &desc,
rnn_direction direction, rnn_mode mode,
dpct::library_data_t dt, int hidden_size,
::dnnl::memory::data_type *dnnl_dt,
::dnnl::memory::format_tag *tag,
int *projection_size, int *output_size,
int *seq_length, int *batch_size,
int *direction_num, int *gate_num) {
if (!desc.is_zero()) {
auto dims = desc.get_dims();
auto strides = desc.get_strides();
if (strides[0] >= strides[1]) {
*tag = ::dnnl::memory::format_tag::tnc;
*seq_length = dims[0];
*batch_size = dims[1];
} else {
*tag = ::dnnl::memory::format_tag::ntc;
*seq_length = dims[1];
*batch_size = dims[0];
}
}
if (direction == rnn_direction::bidirectional) {
*direction_num = 2;
} else {
*direction_num = 1;
}
if (mode == rnn_mode::lstm) {
*gate_num = 4;
} else if (mode == rnn_mode::gru) {
*gate_num = 3;
} else {
*gate_num = 1;
}
if (*projection_size != hidden_size) {
*output_size = *projection_size;
} else {
*projection_size = 0;
*output_size = hidden_size;
}
*dnnl_dt = memory_desc_ext::to_dnnl_data_type(dt);
}
inline
void *engine_ext::allocate(const memory_desc_ext &data_desc, int count) const {
size_t mem_size = data_desc.get_size();
void *mem = sycl::malloc_device(mem_size * count, *_q);
return mem;
}
inline
void engine_ext::transform_no_zero(const memory_desc_ext &desc, void *src, void *dst) {
::dnnl::memory::data_type dt = desc.get_desc().get_data_type();
size_t element_num = desc.get_element_num();
switch (dt) {
case ::dnnl::memory::data_type::f32:
transform_no_zero_with_type<float>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::f16:
transform_no_zero_with_type<sycl::half>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::s32:
transform_no_zero_with_type<int32_t>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::s8:
transform_no_zero_with_type<int8_t>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::u8:
transform_no_zero_with_type<uint8_t>(_q, src, dst, element_num);
break;
default:
throw std::runtime_error("transform_no_zero: unsupported data type.");
}
}
inline
::dnnl::memory::desc
engine_ext::get_group_weight_desc(int group_count,
const memory_desc_ext &weight_desc) {
if (group_count == 1) {
return weight_desc.get_desc();
}
auto help_weight_desc = weight_desc.get_desc();
int ndims = help_weight_desc.get_ndims();
if (!help_weight_desc.get_inner_blks().empty()) {
throw std::runtime_error("get_group_weight_desc: group convolution with "
"blocked weight memory unimplemented.");
}
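  // For a grouped convolution the weight tensor [O, I, H, W] (or its 5-D
  // variant) is reinterpreted as [G, O/G, I, H, W]: the leading output-channel
  // dimension is split by the group count and the remaining dimensions are
  // kept, using a goihw/gohwi (or goidhw/godhwi) format tag below.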
std::vector<int64_t> new_size;
auto old_size = weight_desc.get_dims();
new_size.push_back(group_count);
new_size.push_back(old_size[0] / group_count);
for (int index = 1; index < old_size.size(); index++) {
new_size.push_back(old_size[index]);
}
std::vector<int64_t> strides = help_weight_desc.get_strides();
::dnnl::memory::format_tag tag;
bool is_nhwc = (strides[1] == 1 && old_size[1] != 1);
if (ndims == 4) {
if (is_nhwc) {
tag = ::dnnl::memory::format_tag::gohwi;
} else {
tag = ::dnnl::memory::format_tag::goihw;
}
} else if (ndims == 5) {
if (is_nhwc) {
tag = ::dnnl::memory::format_tag::godhwi;
} else {
tag = ::dnnl::memory::format_tag::goidhw;
}
}
help_weight_desc =
::dnnl::memory::desc(new_size, weight_desc.get_desc().get_data_type(), tag);
return help_weight_desc;
}
inline
::dnnl::memory::desc engine_ext::compress_spatial_dimensions_to_channel(
const ::dnnl::memory::desc &desc) {
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
auto inner_blks = desc.get_inner_blks();
assert(ndims >= 4 && "ndims is at least 4.");
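  // Fold all spatial dimensions into the channel dimension, e.g.
  // [N, C, H, W] -> [N, C*H*W, 1, 1], preserving the data type and any
  // 4- or 32-element channel blocking of the original descriptor.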
std::vector<int64_t> compressed_dims(ndims);
compressed_dims[0] = dims[0];
compressed_dims[1] = dims[1];
for (int index = 2; index < ndims; index++) {
compressed_dims[1] = compressed_dims[1] * dims[index];
compressed_dims[index] = 1;
}
if (!inner_blks.empty() && inner_blks[0] == 4) {
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(),
::dnnl::memory::format_tag::nChw4c);
} else if (!inner_blks.empty() && inner_blks[0] == 32) {
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(),
::dnnl::memory::format_tag::nChw32c);
}
std::vector<int64_t> strides(ndims, 1);
strides[0] = compressed_dims[1];
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(), strides);
}
inline
::dnnl::memory::desc
engine_ext::get_bn_scale_bias_mean_var_desc(const ::dnnl::memory::desc &desc,
batch_normalization_mode mode) {
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
assert(ndims >= 4 && "ndims is at least 4.");
int channel_num = 1;
if (mode == batch_normalization_mode::spatial) {
channel_num = dims[1];
} else {
for (int index = 1; index < ndims; index++) {
channel_num = channel_num * dims[index];
}
}
return ::dnnl::memory::desc({channel_num}, desc.get_data_type(),
::dnnl::memory::format_tag::a);
}
inline
::dnnl::memory::desc engine_ext::transfer_memory_desc_to_channel_major_format(
const ::dnnl::memory::desc &desc) {
if (!desc.get_inner_blks().empty()) {
return desc;
}
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
if (ndims == 4) {
return ::dnnl::memory::desc(dims, desc.get_data_type(),
::dnnl::memory::format_tag::nchw);
}
return ::dnnl::memory::desc(dims, desc.get_data_type(),
::dnnl::memory::format_tag::ncdhw);
}
/// If alpha = 0 and beta = 1, the destination (dst = alpha * out +
/// beta * prior_dst) does not change. In that case this function returns
/// true, meaning the operation can exit directly.
inline
bool engine_ext::scale_parameter_preprocess(
const std::vector<output_argument_info> &args) {
bool direct_exit = true;
for (auto &arg : args) {
if (arg._alpha == 0.f) {
if (arg._beta != 1.f) {
async_scale(arg._beta, arg._desc, arg._data);
}
} else {
direct_exit = false;
}
}
return direct_exit;
}
inline
void engine_ext::derive_batch_normalization_memory_desc(
memory_desc_ext &scale_bias_desc, memory_desc_ext &mean_var_desc,
const memory_desc_ext &src_desc, batch_normalization_mode mode) {
derive_batch_normalization_memory_desc(scale_bias_desc, src_desc, mode);
derive_batch_normalization_memory_desc(mean_var_desc, src_desc, mode);
}
inline
void engine_ext::derive_batch_normalization_memory_desc(
memory_desc_ext &desc, const memory_desc_ext &src_desc,
batch_normalization_mode mode) {
int src_ndims = src_desc.get_desc().get_ndims();
auto inner_blks = src_desc.get_desc().get_inner_blks();
  if (src_desc.get_desc().get_ndims() != 4 &&
      src_desc.get_desc().get_ndims() != 5) {
throw std::runtime_error("derive_batch_normalization_memory_desc: only 4d "
"and 5d memory descriptor supported.");
}
std::vector<int64_t> dims = src_desc.get_dims();
dims[0] = 1;
if (mode == batch_normalization_mode::spatial) {
dims[2] = 1;
dims[3] = 1;
if (src_ndims == 5) {
dims[4] = 1;
}
}
auto data_type = src_desc.get_desc().get_data_type();
if (data_type == ::dnnl::memory::data_type::f16) {
data_type = ::dnnl::memory::data_type::f32;
}
if (!inner_blks.empty() && inner_blks[0] == 4) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nChw4c));
} else if (!inner_blks.empty() && inner_blks[0] == 32) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nChw32c));
} else {
if (src_ndims == 4) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nchw));
} else {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::ncdhw));
}
}
}
template <typename primitive_type>
sycl::event engine_ext::execute_primitive(
const std::pair<detail::primitive_cache_key_type, primitive_type *>
&primitive,
std::unordered_map<int, ::dnnl::memory> *args,
const std::vector<output_argument_info> &output_args,
const std::vector<void *> &device_ptrs) {
std::vector<void *> caches;
int output_arg_num = output_args.size();
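  // For an output argument with beta != 0 the primitive writes into a freshly
  // allocated cache buffer, and dst = alpha * cache + beta * prior_dst is
  // formed afterwards with async_sum. Otherwise the primitive writes directly
  // into the user buffer and only an optional alpha scaling follows.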
for (int i = 0; i < output_arg_num; i++) {
if (output_args[i]._beta != 0.f) {
auto cache = allocate(output_args[i]._desc);
caches.push_back(cache);
args->insert(
{output_args[i]._name,
::dnnl::memory(output_args[i]._desc.get_desc(), _eng, cache)});
} else {
args->insert(
{output_args[i]._name, ::dnnl::memory(output_args[i]._desc.get_desc(),
_eng, output_args[i]._data)});
}
}
auto e = ::dnnl::sycl_interop::execute(
*(static_cast<primitive_type *>(primitive.second)), _s, *args);
_primitive_cache.put(
primitive.first, primitive.second,
[](::dnnl::primitive *p) { delete static_cast<primitive_type *>(p); }, e);
int cache_index = 0;
for (int i = 0; i < output_arg_num; i++) {
if (output_args[i]._beta != 0.f) {
e = async_sum(output_args[i]._alpha, output_args[i]._desc,
caches[cache_index++], output_args[i]._beta, output_args[i]._desc,
output_args[i]._data);
} else {
if (output_args[i]._alpha != 1.f) {
e = async_scale(output_args[i]._alpha, output_args[i]._desc,
output_args[i]._data);
}
}
}
caches.insert(caches.end(), device_ptrs.begin(), device_ptrs.end());
async_free(_q, e, args, caches);
return e;
}
inline
::dnnl::memory::desc engine_ext::bn_reorder_memory_to_channel_major_format(
bool is_input, ::dnnl::memory::desc &desc, void *src, void **cache,
std::vector<void *> &caches) {
::dnnl::memory::desc result;
result = transfer_memory_desc_to_channel_major_format(desc);
if ((result != desc) || !src) {
*cache = allocate(desc);
if (is_input && src) {
async_reorder(1.f, desc, src, 0.f, result, *cache);
}
caches.push_back(*cache);
}
return result;
}
inline
sycl::event engine_ext::batch_normalization_backward_internal(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias,
float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var) {
if (scale_parameter_preprocess(
{{alpha_data, beta_data, diff_src_desc, diff_src},
{alpha_param, beta_param, diff_scale_bias_desc, diff_scale},
{alpha_param, beta_param, diff_scale_bias_desc, diff_bias}})) {
return sycl::event();
}
std::vector<void *> caches;
void *reordered_src = nullptr, *reordered_diff_dst = nullptr,
*reordered_diff_src = nullptr, *reordered_scale = nullptr,
*reordered_bias = nullptr, *reordered_diff_scale = nullptr,
*reordered_diff_bias = nullptr, *reordered_saved_mean = nullptr,
*reordered_saved_var = nullptr;
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc();
::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc();
::dnnl::memory::desc help_diff_scale_bias_desc =
diff_scale_bias_desc.get_desc();
::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc();
::dnnl::memory::desc actual_diff_src_desc = help_diff_src_desc;
::dnnl::memory::desc actual_diff_scale_bias_desc = help_diff_scale_bias_desc;
if (mode == batch_normalization_mode::per_activation) {
help_src_desc = bn_reorder_memory_to_channel_major_format(true, help_src_desc, src,
&reordered_src, caches);
help_diff_dst_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches);
help_diff_src_desc = bn_reorder_memory_to_channel_major_format(
false, help_diff_src_desc, diff_src, &reordered_diff_src, caches);
actual_diff_src_desc = help_diff_src_desc;
help_diff_scale_bias_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_scale_bias_desc, scale, &reordered_scale, caches);
actual_diff_scale_bias_desc = help_diff_scale_bias_desc;
if (bias) {
bn_reorder_memory_to_channel_major_format(true, help_diff_scale_bias_desc, bias,
&reordered_bias, caches);
}
bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc,
diff_scale, &reordered_diff_scale,
caches);
bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc,
diff_bias, &reordered_diff_bias, caches);
help_mean_var_desc = bn_reorder_memory_to_channel_major_format(
true, help_mean_var_desc, saved_mean, &reordered_saved_mean, caches);
bn_reorder_memory_to_channel_major_format(true, help_mean_var_desc, saved_var,
&reordered_saved_var, caches);
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_diff_src_desc =
compress_spatial_dimensions_to_channel(help_diff_src_desc);
help_diff_dst_desc =
compress_spatial_dimensions_to_channel(help_diff_dst_desc);
} else {
if ((help_src_desc != help_diff_dst_desc) ||
(help_src_desc != help_diff_src_desc) ||
(help_diff_dst_desc != help_diff_src_desc)) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_diff_dst_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches);
help_diff_src_desc = bn_reorder_memory_to_channel_major_format(
false, help_diff_src_desc, diff_src, &reordered_diff_src, caches);
actual_diff_src_desc = help_diff_src_desc;
}
}
help_diff_scale_bias_desc =
get_bn_scale_bias_mean_var_desc(help_diff_scale_bias_desc, mode);
help_mean_var_desc =
get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode);
auto forward_primitive =
create_primitive_desc<::dnnl::batch_normalization_forward>(
::dnnl::prop_kind::forward_training, help_src_desc,
help_diff_dst_desc, epsilon,
::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift);
auto primitive =
create_primitive<::dnnl::batch_normalization_backward>(
::dnnl::prop_kind::backward, help_diff_src_desc, help_diff_dst_desc,
help_src_desc, epsilon,
::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift, forward_primitive);
void *dst_cache = nullptr;
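  // The backward primitive needs mean/variance inputs. If the caller did not
  // supply saved statistics from the forward pass, allocate temporaries (and
  // a throw-away destination) and issue a helper forward pass first.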
if (!saved_mean && !saved_var) {
dst_cache = allocate(diff_dst_desc);
if (!reordered_saved_mean) {
reordered_saved_mean = allocate(mean_var_desc);
caches.push_back(reordered_saved_mean);
}
if (!reordered_saved_var) {
reordered_saved_var = allocate(mean_var_desc);
caches.push_back(reordered_saved_var);
}
if (!bias) {
_q->fill(reordered_bias, 0, diff_scale_bias_desc.get_size());
}
batch_normalization_forward_internal(
true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, diff_dst_desc,
dst_cache, diff_scale_bias_desc, scale, bias ? bias : reordered_bias,
mean_var_desc, reordered_saved_mean, reordered_saved_var, nullptr,
nullptr);
caches.push_back(dst_cache);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(help_src_desc, _eng,
reordered_src ? reordered_src : src)}},
{DNNL_ARG_SCALE,
{::dnnl::memory(help_diff_scale_bias_desc, _eng,
reordered_scale ? reordered_scale : scale)}},
{DNNL_ARG_MEAN,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_mean ? reordered_saved_mean
: saved_mean)}},
{DNNL_ARG_VARIANCE,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_var ? reordered_saved_var : saved_var)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(help_diff_src_desc, _eng,
reordered_diff_dst ? reordered_diff_dst : diff_dst)}}};
sycl::event e = execute_primitive(
primitive, execution_args,
{{alpha_data, beta_data, DNNL_ARG_DIFF_SRC, help_diff_src_desc,
reordered_diff_src ? reordered_diff_src : diff_src},
{alpha_param, beta_param, DNNL_ARG_DIFF_SCALE, help_diff_scale_bias_desc,
reordered_diff_scale ? reordered_diff_scale : diff_scale},
{alpha_param, beta_param, DNNL_ARG_DIFF_SHIFT, help_diff_scale_bias_desc,
reordered_diff_bias ? reordered_diff_bias : diff_bias}});
if (actual_diff_src_desc != diff_src_desc.get_desc() && reordered_diff_src) {
e = async_reorder(1.f, actual_diff_src_desc, reordered_diff_src, 0.f,
diff_src_desc, diff_src);
}
if (actual_diff_scale_bias_desc != diff_scale_bias_desc.get_desc() &&
reordered_diff_scale && reordered_diff_bias) {
async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_scale, 0.f,
diff_scale_bias_desc, diff_scale);
e = async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_bias, 0.f,
diff_scale_bias_desc, diff_bias);
}
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
inline
sycl::event engine_ext::batch_normalization_forward_internal(
bool is_infer, batch_normalization_mode mode, float epsilon, float factor,
float alpha, const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
void *running_mean, void *running_var) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
std::vector<void *> caches;
void *reordered_src = nullptr, *reordered_dst = nullptr,
*reordered_scale = nullptr, *reordered_bias = nullptr,
*reordered_saved_mean = nullptr, *reordered_saved_var = nullptr;
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
::dnnl::memory::desc help_scale_bias_desc = scale_bias_desc.get_desc();
::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc();
::dnnl::memory::desc actual_dst_desc = help_dst_desc;
::dnnl::memory::desc actual_mean_var_desc = help_mean_var_desc;
if (mode == batch_normalization_mode::per_activation) {
help_src_desc = bn_reorder_memory_to_channel_major_format(true, help_src_desc, src,
&reordered_src, caches);
help_dst_desc = bn_reorder_memory_to_channel_major_format(
false, help_dst_desc, dst, &reordered_dst, caches);
actual_dst_desc = help_dst_desc;
help_scale_bias_desc = bn_reorder_memory_to_channel_major_format(
true, help_scale_bias_desc, scale, &reordered_scale, caches);
bn_reorder_memory_to_channel_major_format(true, help_scale_bias_desc, bias,
&reordered_bias, caches);
help_mean_var_desc = bn_reorder_memory_to_channel_major_format(
is_infer, help_mean_var_desc, saved_mean,
&reordered_saved_mean, caches);
actual_mean_var_desc = help_mean_var_desc;
bn_reorder_memory_to_channel_major_format(is_infer,
help_mean_var_desc, saved_var,
&reordered_saved_var, caches);
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
} else {
if (help_src_desc != help_dst_desc) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_dst_desc = bn_reorder_memory_to_channel_major_format(
false, help_dst_desc, dst, &reordered_dst, caches);
actual_dst_desc = help_dst_desc;
}
}
help_scale_bias_desc =
get_bn_scale_bias_mean_var_desc(help_scale_bias_desc, mode);
help_mean_var_desc =
get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode);
::dnnl::prop_kind kind;
::dnnl::normalization_flags flag = ::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift;
if (is_infer) {
kind = ::dnnl::prop_kind::forward_inference;
flag = ::dnnl::normalization_flags::use_global_stats | flag;
} else {
kind = ::dnnl::prop_kind::forward_training;
}
auto primitive =
create_primitive<::dnnl::batch_normalization_forward>(
kind, help_src_desc, help_dst_desc, epsilon, flag);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(help_src_desc, _eng,
reordered_src ? reordered_src : src)}},
{DNNL_ARG_SCALE,
{::dnnl::memory(help_scale_bias_desc, _eng,
reordered_scale ? reordered_scale : scale)}},
{DNNL_ARG_SHIFT,
{::dnnl::memory(help_scale_bias_desc, _eng,
reordered_bias ? reordered_bias : bias)}},
{DNNL_ARG_MEAN,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_mean ? reordered_saved_mean
: saved_mean)}},
{DNNL_ARG_VARIANCE,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_var ? reordered_saved_var
: saved_var)}}};
sycl::event e = execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, help_dst_desc,
reordered_dst ? reordered_dst : dst}});
if (!is_infer && running_var) {
auto src_ndim = src_desc.get_desc().get_ndims();
auto src_dims = src_desc.get_dims();
int element_num = src_dims[0];
if (mode == batch_normalization_mode::spatial) {
for (int index = 2; index < src_ndim; index++) {
element_num *= src_dims[index];
}
}
float unbias_factor = element_num / (element_num - 1.f);
async_scale(1.f - factor, mean_var_desc, running_var);
e = async_sum(factor * unbias_factor, mean_var_desc,
reordered_saved_var ? reordered_saved_var : saved_var,
1.f, mean_var_desc, running_var);
}
if (!is_infer && running_mean) {
e = async_sum(factor, mean_var_desc,
reordered_saved_mean ? reordered_saved_mean : saved_mean,
(1.f - factor), mean_var_desc, running_mean);
}
if (reordered_dst && (actual_dst_desc != dst_desc.get_desc())) {
e = async_reorder(1.f, actual_dst_desc, reordered_dst, 0.f, dst_desc, dst);
}
if (!is_infer && reordered_saved_mean && reordered_saved_var && saved_mean &&
saved_var && (actual_mean_var_desc != mean_var_desc.get_desc())) {
e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_mean, 0.f,
mean_var_desc, saved_mean);
e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_var, 0.f,
mean_var_desc, saved_var);
}
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
inline
sycl::event engine_ext::rnn_forward_internal(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t workspace_size, void *workspace,
size_t scratchpad_size, void *scratchpad, bool is_get_execution_args,
size_t *weight_size_query, size_t *workspace_size_query,
size_t *scratchpad_size_query) {
::dnnl::memory::data_type src_dt;
::dnnl::memory::format_tag src_format_tag;
rnn_mode mode;
rnn_bias_mode bias_mode;
rnn_direction direction;
dpct::library_data_t dt;
int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0,
layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0,
seq_length = 1, batch_size = 1;
std::vector<void *> data = {src, dst, src_iter, dst_iter,
src_iter_c, dst_iter_c, weight, workspace,
scratchpad};
std::vector<int> offset(6, 0);
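  // Index layout of `data`: 0 src, 1 dst, 2 src_iter, 3 dst_iter,
  // 4 src_iter_c, 5 dst_iter_c, 6 weight, 7 workspace, 8 scratchpad.
  // `offset` tracks the running byte offsets consumed from the iteration,
  // weight and workspace buffers across layers/iterations.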
void *input_layer_cache = nullptr, *hidden_layer_cache = nullptr;
sycl::event e;
desc.get(&mode, &bias_mode, &direction, &dt, &input_size, &hidden_size,
&projection_size, &layer_size);
get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size,
&src_dt, &src_format_tag, &projection_size,
&output_size, &seq_length, &batch_size, &direction_num,
&gate_num);
if (direction == rnn_direction::bidirectional) {
// Here to combine the oneDNN bidirectional_sum and
// bidirectional_concat config, so call execute_rnn_forward_primitive
// twice.
if (layer_size > 1) {
if (!is_get_execution_args) {
input_layer_cache = allocate(src_desc);
hidden_layer_cache = allocate(src_desc);
_q->memcpy(input_layer_cache, src, src_desc.get_size());
}
data[0] = input_layer_cache;
data[1] = hidden_layer_cache;
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::bidirectional_sum, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, 1, direction_num, hidden_size, gate_num, projection_size,
data, offset, layer_size - 1, weight_size_query, workspace_size_query,
scratchpad_size_query);
data[0] =
((layer_size - 1) % 2 == 0) ? input_layer_cache : hidden_layer_cache;
data[1] = dst;
}
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::bidirectional_concat, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
2 * output_size, 1, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1, weight_size_query,
workspace_size_query, scratchpad_size_query);
} else {
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, layer_size, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1, weight_size_query,
workspace_size_query, scratchpad_size_query);
}
if (is_get_execution_args) {
return e;
}
if (input_layer_cache && hidden_layer_cache) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(input_layer_cache, *_q);
sycl::free(hidden_layer_cache, *_q);
});
});
}
return e;
}
inline
sycl::event engine_ext::execute_rnn_forward_primitive(
rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction,
rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt,
::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c,
int dst_c, int layer_size, int direction_num, int hidden_size, int gate_num,
int projection_size, std::vector<void *> &data, std::vector<int> &offset,
int iter_num, size_t *weight_size, size_t *workspace_size,
size_t *scratchpad_size) {
sycl::event e;
::dnnl::primitive *p = nullptr;
detail::primitive_cache_key_type key;
std::unordered_map<int, ::dnnl::memory> *execution_args;
::dnnl::memory::desc bias_desc(
{layer_size, direction_num, gate_num, hidden_size}, dt,
::dnnl::memory::format_tag::ldgo);
::dnnl::memory::desc weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc projection_desc;
if (projection_size) {
projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldio);
}
if (weight_size) {
*weight_size +=
(weight_layer_desc.get_size() + weight_iter_desc.get_size() +
projection_desc.get_size() + bias_desc.get_size()) *
iter_num;
return e;
}
::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag);
::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag);
::dnnl::memory::desc iter_desc(
{layer_size, direction_num, batch_size,
projection_size ? projection_size : hidden_size},
dt, ::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc iter_c_desc(
{layer_size, direction_num, batch_size, hidden_size}, dt,
::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc workspace_desc;
::dnnl::memory::desc scratchpad_desc;
::dnnl::primitive_attr attr;
attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
auto pd = create_primitive_desc<::dnnl::vanilla_rnn_forward>(
kind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc,
bias_desc, dst_desc, iter_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_forward>(pd);
key = r.first;
p = r.second;
}
} else if (mode == rnn_mode::gru) {
auto pd = create_primitive_desc<::dnnl::gru_forward>(
kind, direction, src_desc, iter_desc, weight_layer_desc,
weight_iter_desc, bias_desc, dst_desc, iter_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::gru_forward>(pd);
key = r.first;
p = r.second;
}
} else if (mode == rnn_mode::lstm) {
auto pd = create_primitive_desc<::dnnl::lstm_forward>(
kind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc,
weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc,
dst_desc, iter_desc, iter_c_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::lstm_forward>(pd);
key = r.first;
p = r.second;
}
}
for (int i = 0; i < iter_num; i++) {
void *in_cache = data[0], *out_cache = data[1], *dst_iter_c_cache = nullptr,
*dst_iter_cache = ((uint8_t *)(data[3]) + offset[1]);
if (mode == rnn_mode::lstm) {
dst_iter_c_cache = (uint8_t *)(data[4]) + offset[2];
}
if (!workspace_size) {
execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[0])}},
{DNNL_ARG_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[1])}},
{DNNL_ARG_SCRATCHPAD, {::dnnl::memory(scratchpad_desc, _eng, data[8])}}};
auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data,
int &offset) {
execution_args->insert(
{arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data + offset)}});
offset += d.get_size();
};
insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[0]);
insert_args(DNNL_ARG_DST_ITER, iter_desc, data[3], offset[1]);
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]);
insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[5], offset[3]);
}
insert_args(DNNL_ARG_WEIGHTS_LAYER, weight_layer_desc, data[6],
offset[4]);
insert_args(DNNL_ARG_WEIGHTS_ITER, weight_iter_desc, data[6], offset[4]);
if (projection_size) {
insert_args(DNNL_ARG_WEIGHTS_PROJECTION, projection_desc, data[6],
offset[4]);
}
if (bias_mode == rnn_bias_mode::none) {
_q->memset((uint8_t *)(data[6]) + offset[4], 0, bias_desc.get_size());
}
insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[4]);
if (kind == ::dnnl::prop_kind::forward_training) {
insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[5]);
}
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
execute_primitive<::dnnl::vanilla_rnn_forward>(
{key, static_cast<::dnnl::vanilla_rnn_forward *>(p)},
execution_args);
} else if (mode == rnn_mode::gru) {
execute_primitive<::dnnl::gru_forward>(
{key, static_cast<::dnnl::gru_forward *>(p)}, execution_args);
} else if (mode == rnn_mode::lstm) {
execute_primitive<::dnnl::lstm_forward>(
{key, static_cast<::dnnl::lstm_forward *>(p)}, execution_args);
}
if (i != iter_num - 1) {
std::swap(data[0], data[1]);
}
}
if (kind == ::dnnl::prop_kind::forward_training) {
if (workspace_size) {
*workspace_size +=
(src_desc.get_size() + dst_desc.get_size() + iter_desc.get_size());
if (mode == rnn_mode::lstm) {
*workspace_size += iter_c_desc.get_size();
}
} else {
_q->memcpy((uint8_t *)(data[7]) + offset[5], in_cache,
src_desc.get_size());
offset[5] += src_desc.get_size();
_q->memcpy((uint8_t *)(data[7]) + offset[5], out_cache,
dst_desc.get_size());
offset[5] += dst_desc.get_size();
_q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_cache,
iter_desc.get_size());
offset[5] += iter_desc.get_size();
if (mode == rnn_mode::lstm) {
_q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_c_cache,
iter_c_desc.get_size());
offset[5] += iter_c_desc.get_size();
}
}
}
}
return e;
}
inline
sycl::event engine_ext::execute_rnn_backward_primitive(
rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode,
::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag,
int seq_length, int batch_size, int src_c, int dst_c, int layer_size,
int direction_num, int hidden_size, int gate_num, int projection_size,
std::vector<void *> &data, std::vector<int> &offset, int iter_num) {
sycl::event e;
::dnnl::primitive *p = nullptr;
detail::primitive_cache_key_type key;
::dnnl::prop_kind fkind = ::dnnl::prop_kind::forward_training;
::dnnl::prop_kind bkind = ::dnnl::prop_kind::backward;
::dnnl::memory::desc bias_desc(
{layer_size, direction_num, gate_num, hidden_size}, dt,
::dnnl::memory::format_tag::ldgo);
::dnnl::memory::desc weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc diff_weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldgoi);
::dnnl::memory::desc diff_weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldgoi);
::dnnl::memory::desc projection_desc, diff_projection_desc;
if (projection_size) {
projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldio);
diff_projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldoi);
}
::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag);
::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag);
::dnnl::memory::desc iter_desc(
{layer_size, direction_num, batch_size,
projection_size ? projection_size : hidden_size},
dt, ::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc iter_c_desc(
{layer_size, direction_num, batch_size, hidden_size}, dt,
::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc workspace_desc;
::dnnl::memory::desc scratchpad_desc;
::dnnl::primitive_attr attr;
attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
auto fpd = create_primitive_desc<::dnnl::vanilla_rnn_forward>(
fkind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc,
bias_desc, dst_desc, iter_desc, attr);
auto pd = create_primitive_desc<::dnnl::vanilla_rnn_backward>(
bkind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, diff_weight_layer_desc,
diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc,
iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc,
iter_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_backward>(pd);
key = r.first;
p = r.second;
} else if (mode == rnn_mode::gru) {
auto fpd = create_primitive_desc<::dnnl::gru_forward>(
fkind, direction, src_desc, iter_desc, weight_layer_desc,
weight_iter_desc, bias_desc, dst_desc, iter_desc, attr);
auto pd = create_primitive_desc<::dnnl::gru_backward>(
bkind, direction, src_desc, iter_desc, diff_weight_layer_desc,
diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc,
iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc,
iter_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::gru_backward>(pd);
key = r.first;
p = r.second;
} else if (mode == rnn_mode::lstm) {
auto fpd = create_primitive_desc<::dnnl::lstm_forward>(
fkind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc,
weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc,
dst_desc, iter_desc, iter_c_desc, attr);
auto pd = create_primitive_desc<::dnnl::lstm_backward>(
bkind, direction, src_desc, iter_desc, iter_c_desc,
diff_weight_layer_desc, diff_weight_iter_desc, ::dnnl::memory::desc(),
diff_projection_desc, bias_desc, dst_desc, iter_desc, iter_c_desc,
src_desc, iter_desc, iter_c_desc, weight_layer_desc, weight_iter_desc,
::dnnl::memory::desc(), projection_desc, bias_desc, dst_desc, iter_desc,
iter_c_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::lstm_backward>(pd);
key = r.first;
p = r.second;
}
for (int i = 0; i < iter_num; i++) {
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DIFF_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[8])}},
{DNNL_ARG_DIFF_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[9])}},
{DNNL_ARG_SCRATCHPAD, {::dnnl::memory(scratchpad_desc, _eng, data[15])}}};
auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data,
int &offset) {
offset += d.get_size();
execution_args->insert(
{arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data - offset)}});
};
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]);
}
insert_args(DNNL_ARG_DST_ITER, iter_desc, data[7], offset[0]);
insert_args(DNNL_ARG_DST_LAYER, dst_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_LAYER, src_desc, data[7], offset[0]);
insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[1]);
insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[3]);
if (projection_size) {
insert_args(DNNL_ARG_WEIGHTS_PROJECTION, diff_projection_desc, data[6],
offset[3]);
}
insert_args(DNNL_ARG_WEIGHTS_ITER, diff_weight_iter_desc, data[6],
offset[3]);
insert_args(DNNL_ARG_WEIGHTS_LAYER, diff_weight_layer_desc, data[6],
offset[3]);
insert_args(DNNL_ARG_DIFF_SRC_ITER, iter_desc, data[10], offset[4]);
insert_args(DNNL_ARG_DIFF_DST_ITER, iter_desc, data[11], offset[5]);
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_DIFF_SRC_ITER_C, iter_c_desc, data[12], offset[6]);
insert_args(DNNL_ARG_DIFF_DST_ITER_C, iter_c_desc, data[13], offset[7]);
}
insert_args(DNNL_ARG_DIFF_BIAS, bias_desc, data[14], offset[8]);
if (bias_mode == rnn_bias_mode::none) {
_q->memset((uint8_t *)(data[14]) - offset[8], 0, bias_desc.get_size());
}
if (projection_size) {
insert_args(DNNL_ARG_DIFF_WEIGHTS_PROJECTION, projection_desc, data[14],
offset[8]);
}
insert_args(DNNL_ARG_DIFF_WEIGHTS_ITER, weight_iter_desc, data[14],
offset[8]);
insert_args(DNNL_ARG_DIFF_WEIGHTS_LAYER, weight_layer_desc, data[14],
offset[8]);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
e = execute_primitive<::dnnl::vanilla_rnn_backward>(
{key, static_cast<::dnnl::vanilla_rnn_backward *>(p)},
execution_args);
} else if (mode == rnn_mode::gru) {
e = execute_primitive<::dnnl::gru_backward>(
{key, static_cast<::dnnl::gru_backward *>(p)}, execution_args);
} else if (mode == rnn_mode::lstm) {
e = execute_primitive<::dnnl::lstm_backward>(
{key, static_cast<::dnnl::lstm_backward *>(p)}, execution_args);
}
if (i != iter_num - 1) {
std::swap(data[8], data[9]);
}
}
return e;
}
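// The macros below generate explicit specializations of
// engine_ext::generate_cache_key for the RNN and convolution primitive
// descriptors. The serialized string captures every field that distinguishes
// one primitive configuration from another, so cached primitives can be
// safely reused across calls.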
#define GENERATE_RNN_PRIMITIVE_KEY(name) \
template <> \
inline std::string \
engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \
const ::dnnl::name::primitive_desc &pd) { \
std::stringstream ss; \
ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \
<< (std::uint8_t)pd.get_cell_kind() << (std::uint8_t)pd.get_direction() \
<< (std::uint8_t)pd.get_algorithm(); \
serialize_mem_desc(ss, pd.src_layer_desc()); \
serialize_mem_desc(ss, pd.src_iter_desc()); \
serialize_mem_desc(ss, pd.dst_layer_desc()); \
serialize_mem_desc(ss, pd.dst_iter_desc()); \
serialize_mem_desc(ss, pd.diff_src_layer_desc()); \
serialize_mem_desc(ss, pd.diff_src_iter_desc()); \
serialize_mem_desc(ss, pd.diff_dst_layer_desc()); \
serialize_mem_desc(ss, pd.diff_dst_iter_desc()); \
serialize_mem_desc(ss, pd.src_iter_c_desc()); \
serialize_mem_desc(ss, pd.dst_iter_c_desc()); \
serialize_mem_desc(ss, pd.diff_src_iter_c_desc()); \
serialize_mem_desc(ss, pd.diff_dst_iter_c_desc()); \
return ss.str(); \
}
#define GENERATE_CONVOLUTION_PRIMITIVE_KEY(name, query_type) \
template <> \
inline std::string \
engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \
const ::dnnl::name::primitive_desc &pd) { \
std::stringstream ss; \
ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \
<< (std::uint8_t)pd.get_algorithm() \
<< (std::uint8_t)pd.get_primitive_attr().get_fpmath_mode() \
<< (std::uint8_t)pd.get_group_size(); \
serialize_dims(ss, pd.get_strides()); \
serialize_dims(ss, pd.get_dilations()); \
serialize_dims(ss, pd.get_padding_l()); \
serialize_mem_desc(ss, pd.src_desc()); \
serialize_mem_desc(ss, pd.diff_src_desc()); \
serialize_mem_desc(ss, pd.dst_desc()); \
serialize_mem_desc(ss, pd.diff_dst_desc()); \
serialize_mem_desc(ss, pd.query_type()); \
serialize_mem_desc(ss, pd.weights_desc()); \
serialize_mem_desc(ss, pd.diff_weights_desc()); \
return ss.str(); \
}
GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_forward)
GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_backward)
GENERATE_RNN_PRIMITIVE_KEY(lstm_forward)
GENERATE_RNN_PRIMITIVE_KEY(lstm_backward)
GENERATE_RNN_PRIMITIVE_KEY(gru_forward)
GENERATE_RNN_PRIMITIVE_KEY(gru_backward)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_forward, bias_desc)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_data, scratchpad_desc)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_weights, diff_bias_desc)
template <typename primitive_desc_type>
std::string engine_ext::generate_cache_key(const primitive_desc_type &pd) {
std::stringstream ss;
auto kind = pd.get_kind();
ss << (std::uint8_t)kind << (std::uint8_t)pd.get_prop_kind()
<< (std::uint8_t)pd.get_algorithm();
serialize_mem_desc(ss, pd.src_desc());
serialize_mem_desc(ss, pd.diff_src_desc());
serialize_mem_desc(ss, pd.dst_desc());
serialize_mem_desc(ss, pd.diff_dst_desc());
switch (kind) {
case ::dnnl::primitive::kind::batch_normalization:
ss << pd.get_epsilon() << (std::uint8_t)pd.get_flags();
case ::dnnl::primitive::kind::reduction:
ss << pd.get_p();
break;
case ::dnnl::primitive::kind::eltwise:
ss << pd.get_alpha() << pd.get_beta();
case ::dnnl::primitive::kind::lrn:
ss << pd.get_k();
break;
case ::dnnl::primitive::kind::pooling:
serialize_dims(ss, pd.get_strides());
serialize_dims(ss, pd.get_dilations());
serialize_dims(ss, pd.get_padding_l());
serialize_dims(ss, pd.get_kernel());
break;
case ::dnnl::primitive::kind::softmax:
ss << pd.get_axis();
break;
default:
break;
}
return ss.str();
}
template <typename primitive_type, typename... args_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
engine_ext::create_primitive(args_type &&...args) {
auto pd =
create_primitive_desc<primitive_type>(std::forward<args_type>(args)...);
return create_primitive_with_pd<primitive_type>(pd);
}
template <typename primitive_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
engine_ext::create_primitive_with_pd(
const typename primitive_type::primitive_desc &pd) {
detail::primitive_cache_key_type key = generate_cache_key(pd);
primitive_type *p = (primitive_type *)_primitive_cache.get(key);
if (!p) {
p = new primitive_type(pd);
}
return {key, p};
}
template <typename primitive_type, typename... args_type>
typename primitive_type::primitive_desc
engine_ext::create_primitive_desc(args_type &&...args) {
return typename primitive_type::primitive_desc(
_eng, std::forward<args_type>(args)...);
}
inline
void engine_ext::fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr) {
async_fill(src_desc, src, valuePtr).wait();
}
inline
void engine_ext::reorder(float alpha, const memory_desc_ext &src_desc,
void *src, float beta, const memory_desc_ext &dst_desc,
void *dst) {
async_reorder(alpha, src_desc, src, beta, dst_desc, dst).wait();
}
inline
void engine_ext::scale(float alpha, const memory_desc_ext &src_desc,
void *src) {
async_scale(alpha, src_desc, src).wait();
}
inline
void engine_ext::sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst) {
async_sum(alpha, src_desc, src, beta, dst_desc, dst).wait();
}
inline
void engine_ext::activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst) {
async_activation_forward(desc, alpha, src_desc, src, beta, dst_desc, dst)
.wait();
}
inline
void engine_ext::activation_backward(
activation_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
async_activation_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src)
.wait();
}
inline
void engine_ext::pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst,
::dnnl::memory *workspace) {
async_pooling_forward(desc, alpha, src_desc, src, beta, dst_desc, dst,
workspace).wait();
}
inline
void engine_ext::pooling_backward(
pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
async_pooling_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src,
workspace)
.wait();
}
inline
void engine_ext::softmax_forward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst) {
async_softmax_forward(alg, mode, alpha, src_desc, src, beta, dst_desc, dst)
.wait();
}
inline
void engine_ext::softmax_backward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &dst_desc,
void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src) {
async_softmax_backward(alg, mode, alpha, dst_desc, dst, diff_dst_desc,
diff_dst, beta, diff_src_desc, diff_src)
.wait();
}
inline
void engine_ext::lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
async_lrn_forward(desc, alpha, src_desc, src, beta, dst_desc, dst, workspace)
.wait();
}
inline
void engine_ext::lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src,
::dnnl::memory *workspace) {
async_lrn_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src, workspace)
.wait();
}
inline
sycl::event engine_ext::async_fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr) {
::dnnl::memory::data_type dt = src_desc.get_desc().get_data_type();
  size_t mem_size = src_desc.get_size();
switch (dt) {
case ::dnnl::memory::data_type::f32:
return fill_with_type<float>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::f16:
return fill_with_type<sycl::half>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::s32:
return fill_with_type<int32_t>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::s8:
return fill_with_type<int8_t>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::u8:
return fill_with_type<uint8_t>(_q, src, valuePtr, mem_size);
default:
throw std::runtime_error("async_fill: unsupported data type.");
}
}
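// Illustrative sketch of the synchronous fill() wrapper above, assuming an
// already-created engine_ext `handle` and a device allocation `data` large
// enough for the descriptor (both names are hypothetical). Guarded out so it
// never affects compilation.
#if 0
inline void fill_usage_sketch(engine_ext &handle, void *data) {
  ::dnnl::memory::desc md({1, 3, 224, 224}, ::dnnl::memory::data_type::f32,
                          ::dnnl::memory::format_tag::nchw);
  memory_desc_ext desc(md);
  float value = 0.f;
  handle.fill(desc, data, &value); // blocks until async_fill's event completes
}
#endif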
inline
sycl::event engine_ext::async_reorder(float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto pd = ::dnnl::reorder::primitive_desc(_eng, src_desc.get_desc(), _eng,
dst_desc.get_desc());
auto primitive = create_primitive_with_pd<::dnnl::reorder>(pd);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline
sycl::event engine_ext::async_scale(float alpha, const memory_desc_ext &src_desc,
void *src) {
if (alpha == 1.f) {
return sycl::event();
}
void *src_cache = allocate(src_desc);
_q->memcpy(src_cache, src, src_desc.get_size());
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward_inference, ::dnnl::algorithm::eltwise_linear,
src_desc.get_desc(), src_desc.get_desc(), alpha, 0.f);
auto args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src_cache)}};
return execute_primitive(primitive, args, {}, {src_cache});
}
inline sycl::event
engine_ext::async_sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst) {
if (alpha == 0.f && beta == 1.f) {
return sycl::event();
}
void *dst_cache = allocate(dst_desc);
_q->memcpy(dst_cache, dst, dst_desc.get_size());
auto pd = create_primitive_desc<::dnnl::sum>(
std::vector<float>{alpha, beta},
std::vector<::dnnl::memory::desc>{src_desc.get_desc(),
dst_desc.get_desc()});
std::stringstream ss;
ss << (std::uint8_t)pd.get_kind() << alpha << beta;
serialize_mem_desc(ss, pd.src_desc(0));
serialize_mem_desc(ss, pd.src_desc(1));
detail::primitive_cache_key_type key = ss.str();
::dnnl::sum *p = (::dnnl::sum *)_primitive_cache.get(key);
if (!p) {
p = new ::dnnl::sum(pd);
}
auto args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)},
{DNNL_ARG_MULTIPLE_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_MULTIPLE_SRC + 1,
::dnnl::memory(dst_desc.get_desc(), _eng, dst_cache)}};
return execute_primitive<::dnnl::sum>({key, p}, args, {}, {dst_cache});
}
inline
sycl::event engine_ext::async_binary(binary_op op, float alpha_0,
const memory_desc_ext &src_desc_0, void *src_0,
float alpha_1, const memory_desc_ext &src_desc_1,
void *src_1, float beta,
const memory_desc_ext &dst_desc, void *dst) {
::dnnl::algorithm onednn_algorithm;
switch (op) {
case binary_op::max:
onednn_algorithm = ::dnnl::algorithm::binary_max;
break;
case binary_op::min:
onednn_algorithm = ::dnnl::algorithm::binary_min;
break;
case binary_op::add:
onednn_algorithm = ::dnnl::algorithm::binary_add;
break;
case binary_op::sub:
onednn_algorithm = ::dnnl::algorithm::binary_sub;
break;
case binary_op::mul:
onednn_algorithm = ::dnnl::algorithm::binary_mul;
break;
case binary_op::div:
onednn_algorithm = ::dnnl::algorithm::binary_div;
break;
case binary_op::sqrt:
onednn_algorithm = ::dnnl::algorithm::eltwise_sqrt;
break;
case binary_op::neg:
onednn_algorithm = ::dnnl::algorithm::eltwise_linear;
break;
}
if (onednn_algorithm == ::dnnl::algorithm::eltwise_sqrt ||
onednn_algorithm == ::dnnl::algorithm::eltwise_linear) {
void *src_cache = nullptr, *dst_cache = nullptr;
src_cache = allocate(src_desc_0);
dst_cache = allocate(dst_desc);
_q->memcpy(src_cache, src_0, src_desc_0.get_size());
_q->memcpy(dst_cache, dst, dst_desc.get_size());
async_scale(alpha_0, src_desc_0, src_cache);
async_scale(beta, dst_desc, dst_cache);
// Let the output = 1 - input to simulate the behavior of neg.
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward_inference, onednn_algorithm,
src_desc_0.get_desc(), dst_desc.get_desc(), -1.f, 1.f);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(src_desc_0.get_desc(), _eng, src_cache)}}};
execute_primitive(
primitive, execution_args, {{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
auto e = async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(src_cache, *_q);
sycl::free(dst_cache, *_q);
});
});
return e;
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{};
void *src_0_cache = nullptr, *src_1_cache = nullptr, *dst_cache = nullptr;
src_0_cache = allocate(src_desc_0);
src_1_cache = allocate(src_desc_1);
dst_cache = allocate(dst_desc);
_q->memcpy(src_0_cache, src_0, src_desc_0.get_size());
_q->memcpy(src_1_cache, src_1, src_desc_1.get_size());
_q->memcpy(dst_cache, dst, dst_desc.get_size());
async_scale(alpha_0, src_desc_0, src_0_cache);
async_scale(alpha_1, src_desc_1, src_1_cache);
async_scale(beta, dst_desc, dst_cache);
execution_args->insert({DNNL_ARG_SRC_0, ::dnnl::memory(src_desc_0.get_desc(),
_eng, src_0_cache)});
execution_args->insert({DNNL_ARG_SRC_1, ::dnnl::memory(src_desc_1.get_desc(),
_eng, src_1_cache)});
auto primitive = create_primitive<::dnnl::binary>(
onednn_algorithm, src_desc_0.get_desc(), src_desc_1.get_desc(),
dst_desc.get_desc());
auto e = execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(dst_cache, *_q);
sycl::free(src_0_cache, *_q);
sycl::free(src_1_cache, *_q);
});
});
return e;
}
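// Note: async_binary above has two execution paths. binary_op::sqrt and
// binary_op::neg are emulated with an eltwise_forward primitive applied to a
// scaled copy of the first operand (for neg, eltwise_linear with alpha = -1
// and beta = 1 yields 1 - x, as the in-line comment notes); every other op
// scales both operands and the destination into temporaries and then runs a
// ::dnnl::binary primitive, freeing the temporaries in a host_task once the
// primitive's event completes.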
inline
sycl::event engine_ext::async_reduction(reduction_op op, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst) {
if (alpha == 0.f && beta == 1.f) {
return sycl::event();
}
float p = 2.f;
::dnnl::algorithm onednn_algorithm;
void *cache = nullptr;
switch (op) {
case reduction_op::amax:
cache = allocate(src_desc);
activation_desc adesc;
adesc.set_algorithm(::dnnl::algorithm::eltwise_abs);
async_activation_forward(adesc, 1.f, src_desc, src, 0.f, src_desc, cache);
onednn_algorithm = ::dnnl::algorithm::reduction_max;
src = cache;
break;
case reduction_op::max:
onednn_algorithm = ::dnnl::algorithm::reduction_max;
break;
case reduction_op::min:
onednn_algorithm = ::dnnl::algorithm::reduction_min;
break;
case reduction_op::sum:
onednn_algorithm = ::dnnl::algorithm::reduction_sum;
break;
case reduction_op::mean:
onednn_algorithm = ::dnnl::algorithm::reduction_mean;
break;
case reduction_op::mul:
onednn_algorithm = ::dnnl::algorithm::reduction_mul;
break;
case reduction_op::mul_no_zeros:
cache = allocate(src_desc);
transform_no_zero(src_desc, src, cache);
onednn_algorithm = ::dnnl::algorithm::reduction_mul;
src = cache;
break;
case reduction_op::norm1:
p = 1.f;
onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_power_p_sum;
break;
case reduction_op::norm2:
onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_sum;
break;
}
auto primitive = create_primitive<::dnnl::reduction>(
onednn_algorithm, src_desc.get_desc(), dst_desc.get_desc(), p, 0.f);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)}};
if (cache) {
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}},
{cache});
}
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline
sycl::event engine_ext::async_activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward, desc.get_algorithm(), src_desc.get_desc(),
dst_desc.get_desc(), desc.get_alpha(), desc.get_beta());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline
sycl::event engine_ext::async_activation_backward(
activation_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
::dnnl::memory::desc data_desc = dst_desc.get_desc();
auto alg = desc.get_algorithm();
if ((alg == ::dnnl::algorithm::eltwise_clip) ||
(alg == ::dnnl::algorithm::eltwise_linear) ||
(alg == ::dnnl::algorithm::eltwise_swish)) {
data_desc = src_desc.get_desc();
}
auto primitive = create_primitive<::dnnl::eltwise_backward>(
alg, diff_src_desc.get_desc(), diff_dst_desc.get_desc(), data_desc,
desc.get_alpha(), desc.get_beta(),
create_primitive_desc<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward, alg, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_alpha(), desc.get_beta()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline
sycl::event engine_ext::async_pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
int pooling_dim = desc.get_stride().size();
std::vector<int64_t> dilation(pooling_dim, 0);
auto primitive_desc =
create_primitive_desc<::dnnl::pooling_forward>(
::dnnl::prop_kind::forward_training, desc.get_algorithm(),
src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(),
desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng);
execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem});
if (workspace) {
*workspace = ws_mem;
} else {
insert_workspace(src, ws_mem);
}
auto primitive =
create_primitive_with_pd<::dnnl::pooling_forward>(primitive_desc);
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
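// Note on workspace handling: async_pooling_forward above creates the oneDNN
// workspace memory itself. If the caller passes a workspace pointer, the
// memory is returned through that pointer; otherwise it is stashed in an
// internal map keyed by the src pointer (insert_workspace), and
// async_pooling_backward below retrieves it with get_workspace(src) when no
// workspace is supplied.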
inline
sycl::event engine_ext::async_pooling_backward(
pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
int pooling_dim = desc.get_stride().size();
std::vector<int64_t> dilation(pooling_dim, 0);
auto primitive = create_primitive<::dnnl::pooling_backward>(
desc.get_algorithm(), diff_src_desc.get_desc(), diff_dst_desc.get_desc(),
desc.get_stride(), desc.get_kernel(), dilation, desc.get_padding(),
desc.get_padding(),
create_primitive_desc<::dnnl::pooling_forward>(
::dnnl::prop_kind::forward_training, desc.get_algorithm(),
src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(),
desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
if (workspace) {
execution_args->insert({DNNL_ARG_WORKSPACE, *workspace});
} else {
execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)});
}
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline
sycl::event engine_ext::async_softmax_forward(softmax_algorithm alg,
softmax_mode mode, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
if (mode == softmax_mode::instance) {
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(help_src_desc, _eng, src)}}};
::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate;
if (alg == softmax_algorithm::log) {
softmax_alg = ::dnnl::algorithm::softmax_log;
}
auto primitive = create_primitive<::dnnl::softmax_forward>(
::dnnl::prop_kind::forward, softmax_alg, help_src_desc,
help_dst_desc, 1);
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, memory_desc_ext(help_dst_desc), dst}});
}
inline
sycl::event engine_ext::async_softmax_backward(
softmax_algorithm alg, softmax_mode mode, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc();
if (mode == softmax_mode::instance) {
help_diff_src_desc =
compress_spatial_dimensions_to_channel(help_diff_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
help_diff_dst_desc =
compress_spatial_dimensions_to_channel(help_diff_dst_desc);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(help_dst_desc, _eng, dst)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(help_diff_dst_desc, _eng, diff_dst)}}};
::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate;
if (alg == softmax_algorithm::log) {
softmax_alg = ::dnnl::algorithm::softmax_log;
}
auto primitive = create_primitive<::dnnl::softmax_backward>(
softmax_alg, help_diff_src_desc, help_diff_dst_desc, help_dst_desc, 1,
create_primitive_desc<::dnnl::softmax_forward>(
::dnnl::prop_kind::forward, softmax_alg, help_diff_src_desc,
help_dst_desc, 1));
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC,
memory_desc_ext(help_diff_src_desc), diff_src}});
}
inline
sycl::event engine_ext::async_lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto primitive_desc = create_primitive_desc<::dnnl::lrn_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(),
desc.get_beta(), desc.get_k());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng);
execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem});
if (workspace) {
*workspace = ws_mem;
} else {
insert_workspace(src, ws_mem);
}
auto primitive =
create_primitive_with_pd<::dnnl::lrn_forward>(primitive_desc);
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline
sycl::event
engine_ext::async_lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
auto primitive = create_primitive<::dnnl::lrn_backward>(
::dnnl::algorithm::lrn_across_channels, diff_src_desc.get_desc(),
diff_dst_desc.get_desc(), src_desc.get_desc(), desc.get_local_size(),
desc.get_alpha(), desc.get_beta(), desc.get_k(),
create_primitive_desc<::dnnl::lrn_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(),
desc.get_beta(), desc.get_k()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
if (workspace) {
execution_args->insert({DNNL_ARG_WORKSPACE, *workspace});
} else {
execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)});
}
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline
size_t engine_ext::get_batch_normalization_workspace_size(
batch_normalization_ops ops, const memory_desc_ext &src_desc) {
if(ops == batch_normalization_ops::none) {
return 0;
}
return src_desc.get_size();
}
inline
sycl::event engine_ext::async_batch_normalization_forward_inference(
batch_normalization_mode mode, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *mean, void *var) {
return batch_normalization_forward_internal(
true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc, mean,
var, nullptr, nullptr);
}
inline
sycl::event engine_ext::async_batch_normalization_forward_inference(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *mean, void *var) {
bool has_post_op = (ops != batch_normalization_ops::none);
sycl::event e;
std::vector<void *> caches;
if (has_post_op) {
void *dst_cache = allocate(dst_desc);
caches.push_back(dst_cache);
batch_normalization_forward_internal(
true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, dst_desc, dst_cache,
scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr,
nullptr);
if (ops == batch_normalization_ops::add_activation) {
async_sum(1.f, summand_desc, summand, 1.f, dst_desc, dst_cache);
}
async_activation_forward(adesc, 1.f, dst_desc, dst_cache, 0.f, dst_desc,
dst_cache);
e = async_sum(alpha, dst_desc, dst_cache, beta, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
return batch_normalization_forward_internal(
true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr, nullptr);
}
inline
sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var) {
return batch_normalization_forward_internal(
false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc,
saved_mean, saved_var, running_mean, running_var);
}
inline
sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *running_mean, void *running_var,
void *saved_mean, void *saved_var, size_t workspace_size,
void *workspace) {
bool has_post_op = (ops != batch_normalization_ops::none);
sycl::event e;
if (has_post_op) {
if(workspace_size < dst_desc.get_desc().get_size()) {
      throw std::runtime_error("async_batch_normalization_forward_training_ex: "
                               "insufficient workspace.");
}
batch_normalization_forward_internal(
false, mode, epsilon, factor, 1.f, src_desc, src, 0.f, dst_desc,
workspace, scale_bias_desc, scale, bias, mean_var_desc,
saved_mean, saved_var, running_mean, running_var);
if (ops == batch_normalization_ops::add_activation) {
async_sum(1.f, summand_desc, summand, 1.f, dst_desc,
workspace);
}
return async_activation_forward(adesc, alpha, dst_desc, workspace,
beta, dst_desc, dst);
}
return batch_normalization_forward_internal(
false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_desc, scale, bias, mean_var_desc, saved_mean, saved_var,
running_mean, running_var);
}
inline
sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
return async_batch_normalization_forward_training(
mode, ops, adesc, epsilon, factor, alpha, src_desc, src, beta, dst_desc,
dst, summand_desc, summand, scale_bias_mean_var_desc, scale, bias,
scale_bias_mean_var_desc, running_mean, running_var, saved_mean,
saved_var, workspace_size, workspace);
}
inline
sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale,
float beta_param, void *diff_scale, void *diff_bias, void *saved_mean,
void *saved_var) {
return batch_normalization_backward_internal(
mode, epsilon, alpha_data, src_desc, src, diff_dst_desc, diff_dst,
beta_data, diff_src_desc, diff_src, alpha_param,
diff_scale_bias_mean_var_desc, scale, nullptr, beta_param, diff_scale,
diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var);
}
inline
sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale,
void *bias, float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
std::vector<void *> caches;
::dnnl::memory::desc real_diff_dst_desc = diff_dst_desc.get_desc();
void *real_diff_dst = diff_dst;
if (ops != batch_normalization_ops::none &&
workspace_size < dst_desc.get_desc().get_size()) {
    throw std::runtime_error("async_batch_normalization_backward_ex: "
                             "insufficient workspace.");
}
if (ops == batch_normalization_ops::add_activation) {
void *diff_summand_cache = allocate(diff_summand_desc);
async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc, diff_dst,
dst_desc, workspace, 0.f,
diff_summand_desc, diff_summand_cache);
caches.push_back(diff_summand_cache);
async_sum(alpha_data, diff_summand_desc, diff_summand_cache, beta_data,
diff_summand_desc, diff_summand);
real_diff_dst_desc = diff_summand_desc.get_desc();
real_diff_dst = diff_summand_cache;
} else if (ops == batch_normalization_ops::activation) {
void *diff_dst_cache = allocate(diff_dst_desc);
caches.push_back(diff_dst_cache);
async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc,
diff_dst, dst_desc, workspace,
0.f, diff_dst_desc, diff_dst_cache);
real_diff_dst = diff_dst_cache;
}
sycl::event e = batch_normalization_backward_internal(
mode, epsilon, alpha_data, src_desc, src, real_diff_dst_desc,
real_diff_dst, beta_data, diff_src_desc, diff_src, alpha_param,
diff_scale_bias_desc, scale, bias, beta_param, diff_scale, diff_bias,
mean_var_desc, saved_mean, saved_var);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
inline
sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc,
void *scale, void *bias, float beta_param, void *diff_scale,
void *diff_bias, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
return async_batch_normalization_backward(
mode, ops, adesc, epsilon, alpha_data, src_desc, src, dst_desc, dst,
diff_dst_desc, diff_dst, beta_data, diff_src_desc, diff_src,
diff_summand_desc, diff_summand, alpha_param,
diff_scale_bias_mean_var_desc, scale, bias, beta_param, diff_scale,
diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var,
workspace_size, workspace);
}
inline
sycl::event
engine_ext::async_convolution_forward(convolution_desc &desc, ::dnnl::algorithm alg,
float alpha, const memory_desc_ext &src_desc,
void *src, const memory_desc_ext &weight_desc,
void *weight, float beta,
const memory_desc_ext &dst_desc, void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto origin_src_md = src_desc.get_desc();
auto origin_dst_md = dst_desc.get_desc();
auto origin_weight_md = help_weight_desc;
auto src_md = transfer_memory_desc_to_format_tag_any(origin_src_md);
auto dst_md = transfer_memory_desc_to_format_tag_any(origin_dst_md);
auto weight_md = transfer_memory_desc_to_format_tag_any(origin_weight_md);
auto primitive = create_primitive<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, alg, src_md, weight_md, dst_md,
desc.get_stride(), desc.get_dilate(), desc.get_padding(),
desc.get_padding(), attr);
::dnnl::convolution_forward::primitive_desc pd =
::dnnl::convolution_forward::primitive_desc(
const_cast<dnnl_primitive_desc_t>(
primitive.second->get_primitive_desc()));
auto optimal_src_md = pd.src_desc();
auto optimal_dst_md = pd.dst_desc();
auto optimal_weight_md = pd.weights_desc();
void *optimal_src = src, *optimal_dst = dst, *optimal_weight = weight;
std::vector<void *> input_caches, output_caches;
allocate_and_reorder_memory_to_optimal(origin_src_md, src, optimal_src_md,
optimal_src, input_caches);
allocate_and_reorder_memory_to_optimal(origin_dst_md, dst, optimal_dst_md,
optimal_dst, output_caches);
allocate_and_reorder_memory_to_optimal(origin_weight_md, weight,
optimal_weight_md, optimal_weight,
input_caches);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(optimal_src_md, _eng, optimal_src)}},
{DNNL_ARG_WEIGHTS, {::dnnl::memory(optimal_weight_md, _eng, optimal_weight)}}};
auto e = execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, optimal_dst_md, optimal_dst}},
input_caches);
if(origin_dst_md != optimal_dst_md){
e = async_reorder(1.f, optimal_dst_md, optimal_dst, 0.f, origin_dst_md, dst);
}
async_free(_q, e, nullptr, output_caches);
return e;
}
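// Note: async_convolution_forward above builds the primitive with
// format_tag::any memory descriptors, then queries the resulting primitive
// descriptor for the layouts it actually prefers. When those differ from the
// caller's layouts, src and weight are reordered into temporaries before
// execution and the result is reordered back into the caller's dst layout
// afterwards; the temporaries are released asynchronously once the final
// event completes.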
inline
sycl::event engine_ext::async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc,
float alpha_0, const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float alpha_1,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &bias_desc, void *bias,
const memory_desc_ext &dst_desc, void *dst) {
int channel_num = bias_desc.get_element_num();
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::memory::desc help_bias_desc = {{channel_num},
bias_desc.get_desc().get_data_type(),
::dnnl::memory::format_tag::a};
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto primitive = create_primitive<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, alg, src_desc.get_desc(),
help_weight_desc, help_bias_desc, dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_BIAS, {::dnnl::memory(help_bias_desc, _eng, bias)}}};
void *cache = nullptr;
if (alpha_0 != 1.f) {
cache = allocate(help_weight_desc);
_q->memcpy(cache, weight, weight_desc.get_size());
async_scale(alpha_0, help_weight_desc, cache);
execution_args->insert(
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, cache)}});
execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}}, {cache});
} else {
execution_args->insert(
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}});
execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
}
async_sum(alpha_1, summand_desc, summand, 1.f, dst_desc, dst);
return async_activation_forward(adesc, 1.f, dst_desc, dst, 0.f, dst_desc, dst);
}
inline
sycl::event engine_ext::async_convolution_backward_data(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &weight_desc, void *weight,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_dst_desc, diff_dst}})) {
return sycl::event();
}
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto forward_primitive =
create_primitive_desc<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::convolution_auto, diff_src_desc.get_desc(),
help_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr);
auto primitive = create_primitive<::dnnl::convolution_backward_data>(
::dnnl::algorithm::convolution_auto, diff_src_desc.get_desc(),
help_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(),
forward_primitive, attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}},
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}}};
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline
sycl::event engine_ext::async_convolution_backward_weight(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_weight_desc, void *diff_weight) {
if (scale_parameter_preprocess(
{{alpha, beta, diff_weight_desc, diff_weight}})) {
return sycl::event();
}
auto help_diff_weight_desc =
get_group_weight_desc(desc.get_group_count(), diff_weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto forward_primitive =
create_primitive_desc<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::convolution_auto, src_desc.get_desc(),
help_diff_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr);
auto primitive =
create_primitive<::dnnl::convolution_backward_weights>(
::dnnl::algorithm::convolution_auto, src_desc.get_desc(),
help_diff_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(),
forward_primitive, attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_WEIGHTS,
help_diff_weight_desc, diff_weight}});
}
inline
sycl::event engine_ext::async_convolution_backward_bias(
float alpha, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta, const memory_desc_ext &diff_bias_desc, void *diff_bias) {
return async_reduction(reduction_op::sum, alpha, diff_dst_desc, diff_dst, beta,
diff_bias_desc, diff_bias);
}
inline
void engine_ext::rnn_get_weight_space_size(const rnn_desc &desc,
size_t *weight_space_size) {
*weight_space_size = 0;
rnn_forward_internal(desc, ::dnnl::prop_kind::forward_inference,
memory_desc_ext(), nullptr, memory_desc_ext(), nullptr,
memory_desc_ext(), nullptr, nullptr, memory_desc_ext(),
nullptr, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, true,
weight_space_size, nullptr, nullptr);
return;
}
inline
void engine_ext::rnn_get_scratchpad_workspace_size(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, size_t *scratchpad_size,
size_t *workspace_size) {
*workspace_size = 0;
*scratchpad_size = 0;
rnn_forward_internal(desc, kind, src_desc, nullptr, memory_desc_ext(),
nullptr, memory_desc_ext(), nullptr, nullptr,
memory_desc_ext(), nullptr, nullptr, 0, nullptr, 0,
nullptr, 0, nullptr, true, nullptr, workspace_size,
scratchpad_size);
return;
}
inline
sycl::event engine_ext::async_rnn_forward(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace) {
return rnn_forward_internal(
desc, kind, src_desc, src, dst_desc, dst, iter_desc, src_iter, dst_iter,
iter_c_desc, src_iter_c, dst_iter_c, weight_size, weight, workspace_size,
workspace, scratchpad_size, scratchpad, false, nullptr, nullptr,
nullptr);
}
inline
sycl::event engine_ext::async_rnn_backward(
const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst,
void *diff_dst, const memory_desc_ext &src_desc, void *src, void *diff_src,
const memory_desc_ext &iter_desc, void *src_iter, void *diff_dst_iter,
void *diff_src_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c,
void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size,
void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace) {
::dnnl::memory::data_type src_dt;
::dnnl::memory::format_tag src_format_tag;
rnn_mode mode;
rnn_memory_format_tag format_tag;
rnn_bias_mode bias_mode;
rnn_direction direction;
dpct::library_data_t dt;
int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0,
layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0,
seq_length = 1, batch_size = 1;
void *last_layer_cache = nullptr;
void *hidden_layer_cache = nullptr;
sycl::event e;
std::vector<int> offset(9, 0);
std::vector<void *> data = {
src,
dst,
(uint8_t *)src_iter + iter_desc.get_size(),
nullptr,
(uint8_t *)src_iter_c + iter_c_desc.get_size(),
nullptr,
(uint8_t *)weight + weight_size,
(uint8_t *)workspace + workspace_size,
diff_src,
diff_dst,
(uint8_t *)diff_src_iter + iter_desc.get_size(),
(uint8_t *)diff_dst_iter + iter_desc.get_size(),
(uint8_t *)diff_src_iter_c + iter_c_desc.get_size(),
(uint8_t *)diff_dst_iter_c + iter_c_desc.get_size(),
(uint8_t *)diff_weight + weight_size,
scratchpad};
desc.get(&mode, &bias_mode, &direction, &dt, &input_size, &hidden_size,
&projection_size, &layer_size);
get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size,
&src_dt, &src_format_tag, &projection_size,
&output_size, &seq_length, &batch_size, &direction_num,
&gate_num);
if (direction == rnn_direction::bidirectional) {
if (layer_size > 1) {
last_layer_cache = allocate(src_desc);
hidden_layer_cache = allocate(src_desc);
data[8] = last_layer_cache;
}
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::bidirectional_concat, bias_mode, src_dt,
src_format_tag, seq_length, batch_size, output_size, 2 * output_size, 1,
direction_num, hidden_size, gate_num, projection_size, data, offset, 1);
if (layer_size > 1) {
data[8] = hidden_layer_cache;
data[9] = last_layer_cache;
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::bidirectional_sum, bias_mode, src_dt,
src_format_tag, seq_length, batch_size, output_size, output_size, 1,
direction_num, hidden_size, gate_num, projection_size, data, offset,
layer_size - 1);
_q->memcpy(diff_src,
((layer_size - 1) % 2 == 0) ? last_layer_cache
: hidden_layer_cache,
src_desc.get_size());
}
} else {
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, layer_size, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1);
}
if (last_layer_cache && hidden_layer_cache) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(last_layer_cache, *_q);
sycl::free(hidden_layer_cache, *_q);
});
});
}
return e;
}
inline
size_t engine_ext::get_dropout_state_size(){
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
sycl::queue q;
if(_random_engine_state_size == -1) {
if(_q){
q = *_q;
} else {
q = dpct::get_current_device().default_queue();
}
auto rand_engine = rng_engine_t(q, 0);
_random_engine_state_size = oneapi::mkl::rng::get_state_size(rand_engine);
}
return _random_engine_state_size;
#endif
}
inline size_t
engine_ext::get_dropout_workspace_size(const memory_desc_ext &src_desc) {
return src_desc.get_size();
}
inline
sycl::event engine_ext::async_dropout_forward(dropout_desc &desc,
const memory_desc_ext &src_desc,
void *src,
const memory_desc_ext &dst_desc,
void *dst, void *workspace,
size_t workspace_size) {
if (workspace_size < src_desc.get_size()) {
throw std::runtime_error("async_dropout_forward: no sufficient workspace.");
}
float p = desc.get_probability();
if (p == 1.f) {
return _q->memset(dst, 0, dst_desc.get_size());
} else if (p == 0.f) {
return async_reorder(1.f, src_desc, src, 0.f, dst_desc, dst);
}
float scale_factor = 1.f / (1.f - p);
void *cache = workspace;
memory_desc_ext rng_data_desc(
::dnnl::memory::desc(src_desc.get_dims(), ::dnnl::memory::data_type::s32,
src_desc.get_strides()));
if (src_desc.get_desc().get_data_type() != ::dnnl::memory::data_type::s32) {
cache = allocate(rng_data_desc);
}
desc.generate(_q, _random_engine_state_size, rng_data_desc.get_element_num(),
(std::int32_t *)cache);
if (cache == workspace) {
async_scale(scale_factor, src_desc, workspace);
} else {
async_reorder(scale_factor, rng_data_desc, cache, 0.f, src_desc, workspace);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_0, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_SRC_1, ::dnnl::memory(src_desc.get_desc(), _eng, workspace)},
{DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)}};
auto primitive = create_primitive<::dnnl::binary>(
::dnnl::algorithm::binary_mul, src_desc.get_desc(), src_desc.get_desc(),
dst_desc.get_desc());
auto e = execute_primitive(primitive, execution_args);
if (cache != workspace) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { sycl::free(cache, *_q); });
});
}
return e;
}
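// Note: async_dropout_forward above implements dropout as an elementwise
// multiply. It generates an s32 mask from the descriptor's random-engine
// state, scales that mask by 1 / (1 - p) into the caller-provided workspace,
// and multiplies src by the workspace with a binary_mul primitive;
// async_dropout_backward below reuses the same workspace to scale diff_dst.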
inline
sycl::event engine_ext::async_dropout_backward(
dropout_desc &desc, const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &diff_src_desc, void *diff_src,
void *workspace, size_t workspace_size) {
float p = desc.get_probability();
if (p == 1.f) {
return _q->memset(diff_src, 0, diff_src_desc.get_size());
} else if (p == 0.f) {
return async_reorder(1.f, diff_dst_desc, diff_dst, 0.f, diff_src_desc,
diff_src);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_0,
::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)},
{DNNL_ARG_SRC_1,
::dnnl::memory(diff_dst_desc.get_desc(), _eng, workspace)},
{DNNL_ARG_DST, ::dnnl::memory(diff_src_desc.get_desc(), _eng, diff_src)}};
auto primitive = create_primitive<::dnnl::binary>(
::dnnl::algorithm::binary_mul, diff_dst_desc.get_desc(),
diff_dst_desc.get_desc(), diff_src_desc.get_desc());
return execute_primitive(primitive, execution_args);
}
} // namespace dnnl
} // namespace dpct
#endif // __DPCT_DNNL_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/lapack_utils.hpp | //==---- lapack_utils.hpp -------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_LAPACK_UTILS_HPP__
#define __DPCT_LAPACK_UTILS_HPP__
#include "memory.hpp"
#include "util.hpp"
#include "lib_common_utils.hpp"
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
namespace dpct {
namespace lapack {
/// Computes all eigenvalues and, optionally, eigenvectors of a real generalized
/// symmetric definite eigenproblem using a divide and conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2, or 3. Specifies the problem type to be solved.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrices A and B.
/// \param [in,out] a The symmetric matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b The symmetric matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] w Eigenvalues.
/// \param [in] scratchpad Scratchpad memory to be used by the routine
/// for storing intermediate results.
/// \param [in] scratchpad_size Size of scratchpad memory as a number of
/// floating point elements of type T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T>
inline int sygvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
T *w, T *scratchpad, int scratchpad_size, int *info) {
#ifdef DPCT_USM_LEVEL_NONE
auto info_buf = get_buffer<int>(info);
auto a_buffer = get_buffer<T>(a);
auto b_buffer = get_buffer<T>(b);
auto w_buffer = get_buffer<T>(w);
auto scratchpad_buffer = get_buffer<T>(scratchpad);
int info_val = 0;
int ret_val = 0;
try {
oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a_buffer, lda,
b_buffer, ldb, w_buffer, scratchpad_buffer,
scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_val = static_cast<int>(e.info());
ret_val = 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
ret_val = 1;
}
queue.submit([&, info_val](sycl::handler &cgh) {
auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh);
cgh.single_task<dpct_kernel_name<class sygvd_set_info, T>>(
[=]() { info_acc[0] = info_val; });
});
return ret_val;
#else
try {
oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a, lda, b, ldb, w,
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
int info_val = static_cast<int>(e.info());
queue.memcpy(info, &info_val, sizeof(int)).wait();
return 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, sizeof(int)).wait();
return 1;
}
queue.memset(info, 0, sizeof(int));
return 0;
#endif
}
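// Illustrative sketch of calling sygvd above, assuming device-resident float
// matrices a/b, an eigenvalue buffer w, and an int *info allocation (all
// names are hypothetical). The scratchpad is sized with the corresponding
// oneMKL query. Guarded out so it never affects compilation.
#if 0
inline void sygvd_usage_sketch(sycl::queue &q, int n, float *a, int lda,
                               float *b, int ldb, float *w, int *info) {
  std::int64_t scratchpad_size =
      oneapi::mkl::lapack::sygvd_scratchpad_size<float>(
          q, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, ldb);
  float *scratchpad = sycl::malloc_device<float>(scratchpad_size, q);
  // Returns 0 on success; *info receives e.info() if a LAPACK exception occurs.
  int ret = sygvd(q, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, a,
                  lda, b, ldb, w, scratchpad,
                  static_cast<int>(scratchpad_size), info);
  q.wait();
  sycl::free(scratchpad, q);
  (void)ret;
}
#endif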
/// Computes all the eigenvalues, and optionally, the eigenvectors of a complex
/// generalized Hermitian positive-definite eigenproblem using a divide and
/// conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2, or 3. Specifies the problem type to be solved.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrices A and B.
/// \param [in,out] a The Hermitian matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b The Hermitian matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] w Eigenvalues.
/// \param [in] scratchpad Scratchpad memory to be used by the routine
/// for storing intermediate results.
/// \param [in] scratchpad_size Size of scratchpad memory as a number of
/// floating point elements of type T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename Tw>
inline int hegvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
Tw *w, T *scratchpad, int scratchpad_size, int *info) {
using Ty = typename DataType<T>::T2;
#ifdef DPCT_USM_LEVEL_NONE
auto info_buf = get_buffer<int>(info);
auto a_buffer = get_buffer<Ty>(a);
auto b_buffer = get_buffer<Ty>(b);
auto w_buffer = get_buffer<Tw>(w);
auto scratchpad_buffer = get_buffer<Ty>(scratchpad);
int info_val = 0;
int ret_val = 0;
try {
oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, a_buffer, lda,
b_buffer, ldb, w_buffer, scratchpad_buffer,
scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_val = static_cast<int>(e.info());
ret_val = 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
ret_val = 1;
}
queue.submit([&, info_val](sycl::handler &cgh) {
auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh);
cgh.single_task<dpct_kernel_name<class hegvd_set_info, T>>(
[=]() { info_acc[0] = info_val; });
});
return ret_val;
#else
try {
oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, (Ty *)a, lda, (Ty *)b,
ldb, w, (Ty *)scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
int info_val = static_cast<int>(e.info());
queue.memcpy(info, &info_val, sizeof(int)).wait();
return 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, sizeof(int)).wait();
return 1;
}
queue.memset(info, 0, sizeof(int));
return 0;
#endif
}
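// Illustrative sketch of calling hegvd above with single-precision complex
// data stored as sycl::float2, assuming DataType<sycl::float2>::T2 maps to
// std::complex<float> as in the dpct helpers. All names are hypothetical;
// guarded out so it never affects compilation.
#if 0
inline void hegvd_usage_sketch(sycl::queue &q, int n, sycl::float2 *a, int lda,
                               sycl::float2 *b, int ldb, float *w, int *info) {
  std::int64_t scratchpad_size =
      oneapi::mkl::lapack::hegvd_scratchpad_size<std::complex<float>>(
          q, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, ldb);
  sycl::float2 *scratchpad =
      sycl::malloc_device<sycl::float2>(scratchpad_size, q);
  int ret = hegvd(q, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, a,
                  lda, b, ldb, w, scratchpad,
                  static_cast<int>(scratchpad_size), info);
  q.wait();
  sycl::free(scratchpad, q);
  (void)ret;
}
#endif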
/// Computes the Cholesky factorizations of a batch of symmetric (or Hermitian,
/// for complex data) positive-definite matrices.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in,out] a Array of pointers to matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
/// \param [in] group_size The batch size.
template <typename T>
inline int potrf_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n,
T *a[], int lda, int *info, int group_size) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
using Ty = typename DataType<T>::T2;
struct matrix_info_t {
oneapi::mkl::uplo uplo_info;
std::int64_t n_info;
std::int64_t lda_info;
std::int64_t group_size_info;
};
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->uplo_info = uplo;
matrix_info->n_info = n;
matrix_info->lda_info = lda;
matrix_info->group_size_info = group_size;
std::int64_t scratchpad_size = 0;
sycl::event e;
Ty *scratchpad = nullptr;
try {
scratchpad_size = oneapi::mkl::lapack::potrf_batch_scratchpad_size<Ty>(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->lda_info), 1, &(matrix_info->group_size_info));
scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue);
e = oneapi::mkl::lapack::potrf_batch(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info), (Ty **)a,
&(matrix_info->lda_info), 1, &(matrix_info->group_size_info),
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::batch_error const &be) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
"potrf_batch_scratchpad_size/potrf_batch"
<< std::endl
<< "reason: " << be.what() << std::endl
<< "number: " << be.info() << std::endl;
int i = 0;
auto &ids = be.ids();
std::vector<int> info_vec(group_size);
for (auto const &e : be.exceptions()) {
try {
std::rethrow_exception(e);
} catch (oneapi::mkl::lapack::exception &e) {
std::cerr << "Exception " << ids[i] << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_vec[i] = e.info();
i++;
}
}
queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
}
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
std::free(matrix_info);
sycl::free(scratchpad, queue);
});
});
queue.memset(info, 0, group_size * sizeof(int));
return 0;
#endif
}
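// Usage sketch for potrf_batch (illustrative only; assumes USM mode, an
// in-order queue, and float matrices that are already symmetric
// positive-definite on the device; all names are hypothetical and namespace
// qualifiers are omitted):
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   constexpr int n = 4, lda = 4, batch = 2;
//   float *a0 = sycl::malloc_device<float>(lda * n, q);
//   float *a1 = sycl::malloc_device<float>(lda * n, q);
//   // ... initialize a0 and a1 with SPD data ...
//   // Keep the array of matrix pointers in shared memory so it is readable
//   // from the host-side API (an assumption; device memory may also work).
//   float **a_array = sycl::malloc_shared<float *>(batch, q);
//   a_array[0] = a0; a_array[1] = a1;
//   int *info = sycl::malloc_device<int>(batch, q);
//   int err = potrf_batch(q, oneapi::mkl::uplo::lower, n, a_array, lda,
//                         info, batch);
//   q.wait();  // info[i] holds the per-matrix status after completion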
/// Solves a batch of systems of linear equations with Cholesky-factored
/// symmetric (Hermitian) positive-definite coefficient matrices.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] nrhs The number of right-hand sides.
/// \param [in,out] a Array of pointers to matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b Array of pointers to matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
/// \param [in] group_size The batch size.
template <typename T>
inline int potrs_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n,
int nrhs, T *a[], int lda, T *b[], int ldb, int *info,
int group_size) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
using Ty = typename DataType<T>::T2;
struct matrix_info_t {
oneapi::mkl::uplo uplo_info;
std::int64_t n_info;
std::int64_t nrhs_info;
std::int64_t lda_info;
std::int64_t ldb_info;
std::int64_t group_size_info;
};
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->uplo_info = uplo;
matrix_info->n_info = n;
matrix_info->nrhs_info = nrhs;
matrix_info->lda_info = lda;
matrix_info->ldb_info = ldb;
matrix_info->group_size_info = group_size;
std::int64_t scratchpad_size = 0;
sycl::event e;
Ty *scratchpad = nullptr;
try {
scratchpad_size = oneapi::mkl::lapack::potrs_batch_scratchpad_size<Ty>(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->nrhs_info), &(matrix_info->lda_info),
&(matrix_info->ldb_info), 1, &(matrix_info->group_size_info));
scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue);
e = oneapi::mkl::lapack::potrs_batch(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->nrhs_info), (Ty **)a, &(matrix_info->lda_info), (Ty **)b,
&(matrix_info->ldb_info), 1, &(matrix_info->group_size_info),
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::batch_error const &be) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
"potrs_batch_scratchpad_size/potrs_batch"
<< std::endl
<< "reason: " << be.what() << std::endl
<< "number: " << be.info() << std::endl;
int i = 0;
auto &ids = be.ids();
std::vector<int> info_vec(group_size);
for (auto const &e : be.exceptions()) {
try {
std::rethrow_exception(e);
} catch (oneapi::mkl::lapack::exception &e) {
std::cerr << "Exception " << ids[i] << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_vec[i] = e.info();
i++;
}
}
queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
}
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
std::free(matrix_info);
sycl::free(scratchpad, queue);
});
});
queue.memset(info, 0, group_size * sizeof(int));
return 0;
#endif
}
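// Usage sketch for potrs_batch (illustrative only; continues the potrf_batch
// sketch above, reusing q, n, lda, batch, a_array and info, and solving one
// right-hand side per matrix):
//
//   float *b0 = sycl::malloc_device<float>(n, q);
//   float *b1 = sycl::malloc_device<float>(n, q);
//   float **b_array = sycl::malloc_shared<float *>(batch, q);
//   b_array[0] = b0; b_array[1] = b1;
//   int err2 = potrs_batch(q, oneapi::mkl::uplo::lower, n, /*nrhs=*/1,
//                          a_array, lda, b_array, /*ldb=*/n, info, batch);
//   q.wait();  // each b_i is overwritten with the solution of A_i * x = b_i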
namespace detail {
template <template <typename> typename functor_t, typename... args_t>
inline int lapack_shim(sycl::queue &q, library_data_t a_type, int *info,
std::string const &lapack_api_name, args_t &&...args) {
auto handle_lapack_exception = [&](const oneapi::mkl::lapack::exception &e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
<< lapack_api_name << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl
<< "detail: " << e.detail() << std::endl;
if (e.info() < std::numeric_limits<int>::min() ||
e.info() > std::numeric_limits<int>::max()) {
throw std::runtime_error("e.info() exceeds the limit of int type");
}
int info_val = static_cast<int>(e.info());
if (info)
dpct::detail::dpct_memcpy(q, info, &info_val, sizeof(int),
memcpy_direction::host_to_device)
.wait();
return 1;
};
try {
switch (a_type) {
case library_data_t::real_float: {
functor_t<float>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::real_double: {
functor_t<double>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::complex_float: {
functor_t<std::complex<float>>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::complex_double: {
functor_t<std::complex<double>>()(std::forward<args_t>(args)...);
break;
}
default:
throw std::runtime_error("the data type is unsupported");
}
} catch (oneapi::mkl::lapack::batch_error const &be) {
try {
std::rethrow_exception(be.exceptions()[0]);
} catch (oneapi::mkl::lapack::exception &e) {
return handle_lapack_exception(e);
}
} catch (oneapi::mkl::lapack::exception const &e) {
return handle_lapack_exception(e);
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
if (info)
dpct::detail::dpct_memset(q, info, 0, sizeof(int)).wait();
return 1;
}
return 0;
}
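// Dispatch sketch (illustrative): lapack_shim selects the element type from
// the runtime a_type tag, instantiates the given functor template for that
// type, forwards the remaining arguments, and translates MKL/SYCL exceptions
// into the int return code and the info output. For example, the getrf
// wrapper further below is roughly equivalent to:
//
//   if (a_type == library_data_t::real_float)
//     detail::getrf_impl<float>()(q, m, n, a_type, a, lda, ipiv,
//                                 device_ws, ws_elements, info);
//   // ... and analogously for real_double / complex_float / complex_double.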
template <typename T> class working_memory {
public:
working_memory(std::size_t element_number, const sycl::queue &q) : _q(q) {
_ptr = dpct::detail::dpct_malloc(element_number * sizeof(T), _q);
}
auto get_memory() {
return dpct::detail::get_memory(reinterpret_cast<T *>(_ptr));
}
auto get_ptr() {
return _ptr;
}
void set_event(sycl::event e) { _e = e; }
~working_memory() {
if (_ptr) {
dpct::async_dpct_free({_ptr}, {_e}, _q);
}
}
private:
void *_ptr = nullptr;
sycl::event _e;
sycl::queue _q;
};
inline std::size_t byte_to_element_number(std::size_t size_in_byte,
                                          dpct::library_data_t element_type) {
auto dv = std::lldiv(
size_in_byte,
dpct::detail::library_data_size[static_cast<unsigned int>(element_type)] /
8);
if (dv.rem) {
throw std::runtime_error(
"size_in_byte is not divisible by the size of element (in bytes)");
}
return dv.quot;
}
inline std::size_t element_number_to_byte(std::size_t size_in_element,
                                          dpct::library_data_t element_type) {
auto dv = std::lldiv(
dpct::detail::library_data_size[static_cast<unsigned int>(element_type)],
8);
if (dv.rem) {
throw std::runtime_error(
"the size of element (in bits) is not divisible by 8");
}
return size_in_element * dv.quot;
}
inline oneapi::mkl::jobsvd char2jobsvd(signed char job) {
switch (job) {
case 'A':
return oneapi::mkl::jobsvd::vectors;
case 'S':
return oneapi::mkl::jobsvd::somevec;
case 'O':
return oneapi::mkl::jobsvd::vectorsina;
case 'N':
return oneapi::mkl::jobsvd::novec;
default:
throw std::runtime_error("the job type is unsupported");
}
}
template <typename T> struct getrf_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::getrf_scratchpad_size<T>(q, m, n, lda);
}
};
template <typename T> struct getrf_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
auto ipiv_data = dpct::detail::get_memory(ipiv);
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::getrf(q, m, n, a_data, lda, ipiv_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct getrs_impl {
void operator()(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, std::int64_t *ipiv, library_data_t b_type,
void *b, std::int64_t ldb, int *info) {
auto ipiv_data = dpct::detail::get_memory(ipiv);
std::int64_t device_ws_size = oneapi::mkl::lapack::getrs_scratchpad_size<T>(
q, trans, n, nrhs, lda, ldb);
working_memory<T> device_ws(device_ws_size, q);
auto device_ws_data = device_ws.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
oneapi::mkl::lapack::getrs(q, trans, n, nrhs, a_data, lda, ipiv_data,
b_data, ldb, device_ws_data, device_ws_size);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
device_ws.set_event(e);
}
};
template <typename T> struct geqrf_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::geqrf_scratchpad_size<T>(q, m, n, lda);
}
};
template <typename T> struct geqrf_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t tau_type, void *tau, void *device_ws,
std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto tau_data = dpct::detail::get_memory(reinterpret_cast<T *>(tau));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::geqrf(q, m, n, a_data, lda, tau_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct getrfnp_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
std::int64_t a_stride = m * lda;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::getrfnp_batch(q, m, n, a_data, lda, a_stride, 1,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T> struct gesvd_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
library_data_t u_type, std::int64_t ldu,
library_data_t vt_type, std::int64_t ldvt,
std::size_t &device_ws_size) {
device_ws_size = oneapi::mkl::lapack::gesvd_scratchpad_size<T>(
q, jobu, jobvt, m, n, lda, ldu, ldvt);
}
};
template <typename T> struct ElementType {
  using value_type = T;
};
template <typename T> struct ElementType<std::complex<T>> {
  using value_type = T;
};
template <typename T> struct gesvd_impl {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t s_type, void *s, library_data_t u_type,
void *u, std::int64_t ldu, library_data_t vt_type, void *vt,
std::int64_t ldvt, void *device_ws,
std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
    auto s_data = dpct::detail::get_memory(
        reinterpret_cast<typename ElementType<T>::value_type *>(s));
auto u_data = dpct::detail::get_memory(reinterpret_cast<T *>(u));
auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::gesvd(q, jobu, jobvt, m, n, a_data, lda, s_data,
u_data, ldu, vt_data, ldvt, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct gesvd_conj_impl : public gesvd_impl<T> {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t s_type, void *s, library_data_t u_type,
void *u, std::int64_t ldu, library_data_t vt_type, void *vt,
std::int64_t ldvt, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using base = gesvd_impl<T>;
base::operator()(q, jobu, jobvt, m, n, a_type, a, lda, s_type, s, u_type, u,
ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info);
auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt));
oneapi::mkl::blas::row_major::imatcopy(q, oneapi::mkl::transpose::conjtrans,
n, n, T(1.0f), vt_data, ldvt, ldvt);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T> struct potrf_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::potrf_scratchpad_size<T>(q, uplo, n, lda);
}
};
template <typename T> struct potrf_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
void *device_ws, std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::potrf(q, uplo, n, a_data, lda, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct potrs_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, library_data_t b_type, void *b,
std::int64_t ldb, int *info) {
std::int64_t device_ws_size = oneapi::mkl::lapack::potrs_scratchpad_size<T>(
q, uplo, n, nrhs, lda, ldb);
working_memory<T> device_ws(device_ws_size, q);
auto device_ws_data = device_ws.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
oneapi::mkl::lapack::potrs(q, uplo, n, nrhs, a_data, lda, b_data, ldb,
device_ws_data, device_ws_size);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
device_ws.set_event(e);
}
};
template <typename T> struct value_type_trait {
using value_type = T;
};
template <typename T> struct value_type_trait<std::complex<T>> {
using value_type = T;
};
template <typename T> auto lamch_s() {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
if constexpr (std::is_same_v<T, float>) {
return slamch("S");
} else if constexpr (std::is_same_v<T, double>) {
return dlamch("S");
}
throw std::runtime_error("the type is unsupported");
#endif
}
#define DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(FUNC, ...) \
do { \
if constexpr (std::is_floating_point_v<T>) { \
device_ws_size = oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \
} else { \
device_ws_size = oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \
} \
} while (0)
#define DISPATCH_FLOAT_FOR_CALCULATION(FUNC, ...) \
do { \
if constexpr (std::is_floating_point_v<T>) { \
oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \
} else { \
oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \
} \
} while (0)
template <typename T> struct syheevx_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, std::int64_t lda, void *vl, void *vu,
std::int64_t il, std::int64_t iu,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evx_scratchpad_size<T>, q, jobz, range,
uplo, n, lda, vl_value, vu_value, il, iu,
abstol, lda);
#endif
}
};
template <typename T> constexpr library_data_t get_library_data_t_from_type() {
if constexpr (std::is_same_v<T, float>) {
return library_data_t::real_float;
} else if constexpr (std::is_same_v<T, double>) {
return library_data_t::real_double;
} else if constexpr (std::is_same_v<T, sycl::float2> ||
std::is_same_v<T, std::complex<float>>) {
return library_data_t::complex_float;
} else if constexpr (std::is_same_v<T, sycl::double2> ||
std::is_same_v<T, std::complex<double>>) {
return library_data_t::complex_double;
}
throw std::runtime_error("the type is unsupported");
}
template <typename T> struct syheevx_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, library_data_t w_type,
void *w, void *device_ws, std::size_t device_ws_size,
int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
working_memory<T> z(n * lda, q);
working_memory<std::int64_t> m_device(1, q);
auto z_data = z.get_memory();
auto m_device_data = m_device.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_CALCULATION(evx, q, jobz, range, uplo, n, a_data, lda,
vl_value, vu_value, il, iu, abstol,
m_device_data, w_data, z_data, lda,
device_ws_data, device_ws_size);
dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T),
memcpy_direction::device_to_device, q);
dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t),
memcpy_direction::device_to_host, q);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
z.set_event(e);
m_device.set_event(e);
#endif
}
};
template <typename T> struct syhegvx_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, std::int64_t lda, std::int64_t ldb, void *vl,
void *vu, std::int64_t il, std::int64_t iu,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvx_scratchpad_size<T>, q, itype, jobz,
range, uplo, n, lda, ldb, vl_value,
vu_value, il, iu, abstol, lda);
#endif
}
};
template <typename T> struct syhegvx_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, void *a, std::int64_t lda, void *b,
std::int64_t ldb, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
working_memory<T> z(n * lda, q);
working_memory<std::int64_t> m_device(1, q);
auto z_data = z.get_memory();
auto m_device_data = m_device.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_CALCULATION(gvx, q, itype, jobz, range, uplo, n, a_data,
lda, b_data, ldb, vl_value, vu_value, il, iu,
abstol, m_device_data, w_data, z_data, lda,
device_ws_data, device_ws_size);
dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T),
memcpy_direction::device_to_device, q);
dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t),
memcpy_direction::device_to_host, q);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
z.set_event(e);
m_device.set_event(e);
#endif
}
};
template <typename T> struct syhegvd_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda,
std::int64_t ldb, std::size_t &device_ws_size) {
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvd_scratchpad_size<T>, q, itype, jobz,
uplo, n, lda, ldb);
}
};
template <typename T> struct syhegvd_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, void *a,
std::int64_t lda, void *b, std::int64_t ldb, void *w,
void *device_ws, std::size_t device_ws_size,
int *info) {
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(gvd, q, itype, jobz, uplo, n, a_data, lda,
b_data, ldb, w_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
inline oneapi::mkl::compz job2compz(const oneapi::mkl::job &job) {
oneapi::mkl::compz ret;
if (job == oneapi::mkl::job::novec) {
ret = oneapi::mkl::compz::novectors;
} else if (job == oneapi::mkl::job::vec) {
ret = oneapi::mkl::compz::vectors;
} else {
throw std::runtime_error("the job type is unsupported");
}
return ret;
}
template <typename T> struct syheev_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(ev_scratchpad_size<T>, q, jobz, uplo, n,
lda);
#endif
}
};
template <typename T> struct syheev_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::uplo uplo, std::int64_t n, void *a,
std::int64_t lda, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(ev, q, jobz, uplo, n, a_data, lda, w_data,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T> struct syheevd_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evd_scratchpad_size<T>, q, jobz, uplo, n,
lda);
}
};
template <typename T> struct syheevd_impl {
void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(evd, q, jobz, uplo, n, a_data, lda, w_data,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
#undef DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE
#undef DISPATCH_FLOAT_FOR_CALCULATION
template <typename T> struct trtri_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type,
std::int64_t lda, std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
device_ws_size =
oneapi::mkl::lapack::trtri_scratchpad_size<T>(q, uplo, diag, n, lda);
#endif
}
};
template <typename T> struct trtri_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type,
void *a, std::int64_t lda, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::trtri(q, uplo, diag, n, a_data, lda, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
} // namespace detail
/// Computes the size of workspace memory of getrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int getrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::getrf_scratchpad_size_impl>(
q, a_type, nullptr, "getrf_scratchpad_size", q, m, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the LU factorization of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by L and U. The unit
/// diagonal elements of L are not stored.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] ipiv The pivot indices. If \p ipiv is nullptr, non-pivoting
/// LU factorization is computed.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int getrf(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
if (ipiv == nullptr) {
return detail::lapack_shim<detail::getrfnp_impl>(
q, a_type, info, "getrfnp_batch", q, m, n, a_type, a, lda, ipiv,
device_ws, device_ws_size_in_element_number, info);
}
return detail::lapack_shim<detail::getrf_impl>(
q, a_type, info, "getrf", q, m, n, a_type, a, lda, ipiv, device_ws,
device_ws_size_in_element_number, info);
#endif
}
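// Usage sketch for getrf_scratchpad_size + getrf (illustrative only; assumes
// USM mode, an in-order queue and a square float matrix already resident on
// the device; all names are hypothetical and namespace qualifiers are
// omitted):
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   constexpr std::int64_t m = 8, n = 8, lda = 8;
//   float *a = sycl::malloc_device<float>(lda * n, q);
//   std::int64_t *ipiv = sycl::malloc_device<std::int64_t>(std::min(m, n), q);
//   int *info = sycl::malloc_device<int>(1, q);
//   std::size_t ws_bytes = 0;
//   getrf_scratchpad_size(q, m, n, library_data_t::real_float, lda, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   getrf(q, m, n, library_data_t::real_float, a, lda, ipiv, ws, ws_bytes,
//         info);
//   q.wait();  // a now holds L and U; ipiv holds the pivot indices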
/// Solves a system of linear equations with a LU-factored square coefficient
/// matrix, with multiple right-hand sides.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] trans Indicates the form of the linear equation.
/// \param [in] n The order of the matrix A and the number of rows in matrix B.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] a The input matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ipiv The pivot indices.
/// \param [in] b_type The data type of the matrix B.
/// \param [in, out] b The matrix B, whose columns are the right-hand sides
/// for the systems of equations.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int getrs(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, std::int64_t *ipiv, library_data_t b_type,
void *b, std::int64_t ldb, int *info) {
return detail::lapack_shim<detail::getrs_impl>(
q, a_type, info, "getrs_scratchpad_size/getrs", q, trans, n, nrhs, a_type,
a, lda, ipiv, b_type, b, ldb, info);
}
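// Usage sketch for getrs (illustrative only; continues the getrf sketch
// above, reusing q, n, lda, a, ipiv and info, and assumes b holds nrhs
// right-hand sides on the device):
//
//   constexpr std::int64_t nrhs = 2, ldb = 8;
//   float *b = sycl::malloc_device<float>(ldb * nrhs, q);
//   // ... initialize b ...
//   getrs(q, oneapi::mkl::transpose::nontrans, n, nrhs,
//         library_data_t::real_float, a, lda, ipiv,
//         library_data_t::real_float, b, ldb, info);
//   q.wait();  // b is overwritten with the solution X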
/// Computes the size of workspace memory of geqrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int geqrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::geqrf_scratchpad_size_impl>(
q, a_type, nullptr, "geqrf_scratchpad_size", q, m, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the QR factorization of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the factorization data.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] tau_type The data type of the array tau.
/// \param [out] tau The array containing scalars that define the elementary
/// reflectors for the matrix Q in its decomposition as a product of elementary
/// reflectors.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int geqrf(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t tau_type, void *tau, void *device_ws,
std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::geqrf_impl>(
q, a_type, info, "geqrf", q, m, n, a_type, a, lda, tau_type, tau,
device_ws, device_ws_size_in_element_number, info);
}
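// Usage sketch for geqrf_scratchpad_size + geqrf (illustrative only; assumes
// USM mode, an in-order queue and a device-resident float matrix; all names
// are hypothetical and namespace qualifiers are omitted):
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   constexpr std::int64_t m = 8, n = 4, lda = 8;
//   float *a = sycl::malloc_device<float>(lda * n, q);
//   float *tau = sycl::malloc_device<float>(std::min(m, n), q);
//   int *info = sycl::malloc_device<int>(1, q);
//   std::size_t ws_bytes = 0;
//   geqrf_scratchpad_size(q, m, n, library_data_t::real_float, lda, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   geqrf(q, m, n, library_data_t::real_float, a, lda,
//         library_data_t::real_float, tau, ws, ws_bytes, info);
//   q.wait();  // a holds R and the reflectors; tau holds the scalar factors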
/// Computes the size of workspace memory of gesvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] u_type The data type of the matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int gesvd_scratchpad_size(sycl::queue &q, signed char jobu,
signed char jobvt, std::int64_t m,
std::int64_t n, library_data_t a_type,
std::int64_t lda, library_data_t u_type,
std::int64_t ldu, library_data_t vt_type,
std::int64_t ldvt, std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu);
oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt);
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>(
q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu_enum, jobvt_enum, m,
n, a_type, lda, u_type, ldu, vt_type, ldvt, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the size of workspace memory of gesvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::vec or job::novec.
/// \param [in] all_vec Only has an effect when \p jobz is job::vec. If the
/// value is zero, all m columns of U are returned in the matrix U; otherwise
/// the first min(\p m, \p n) columns of U (the left singular vectors) are
/// returned in the matrix U.
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] u_type The data type of the matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \p a_type.
/// \param [out] host_ws_size The host workspace size as a number of elements
/// of type \p a_type. Currently the value is always zero.
inline int gesvd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
std::int64_t all_vec, std::int64_t m,
std::int64_t n, library_data_t a_type,
std::int64_t lda, library_data_t u_type,
std::int64_t ldu, library_data_t vt_type,
std::int64_t ldvt, int *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
oneapi::mkl::jobsvd jobu;
oneapi::mkl::jobsvd jobvt;
if (jobz == oneapi::mkl::job::vec) {
if (all_vec) {
jobu = jobvt = oneapi::mkl::jobsvd::somevec;
} else {
jobu = jobvt = oneapi::mkl::jobsvd::vectors;
}
} else if (jobz == oneapi::mkl::job::novec) {
jobu = jobvt = oneapi::mkl::jobsvd::novec;
} else {
throw std::runtime_error("the job type is unsupported");
}
std::size_t device_ws_size_64;
int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>(
q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu, jobvt, m, n, a_type,
lda, u_type, ldu, vt_type, ldvt, device_ws_size_64);
*device_ws_size = device_ws_size_64;
return ret;
}
/// Computes the singular value decomposition of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A and it will be overwritten according
/// to \p jobu and \p jobvt.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] s_type The data type of the matrix S.
/// \param [out] s The output matrix S.
/// \param [in] u_type The data type of the matrix U.
/// \param [out] u The output matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [out] vt The output matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int gesvd(sycl::queue &q, signed char jobu, signed char jobvt,
std::int64_t m, std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, library_data_t s_type, void *s,
library_data_t u_type, void *u, std::int64_t ldu,
library_data_t vt_type, void *vt, std::int64_t ldvt,
void *device_ws, std::size_t device_ws_size, int *info) {
oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu);
oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt);
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::gesvd_impl>(
q, a_type, info, "gesvd", q, jobu_enum, jobvt_enum, m, n, a_type, a, lda,
s_type, s, u_type, u, ldu, vt_type, vt, ldvt, device_ws,
device_ws_size_in_element_number, info);
}
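// Usage sketch for the char-job gesvd path (illustrative only; assumes USM
// mode, an in-order queue and a device-resident float matrix; all names are
// hypothetical and namespace qualifiers are omitted):
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   constexpr std::int64_t m = 6, n = 4, lda = 6, ldu = 6, ldvt = 4;
//   float *a  = sycl::malloc_device<float>(lda * n, q);
//   float *s  = sycl::malloc_device<float>(std::min(m, n), q);
//   float *u  = sycl::malloc_device<float>(ldu * m, q);
//   float *vt = sycl::malloc_device<float>(ldvt * n, q);
//   int *info = sycl::malloc_device<int>(1, q);
//   std::size_t ws_bytes = 0;
//   gesvd_scratchpad_size(q, 'A', 'A', m, n, library_data_t::real_float, lda,
//                         library_data_t::real_float, ldu,
//                         library_data_t::real_float, ldvt, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   gesvd(q, 'A', 'A', m, n, library_data_t::real_float, a, lda,
//         library_data_t::real_float, s, library_data_t::real_float, u, ldu,
//         library_data_t::real_float, vt, ldvt, ws, ws_bytes, info);
//   q.wait();  // s holds the singular values; u and vt hold the vectors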
/// Computes the singular value decomposition of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::vec or job::novec.
/// \param [in] all_vec Only has an effect when \p jobz is job::vec. If the
/// value is zero, all m columns of U are returned in the matrix U; otherwise
/// the first min(\p m, \p n) columns of U (the left singular vectors) are
/// returned in the matrix U.
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A and it will be overwritten according
/// to \p jobz and \p all_vec.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] s_type The data type of the matrix S.
/// \param [out] s The output matrix S.
/// \param [in] u_type The data type of the matrix U.
/// \param [out] u The output matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [out] vt The output matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \p a_type.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int gesvd(sycl::queue &q, oneapi::mkl::job jobz, std::int64_t all_vec,
std::int64_t m, std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, library_data_t s_type, void *s,
library_data_t u_type, void *u, std::int64_t ldu,
library_data_t vt_type, void *vt, std::int64_t ldvt,
void *device_ws, std::size_t device_ws_size, int *info) {
oneapi::mkl::jobsvd jobu;
oneapi::mkl::jobsvd jobvt;
if (jobz == oneapi::mkl::job::vec) {
if (all_vec) {
jobu = jobvt = oneapi::mkl::jobsvd::somevec;
} else {
jobu = jobvt = oneapi::mkl::jobsvd::vectors;
}
} else if (jobz == oneapi::mkl::job::novec) {
jobu = jobvt = oneapi::mkl::jobsvd::novec;
} else {
throw std::runtime_error("the job type is unsupported");
}
  return detail::lapack_shim<detail::gesvd_conj_impl>(
      q, a_type, info, "gesvd", q, jobu, jobvt, m, n, a_type, a, lda, s_type, s,
      u_type, u, ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info);
}
/// Computes the size of workspace memory of potrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int potrf_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type,
std::int64_t lda, std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::potrf_scratchpad_size_impl>(
q, a_type, nullptr, "potrf_scratchpad_size", q, uplo, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the Cholesky factorization of a symmetric (Hermitian)
/// positive-definite matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the Cholesky factor U
/// or L, as specified by \p uplo.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int potrf(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
void *device_ws, std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::potrf_impl>(
q, a_type, info, "potrf", q, uplo, n, a_type, a, lda, device_ws,
device_ws_size_in_element_number, info);
}
/// Solves a system of linear equations with a Cholesky-factored symmetric
/// (Hermitian) positive-definite coefficient matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A and the number of rows in matrix B.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the Cholesky factor U
/// or L, as specified by \p uplo.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] b_type The data type of the matrix B.
/// \param [in, out] b The matrix B, whose columns are the right-hand sides
/// for the systems of equations.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int potrs(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, library_data_t b_type, void *b,
std::int64_t ldb, int *info) {
return detail::lapack_shim<detail::potrs_impl>(
q, a_type, info, "potrs_scratchpad_size/potrs", q, uplo, n, nrhs, a_type,
a, lda, b_type, b, ldb, info);
}
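// Usage sketch for potrf_scratchpad_size + potrf + potrs (illustrative only;
// assumes USM mode, an in-order queue and a device-resident SPD float matrix;
// all names are hypothetical and namespace qualifiers are omitted):
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   constexpr std::int64_t n = 4, lda = 4, nrhs = 1, ldb = 4;
//   float *a = sycl::malloc_device<float>(lda * n, q);
//   float *b = sycl::malloc_device<float>(ldb * nrhs, q);
//   int *info = sycl::malloc_device<int>(1, q);
//   std::size_t ws_bytes = 0;
//   potrf_scratchpad_size(q, oneapi::mkl::uplo::lower, n,
//                         library_data_t::real_float, lda, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   potrf(q, oneapi::mkl::uplo::lower, n, library_data_t::real_float, a, lda,
//         ws, ws_bytes, info);
//   potrs(q, oneapi::mkl::uplo::lower, n, nrhs, library_data_t::real_float, a,
//         lda, library_data_t::real_float, b, ldb, info);
//   q.wait();  // b is overwritten with the solution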
/// Computes the size of workspace memory of syevx/heevx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
void *vl, void *vu, std::int64_t il,
std::int64_t iu, library_data_t w_type,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>(
q, a_type, nullptr, "syevx_scratchpad_size/heevx_scratchpad_size", q,
compz_jobz, range, uplo, n, lda, vl, vu, il, iu,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a
/// symmetric/Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is
/// overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, library_data_t w_type,
void *w, void *device_ws, std::size_t device_ws_size,
int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
int ret = detail::lapack_shim<detail::syheevx_impl>(
q, a_type, info, "syevx/heevx", q, compz_jobz, range, uplo, n, a_type, a,
lda, vl, vu, il, iu, m, w_type, w, device_ws,
device_ws_size_in_element_number, info);
q.wait();
return ret;
}
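// Usage sketch for syheevx_scratchpad_size + syheevx on a symmetric float
// matrix, selecting eigenvalues by index range (illustrative only; assumes
// USM mode and an in-order queue; vl, vu and found are host-side variables,
// while a, w and info live on the device; all names are hypothetical and
// namespace qualifiers are omitted):
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   constexpr std::int64_t n = 8, lda = 8, il = 1, iu = 3;
//   float vl = 0.f, vu = 0.f;  // unused when range == rangev::indices
//   float *a = sycl::malloc_device<float>(lda * n, q);
//   float *w = sycl::malloc_device<float>(n, q);
//   int *info = sycl::malloc_device<int>(1, q);
//   std::int64_t found = 0;
//   std::size_t ws_bytes = 0;
//   syheevx_scratchpad_size(q, oneapi::mkl::job::vec,
//                           oneapi::mkl::rangev::indices,
//                           oneapi::mkl::uplo::lower, n,
//                           library_data_t::real_float, lda, &vl, &vu, il, iu,
//                           library_data_t::real_float, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   syheevx(q, oneapi::mkl::job::vec, oneapi::mkl::rangev::indices,
//           oneapi::mkl::uplo::lower, n, library_data_t::real_float, a, lda,
//           &vl, &vu, il, iu, &found, library_data_t::real_float, w, ws,
//           ws_bytes, info);
//   // found holds the number of eigenvalues computed; w holds them in
//   // ascending order and a holds the corresponding eigenvectors.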
/// Computes the size of workspace memory of syevx/heevx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T, typename ValueT>
inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range,
oneapi::mkl::uplo uplo, int n, int lda,
ValueT vl, ValueT vu, int il, int iu,
int *device_ws_size) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syevx_scratchpad_size/heevx_scratchpad_size", q, compz_jobz, range, uplo,
n, lda, &vl, &vu, il, iu, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a
/// symmetric/Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is
/// overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n,
T *a, int lda, ValueT vl, ValueT vu, int il, int iu, int *m,
ValueT *w, T *device_ws, int device_ws_size, int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::int64_t m64;
int ret = detail::lapack_shim<detail::syheevx_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syevx/heevx", q,
compz_jobz, range, uplo, n, detail::get_library_data_t_from_type<T>(), a,
lda, &vl, &vu, il, iu, &m64,
detail::get_library_data_t_from_type<ValueT>(), w, device_ws,
device_ws_size, info);
q.wait();
*m = (int)m64;
return ret;
}
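// Usage sketch for the typed syheevx overloads (illustrative only; assumes
// USM mode and an in-order queue; here the workspace size is expressed in
// elements of T rather than bytes; all names are hypothetical and namespace
// qualifiers are omitted):
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   constexpr int n = 8, lda = 8, il = 1, iu = 3;
//   float *a = sycl::malloc_device<float>(lda * n, q);
//   float *w = sycl::malloc_device<float>(n, q);
//   int *info = sycl::malloc_device<int>(1, q);
//   int found = 0, ws_elems = 0;
//   syheevx_scratchpad_size<float>(q, oneapi::mkl::job::vec,
//                                  oneapi::mkl::rangev::indices,
//                                  oneapi::mkl::uplo::lower, n, lda, 0.f, 0.f,
//                                  il, iu, &ws_elems);
//   float *ws = sycl::malloc_device<float>(ws_elems, q);
//   syheevx(q, oneapi::mkl::job::vec, oneapi::mkl::rangev::indices,
//           oneapi::mkl::uplo::lower, n, a, lda, 0.f, 0.f, il, iu, &found, w,
//           ws, ws_elems, info);
//   // found holds the number of eigenvalues computed in w.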
/// Computes the size of workspace memory of sygvx/hegvx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T, typename ValueT>
inline int
syhegvx_scratchpad_size(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
int n, int lda, int ldb, ValueT vl, ValueT vu, int il,
int iu, int *device_ws_size) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syhegvx_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"sygvx_scratchpad_size/hegvx_scratchpad_size", q, itype, compz_jobz,
range, uplo, n, lda, ldb, &vl, &vu, il, iu, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a real
/// generalized symmetric/Hermitian definite eigenproblem.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is
/// overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in, out] b The input matrix B.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syhegvx(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n,
T *a, int lda, T *b, int ldb, ValueT vl, ValueT vu, int il,
int iu, int *m, ValueT *w, T *device_ws, int device_ws_size,
int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::int64_t m64;
int ret = detail::lapack_shim<detail::syhegvx_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "sygvx/hegvx", q,
itype, compz_jobz, range, uplo, n, a, lda, b, ldb, &vl, &vu, il, iu, &m64,
w, device_ws, device_ws_size, info);
q.wait();
*m = (int)m64;
return ret;
}
/// Computes the size of workspace memory of sygvd/hegvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syhegvd_scratchpad_size(sycl::queue &q, int itype,
oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, int lda,
int ldb, int *device_ws_size) {
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syhegvd_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"sygvd_scratchpad_size/hegvd_scratchpad_size", q, itype, jobz, uplo, n,
lda, ldb, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, eigenvectors of a real generalized
/// symmetric/Hermitian definite eigenproblem using a divide and conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in, out] b The input matrix B.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syhegvd(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
ValueT *w, T *device_ws, int device_ws_size, int *info) {
return detail::lapack_shim<detail::syhegvd_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "sygvd/hegvd", q,
itype, jobz, uplo, n, a, lda, b, ldb, w, device_ws, device_ws_size, info);
}
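// Editorial usage sketch (not part of the original header): solving the
// generalized symmetric-definite eigenproblem A*x = lambda*B*x (itype == 1)
// in double precision with syhegvd. The sizes, data and queue setup are
// illustrative assumptions; note that, unlike some of the other wrappers,
// syhegvd does not wait on the queue internally.
//
//   sycl::queue q{sycl::gpu_selector_v, sycl::property::queue::in_order()};
//   constexpr int n = 4, lda = 4, ldb = 4, itype = 1;
//   double *a = sycl::malloc_shared<double>(lda * n, q); // symmetric
//   double *b = sycl::malloc_shared<double>(ldb * n, q); // symmetric positive definite
//   double *w = sycl::malloc_shared<double>(n, q);
//   int info = 0, ws_size = 0;
//   dpct::lapack::syhegvd_scratchpad_size<double>(
//       q, itype, oneapi::mkl::job::vec, oneapi::mkl::uplo::lower, n, lda,
//       ldb, &ws_size);
//   double *ws = sycl::malloc_device<double>(ws_size, q);
//   dpct::lapack::syhegvd<double, double>(
//       q, itype, oneapi::mkl::job::vec, oneapi::mkl::uplo::lower, n, a, lda,
//       b, ldb, w, ws, ws_size, &info);
//   q.wait();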
/// Computes the size of workspace memory of syev/heev function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syheev_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, int lda,
int *device_ws_size) {
std::size_t device_ws_size_tmp;
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
int ret = detail::lapack_shim<detail::syheev_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syev_scratchpad_size/heev_scratchpad_size", q, compz_jobz, uplo, n, lda,
device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, eigenvectors of a real symmetric
/// or Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheev(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
int n, T *a, int lda, ValueT *w, T *device_ws,
int device_ws_size, int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
return detail::lapack_shim<detail::syheev_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syev/heev", q,
compz_jobz, uplo, n, a, lda, w, device_ws, device_ws_size, info);
}
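// Editorial usage sketch (not part of the original header): computing all
// eigenvalues and eigenvectors of a real symmetric single-precision matrix
// with syheev. For a complex Hermitian matrix, T would be a complex element
// type instead of float. All sizes and data below are illustrative
// assumptions.
//
//   sycl::queue q{sycl::gpu_selector_v, sycl::property::queue::in_order()};
//   constexpr int n = 8, lda = 8;
//   float *a = sycl::malloc_shared<float>(lda * n, q); // symmetric input
//   float *w = sycl::malloc_shared<float>(n, q);
//   int info = 0, ws_size = 0;
//   dpct::lapack::syheev_scratchpad_size<float>(
//       q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, &ws_size);
//   float *ws = sycl::malloc_device<float>(ws_size, q);
//   dpct::lapack::syheev<float, float>(q, oneapi::mkl::job::vec,
//                                      oneapi::mkl::uplo::upper, n, a, lda,
//                                      w, ws, ws_size, &info);
//   q.wait(); // eigenvectors overwrite a, eigenvalues are in w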
/// Computes the size of workspace memory of syevd/heevd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
library_data_t w_type,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>(
q, a_type, nullptr, "syevd_scratchpad_size/heevd_scratchpad_size", q,
jobz, uplo, n, a_type, lda, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes all eigenvalues and, optionally, all eigenvectors of a real
/// symmetric or Hermitian matrix using a divide-and-conquer algorithm.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t w_type, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::syheevd_impl>(
q, a_type, info, "syevd/heevd", q, jobz, uplo, n, a_type, a, lda, w,
device_ws, device_ws_size_in_element_number, info);
}
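// Editorial usage sketch (not part of the original header): the untyped
// syheevd overloads describe the data through library_data_t values and
// count the workspace in bytes rather than in elements. The sizes and data
// below are illustrative assumptions.
//
//   sycl::queue q{sycl::gpu_selector_v, sycl::property::queue::in_order()};
//   constexpr std::int64_t n = 8, lda = 8;
//   float *a = sycl::malloc_shared<float>(lda * n, q);
//   float *w = sycl::malloc_shared<float>(n, q);
//   std::size_t ws_bytes = 0;
//   int info = 0;
//   dpct::lapack::syheevd_scratchpad_size(
//       q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n,
//       dpct::library_data_t::real_float, lda,
//       dpct::library_data_t::real_float, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   dpct::lapack::syheevd(q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper,
//                         n, dpct::library_data_t::real_float, a, lda,
//                         dpct::library_data_t::real_float, w, ws, ws_bytes,
//                         &info);
//   q.wait();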
/// Computes the size of workspace memory of syevd/heevd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t lda, int *device_ws_size) {
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syevd_scratchpad_size/heevd_scratchpad_size", q, jobz, uplo, n,
detail::get_library_data_t_from_type<T>(), lda, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, all eigenvectors of a real
/// symmetric or Hermitian matrix using a divide-and-conquer algorithm.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, T *a,
std::int64_t lda, ValueT *w, T *device_ws,
int device_ws_size, int *info) {
return detail::lapack_shim<detail::syheevd_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syevd/heevd", q,
jobz, uplo, n, detail::get_library_data_t_from_type<T>(), a, lda, w,
device_ws, device_ws_size, info);
}
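// Editorial usage sketch (not part of the original header): the typed
// syheevd overload above counts the workspace in elements of T rather than
// in bytes. The sizes and data below are illustrative assumptions.
//
//   sycl::queue q{sycl::gpu_selector_v, sycl::property::queue::in_order()};
//   constexpr std::int64_t n = 8, lda = 8;
//   float *a = sycl::malloc_shared<float>(lda * n, q);
//   float *w = sycl::malloc_shared<float>(n, q);
//   int info = 0, ws_size = 0;
//   dpct::lapack::syheevd_scratchpad_size<float>(
//       q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, &ws_size);
//   float *ws = sycl::malloc_device<float>(ws_size, q);
//   dpct::lapack::syheevd<float, float>(q, oneapi::mkl::job::vec,
//                                       oneapi::mkl::uplo::upper, n, a, lda,
//                                       w, ws, ws_size, &info);
//   q.wait();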
/// Computes the size of workspace memory of trtri function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] diag Must be diag::nonunit or diag::unit.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int trtri_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::trtri_scratchpad_size_impl>(
q, a_type, nullptr, "trtri_scratchpad_size", q, uplo, diag, n, a_type,
lda, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the inverse of a triangular matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] diag Must be diag::nonunit or diag::unit.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// the inverse matrix of A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int trtri(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::diag diag,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *device_ws, std::size_t device_ws_size,
int *info) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::trtri_impl>(
q, a_type, info, "trtri", q, uplo, diag, n, a_type, a, lda, device_ws,
device_ws_size_in_element_number, info);
#endif
}
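// Editorial usage sketch (not part of the original header): inverting an
// upper triangular single-precision matrix in place with trtri. The sizes
// and data are illustrative assumptions; note that trtri is only available
// when DPCT_USM_LEVEL_NONE is not defined.
//
//   sycl::queue q{sycl::gpu_selector_v, sycl::property::queue::in_order()};
//   constexpr std::int64_t n = 3, lda = 3;
//   float *a = sycl::malloc_shared<float>(lda * n, q); // upper triangular input
//   std::size_t ws_bytes = 0;
//   int info = 0;
//   dpct::lapack::trtri_scratchpad_size(
//       q, oneapi::mkl::uplo::upper, oneapi::mkl::diag::nonunit, n,
//       dpct::library_data_t::real_float, lda, &ws_bytes);
//   void *ws = sycl::malloc_device(ws_bytes, q);
//   dpct::lapack::trtri(q, oneapi::mkl::uplo::upper,
//                       oneapi::mkl::diag::nonunit, n,
//                       dpct::library_data_t::real_float, a, lda, ws,
//                       ws_bytes, &info);
//   q.wait();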
} // namespace lapack
} // namespace dpct
#endif // __DPCT_LAPACK_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/fft_utils.hpp | //==---- fft_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_FFT_UTILS_HPP__
#define __DPCT_FFT_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include <optional>
#include <utility>
#include "lib_common_utils.hpp"
namespace dpct {
namespace fft {
/// An enumeration type to describe whether the FFT direction is forward or
/// backward.
enum fft_direction : int {
forward = 0,
backward
};
/// An enumeration type to describe the types of FFT input and output data.
enum fft_type : int {
real_float_to_complex_float = 0,
complex_float_to_real_float,
real_double_to_complex_double,
complex_double_to_real_double,
complex_float_to_complex_float,
complex_double_to_complex_double,
};
/// A class to perform FFT calculation.
class fft_engine {
public:
/// Default constructor.
fft_engine() {}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] input_type Input data type.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] output_type Output data type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement need
/// to be specified explicitly to get correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, long long *n,
long long *inembed, long long istride, long long idist,
library_data_t input_type, long long *onembed, long long ostride,
long long odist, library_data_t output_type, long long batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
init<long long>(dim, n, inembed, istride, idist, input_type, onembed,
ostride, odist, output_type, batch,
direction_and_placement);
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] input_type Input data type.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] output_type Output data type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement need
/// to be specified explicitly to get correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed,
int istride, int idist, library_data_t input_type, int *onembed,
int ostride, int odist, library_data_t output_type, int batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
init<int>(dim, n, inembed, istride, idist, input_type, onembed, ostride,
odist, output_type, batch, direction_and_placement);
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement need
/// to be specified explicitly to get correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, long long *n,
long long *inembed, long long istride, long long idist,
long long *onembed, long long ostride, long long odist,
fft_type type, long long batch, size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
commit(exec_queue, dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch, scratchpad_size,
direction_and_placement);
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement need
/// to be specified explicitly to get correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed,
int istride, int idist, int *onembed, int ostride, int odist,
fft_type type, int batch, size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
commit(exec_queue, dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch, scratchpad_size,
direction_and_placement);
}
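  // Editorial usage sketch (not part of the original header): committing a
  // batched 1-D single-precision complex-to-complex plan through the
  // advanced-layout overload above. A contiguous layout is described
  // explicitly here; the transform size, strides, distances and batch count
  // are illustrative assumptions.
  //
  //   sycl::queue q{sycl::gpu_selector_v};
  //   dpct::fft::fft_engine *plan = dpct::fft::fft_engine::create();
  //   int n[1] = {1024};
  //   int inembed[1] = {1024}, onembed[1] = {1024};
  //   size_t ws_bytes = 0;
  //   plan->commit(&q, /*dim=*/1, n, inembed, /*istride=*/1, /*idist=*/1024,
  //                onembed, /*ostride=*/1, /*odist=*/1024,
  //                dpct::fft::fft_type::complex_float_to_complex_float,
  //                /*batch=*/4, &ws_bytes);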
/// Commit the configuration to calculate 1-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement need
/// to be specified explicitly to get correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int n1, fft_type type, int batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(1);
_n[0] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 1;
_batch = batch;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate 2-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement need
/// to be specified explicitly to get correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int n2, int n1, fft_type type,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(2);
_n[0] = n2;
_n[1] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 2;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate 3-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement need
/// to be specified explicitly to get correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(3);
_n[0] = n3;
_n[1] = n2;
_n[2] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 3;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Create the class to calculate 1-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
static fft_engine *
create(sycl::queue *exec_queue, int n1, fft_type type, int batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n1, type, batch, nullptr,
direction_and_placement);
return engine;
}
/// Create the class to calculate 2-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
static fft_engine *
create(sycl::queue *exec_queue, int n2, int n1, fft_type type,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n2, n1, type, nullptr, direction_and_placement);
return engine;
}
/// Create the class to calculate 3-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
static fft_engine *
create(sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n3, n2, n1, type, nullptr,
direction_and_placement);
return engine;
}
/// Create the class to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// forward direction(if current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
static fft_engine *
create(sycl::queue *exec_queue, int dim, int *n, int *inembed, int istride,
int idist, int *onembed, int ostride, int odist, fft_type type,
int batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, dim, n, inembed, istride, idist, onembed,
ostride, odist, type, batch, nullptr,
direction_and_placement);
return engine;
}
/// Create the class for FFT calculation without committing any configuration.
static fft_engine *create() {
fft_engine *engine = new fft_engine();
return engine;
}
/// Destroy the class used to calculate FFT.
/// \param [in] engine Pointer returned from fft_engine::create.
static void destroy(fft_engine *engine) { delete engine; }
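  // Editorial usage sketch (not part of the original header): a typical
  // create/compute/destroy sequence for a batched out-of-place 1-D
  // complex-to-complex single-precision FFT. The queue, sizes and buffers
  // are illustrative assumptions.
  //
  //   sycl::queue q{sycl::gpu_selector_v};
  //   constexpr int n1 = 1024, batch = 8;
  //   sycl::float2 *in = sycl::malloc_device<sycl::float2>(n1 * batch, q);
  //   sycl::float2 *out = sycl::malloc_device<sycl::float2>(n1 * batch, q);
  //   dpct::fft::fft_engine *plan = dpct::fft::fft_engine::create(
  //       &q, n1, dpct::fft::fft_type::complex_float_to_complex_float, batch);
  //   plan->compute(in, out, dpct::fft::fft_direction::forward);
  //   q.wait();
  //   dpct::fft::fft_engine::destroy(plan);
  //   sycl::free(in, q); sycl::free(out, q);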
#ifdef __INTEL_MKL__
/// Estimates the workspace size for calculating n-D FFT.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement need to be specified explicitly to get correct
/// result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction(if
/// current FFT is complex-to-complex) and out-of-place (false) are set by default.
static void
estimate_size(int dim, long long *n, long long *inembed, long long istride,
long long idist, long long *onembed, long long ostride,
long long odist, fft_type type, long long batch,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating n-D FFT.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement need to be specified explicitly to get correct
/// result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction(if
/// current FFT is complex-to-complex) and out-of-place (false) are set by default.
static void
estimate_size(int dim, int *n, int *inembed, int istride, int idist,
int *onembed, int ostride, int odist, fft_type type, int batch,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 1-D FFT.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement need to be specified explicitly to get correct
/// result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If it is not set, forward direction(if current FFT is
/// complex-to-complex) and out-of-place (false) are set by default.
static void
estimate_size(int n1, fft_type type, int batch,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n1, type, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 2-D FFT.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement need to be specified explicitly to get correct
/// result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction(if
/// current FFT is complex-to-complex) and out-of-place (false) are set by default.
static void
estimate_size(int n2, int n1, fft_type type,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n2, n1, type,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 3-D FFT.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement need to be specified explicitly to get correct
/// result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, forward direction(if
/// current FFT is complex-to-complex) and out-of-place (false) are set by default.
static void
estimate_size(int n3, int n2, int n1, fft_type type,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n3, n2, n1, type,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
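  // Editorial usage sketch (not part of the original header): estimating the
  // workspace for a 1-D plan before it is created. Because the returned size
  // is intended to be used for allocation, the direction and placement are
  // passed explicitly, as the documentation above recommends. The transform
  // size and batch count are illustrative assumptions.
  //
  //   size_t est_bytes = 0;
  //   dpct::fft::fft_engine::estimate_size(
  //       1024, dpct::fft::fft_type::complex_float_to_complex_float,
  //       /*batch=*/8, &est_bytes,
  //       std::make_pair(dpct::fft::fft_direction::forward,
  //                      /*is_inplace=*/false));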
#endif
/// Execute the FFT calculation.
/// \param [in] input Pointer to the input data.
/// \param [out] output Pointer to the output data.
/// \param [in] direction The FFT direction.
template <typename input_t, typename output_t>
void compute(input_t *input, output_t *output, fft_direction direction) {
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
compute_complex<float, oneapi::mkl::dft::precision::SINGLE>(
(float *)input, (float *)output, direction);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output, direction);
} else if (_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
} else if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
} else if (_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output);
}
}
template <>
void compute(float *input, sycl::float2 *output, fft_direction direction) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
}
template <>
void compute(sycl::float2 *input, float *output, fft_direction direction) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
}
template <>
void compute(double *input, sycl::double2 *output, fft_direction direction) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input,
(double *)output);
}
template <>
void compute(sycl::double2 *input, double *output, fft_direction direction) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input,
(double *)output);
}
template <>
void compute(sycl::float2 *input, sycl::float2 *output,
fft_direction direction) {
compute_complex<float, oneapi::mkl::dft::precision::SINGLE>(
(float *)input, (float *)output, direction);
}
template <>
void compute(sycl::double2 *input, sycl::double2 *output,
fft_direction direction) {
compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output, direction);
}
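  // Editorial usage sketch (not part of the original header): a forward
  // real-to-complex transform through the float/sycl::float2 specialization
  // above. The output is assumed to hold n1/2 + 1 complex elements per batch
  // in the usual conjugate-even storage; the queue and sizes are illustrative
  // assumptions.
  //
  //   sycl::queue q{sycl::gpu_selector_v};
  //   constexpr int n1 = 256;
  //   float *real_in = sycl::malloc_device<float>(n1, q);
  //   sycl::float2 *cplx_out = sycl::malloc_device<sycl::float2>(n1 / 2 + 1, q);
  //   dpct::fft::fft_engine *plan = dpct::fft::fft_engine::create(
  //       &q, n1, dpct::fft::fft_type::real_float_to_complex_float, 1);
  //   plan->compute(real_in, cplx_out, dpct::fft::fft_direction::forward);
  //   q.wait();
  //   dpct::fft::fft_engine::destroy(plan);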
/// Set the user's SYCL queue for the calculation.
/// \param [in] q Pointer to the SYCL queue.
void set_queue(sycl::queue *q) { _q = q; }
#ifdef __INTEL_MKL__
/// Set whether to use an external or the internal workspace.
/// \param [in] flag True means using internal workspace. False means using
/// external workspace.
void use_internal_workspace(bool flag = true) {
_use_external_workspace = !flag;
}
/// Specify the external workspace.
/// \param [in] ptr Pointer to the workspace.
void set_workspace(void *ptr) {
if (!_use_external_workspace) {
return;
}
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr));
_desc_sc->set_workspace(data);
}
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr));
_desc_dc->set_workspace(data);
}
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr));
_desc_sr->set_workspace(data);
}
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr));
_desc_dr->set_workspace(data);
}
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
}
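  // Editorial usage sketch (not part of the original header): letting the
  // caller own the workspace. The external-workspace flag is set before the
  // plan is committed, the size reported by commit is then used for the
  // allocation, and set_workspace hands the buffer to the descriptor. The
  // ordering and the sizes here are illustrative assumptions.
  //
  //   sycl::queue q{sycl::gpu_selector_v};
  //   dpct::fft::fft_engine *plan = dpct::fft::fft_engine::create();
  //   plan->use_internal_workspace(false); // opt in to an external workspace
  //   size_t ws_bytes = 0;
  //   plan->commit(&q, 1024,
  //                dpct::fft::fft_type::complex_float_to_complex_float,
  //                /*batch=*/1, &ws_bytes,
  //                std::make_pair(dpct::fft::fft_direction::forward, false));
  //   void *ws = sycl::malloc_device(ws_bytes, q);
  //   plan->set_workspace(ws);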
#endif
/// Get the workspace size.
/// \param [out] scratchpad_size Workspace size in bytes.
void get_workspace_size(size_t *scratchpad_size) {
if (scratchpad_size) {
*scratchpad_size = _workspace_bytes;
}
}
private:
static std::pair<library_data_t, library_data_t>
fft_type_to_data_type(fft_type type) {
switch (type) {
case fft_type::real_float_to_complex_float: {
return std::make_pair(library_data_t::real_float,
library_data_t::complex_float);
}
case fft_type::complex_float_to_real_float: {
return std::make_pair(library_data_t::complex_float,
library_data_t::real_float);
}
case fft_type::real_double_to_complex_double: {
return std::make_pair(library_data_t::real_double,
library_data_t::complex_double);
}
case fft_type::complex_double_to_real_double: {
return std::make_pair(library_data_t::complex_double,
library_data_t::real_double);
}
case fft_type::complex_float_to_complex_float: {
return std::make_pair(library_data_t::complex_float,
library_data_t::complex_float);
}
case fft_type::complex_double_to_complex_double: {
return std::make_pair(library_data_t::complex_double,
library_data_t::complex_double);
}
}
}
void config_and_commit_basic() {
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
_desc_sc = std::make_shared<
oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::SINGLE,
oneapi::mkl::dft::domain::COMPLEX>>(_n);
std::int64_t distance = 1;
for (auto i : _n)
distance = distance * i;
_fwd_dist = distance;
_bwd_dist = distance;
_desc_sc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
distance);
_desc_sc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
distance);
_desc_sc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
else
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_sc->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_sc->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_sc->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_sc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
else
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
_desc_sc->commit(*_q);
#endif
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
_desc_dc = std::make_shared<
oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::DOUBLE,
oneapi::mkl::dft::domain::COMPLEX>>(_n);
std::int64_t distance = 1;
for (auto i : _n)
distance = distance * i;
_fwd_dist = distance;
_bwd_dist = distance;
_desc_dc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
distance);
_desc_dc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
distance);
_desc_dc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
else
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_dc->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_dc->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_dc->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_dc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
else
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
_desc_dc->commit(*_q);
#endif
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
_desc_sr = std::make_shared<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>>(
_n);
if (_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float)
_direction = fft_direction::forward;
else
_direction = fft_direction::backward;
_desc_sr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
set_stride_and_distance_basic<true>(_desc_sr);
} else {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_sr);
}
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_sr->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_sr->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_sr->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_sr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
set_stride_and_distance_basic<true>(_desc_sr);
} else {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_sr);
}
_desc_sr->commit(*_q);
#endif
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
_desc_dr = std::make_shared<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>>(
_n);
if (_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double)
_direction = fft_direction::forward;
else
_direction = fft_direction::backward;
_desc_dr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
set_stride_and_distance_basic<true>(_desc_dr);
} else {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_dr);
}
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_dr->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_dr->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_dr->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_dr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
set_stride_and_distance_basic<true>(_desc_dr);
} else {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_dr);
}
_desc_dr->commit(*_q);
#endif
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
}
void config_and_commit_advanced() {
#ifdef __INTEL_MKL__
#define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE) \
{ \
DESC = std::make_shared<oneapi::mkl::dft::descriptor< \
oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>( \
_n); \
set_stride_advanced(DESC); \
DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, \
_batch); \
if (_is_user_specified_dir_and_placement && _is_inplace) \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
DFTI_CONFIG_VALUE::DFTI_INPLACE); \
else \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); \
if (_use_external_workspace) { \
DESC->set_value(oneapi::mkl::dft::config_param::WORKSPACE, \
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL); \
} \
if (_is_estimate_call) { \
if (_q->get_device().is_gpu()) { \
DESC->get_value( \
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES, \
&_workspace_estimate_bytes); \
} \
} else { \
DESC->commit(*_q); \
      if (_q->get_device().is_gpu()) {                                        \
DESC->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES, \
&_workspace_bytes); \
} \
} \
}
#else
#define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE) \
{ \
DESC = std::make_shared<oneapi::mkl::dft::descriptor< \
oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>( \
_n); \
set_stride_advanced(DESC); \
DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, \
_batch); \
if (_is_user_specified_dir_and_placement && _is_inplace) \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
oneapi::mkl::dft::config_value::INPLACE); \
else \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
oneapi::mkl::dft::config_value::NOT_INPLACE); \
DESC->commit(*_q); \
}
#endif
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
CONFIG_AND_COMMIT(_desc_sc, SINGLE, COMPLEX, float);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
CONFIG_AND_COMMIT(_desc_dc, DOUBLE, COMPLEX, double);
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
CONFIG_AND_COMMIT(_desc_sr, SINGLE, REAL, float);
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
CONFIG_AND_COMMIT(_desc_dr, DOUBLE, REAL, double);
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
#undef CONFIG_AND_COMMIT
}
template <typename T>
void init(int dim, T *n, T *inembed, T istride, T idist,
library_data_t input_type, T *onembed, T ostride, T odist,
library_data_t output_type, T batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement) {
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
_n.resize(dim);
_inembed.resize(dim);
_onembed.resize(dim);
_input_type = input_type;
_output_type = output_type;
for (int i = 0; i < dim; i++) {
_n[i] = n[i];
}
if (inembed && onembed) {
for (int i = 0; i < dim; i++) {
_inembed[i] = inembed[i];
_onembed[i] = onembed[i];
}
_istride = istride;
_ostride = ostride;
if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double)) {
_fwd_dist = idist;
_bwd_dist = odist;
} else if ((_output_type == library_data_t::real_float &&
_input_type == library_data_t::complex_float) ||
(_output_type == library_data_t::real_double &&
_input_type == library_data_t::complex_double)) {
_fwd_dist = odist;
_bwd_dist = idist;
} else {
if (_is_user_specified_dir_and_placement &&
(_direction == fft_direction::backward)) {
_fwd_dist = odist;
_bwd_dist = idist;
} else {
_fwd_dist = idist;
_bwd_dist = odist;
}
}
} else {
_is_basic = true;
}
_batch = batch;
_dim = dim;
if (_is_basic)
config_and_commit_basic();
else
config_and_commit_advanced();
}
template <class Desc_t>
void set_stride_advanced(std::shared_ptr<Desc_t> desc) {
if (_dim == 1) {
std::int64_t input_stride[2] = {0, _istride};
std::int64_t output_stride[2] = {0, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
} else if (_dim == 2) {
std::int64_t input_stride[3] = {0, _inembed[1] * _istride, _istride};
std::int64_t output_stride[3] = {0, _onembed[1] * _ostride, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
} else if (_dim == 3) {
std::int64_t input_stride[4] = {0, _inembed[2] * _inembed[1] * _istride,
_inembed[2] * _istride, _istride};
std::int64_t output_stride[4] = {0, _onembed[2] * _onembed[1] * _ostride,
_onembed[2] * _ostride, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
}
}
template <class Desc_t> void swap_distance(std::shared_ptr<Desc_t> desc) {
desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _bwd_dist);
desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _fwd_dist);
std::int64_t temp = _bwd_dist;
_bwd_dist = _fwd_dist;
_fwd_dist = temp;
}
template <bool Is_inplace, class Desc_t>
void set_stride_and_distance_basic(std::shared_ptr<Desc_t> desc) {
std::int64_t forward_distance = 0;
std::int64_t backward_distance = 0;
#define SET_STRIDE \
{ \
if (_direction == fft_direction::forward) { \
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \
real_stride); \
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \
complex_stride); \
} else { \
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \
complex_stride); \
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \
real_stride); \
} \
}
if (_dim == 1) {
if constexpr (Is_inplace) {
std::int64_t real_stride[2] = {0, 1};
std::int64_t complex_stride[2] = {0, 1};
SET_STRIDE;
forward_distance = 2 * (_n[0] / 2 + 1);
backward_distance = _n[0] / 2 + 1;
} else {
std::int64_t real_stride[2] = {0, 1};
std::int64_t complex_stride[2] = {0, 1};
SET_STRIDE;
forward_distance = _n[0];
backward_distance = _n[0] / 2 + 1;
}
} else if (_dim == 2) {
if constexpr (Is_inplace) {
std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1};
std::int64_t real_stride[3] = {0, 2 * (_n[1] / 2 + 1), 1};
SET_STRIDE;
forward_distance = _n[0] * 2 * (_n[1] / 2 + 1);
backward_distance = _n[0] * (_n[1] / 2 + 1);
} else {
std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1};
std::int64_t real_stride[3] = {0, _n[1], 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1];
backward_distance = _n[0] * (_n[1] / 2 + 1);
}
} else if (_dim == 3) {
if constexpr (Is_inplace) {
std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1),
_n[2] / 2 + 1, 1};
std::int64_t real_stride[4] = {0, _n[1] * 2 * (_n[2] / 2 + 1),
2 * (_n[2] / 2 + 1), 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1] * 2 * (_n[2] / 2 + 1);
backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1);
} else {
std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1),
_n[2] / 2 + 1, 1};
std::int64_t real_stride[4] = {0, _n[1] * _n[2], _n[2], 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1] * _n[2];
backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1);
}
}
#undef SET_STRIDE
desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
forward_distance);
desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
backward_distance);
}
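  // Usage note (illustrative arithmetic, not from the original header): the
  // distances set above follow the packed real-to-complex layout. For a 1-D
  // in-place transform with _n[0] == 8:
  //   backward_distance = 8 / 2 + 1 = 5    complex elements per transform
  //   forward_distance  = 2 * 5     = 10   padded real elements per transform
  // whereas the out-of-place variant keeps forward_distance = _n[0] = 8.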
#define COMPUTE(DESC) \
{ \
if (_is_inplace) { \
auto data_input = \
dpct::detail::get_memory(reinterpret_cast<T *>(input)); \
if (_direction == fft_direction::forward) { \
oneapi::mkl::dft::compute_forward(*DESC, data_input); \
} else { \
oneapi::mkl::dft::compute_backward(*DESC, data_input); \
} \
} else { \
auto data_input = \
dpct::detail::get_memory(reinterpret_cast<T *>(input)); \
auto data_output = \
dpct::detail::get_memory(reinterpret_cast<T *>(output)); \
if (_direction == fft_direction::forward) { \
oneapi::mkl::dft::compute_forward(*DESC, data_input, data_output); \
} else { \
oneapi::mkl::dft::compute_backward(*DESC, data_input, data_output); \
} \
} \
}
template <class T, oneapi::mkl::dft::precision Precision>
void compute_complex(T *input, T *output, fft_direction direction) {
bool is_this_compute_inplace = input == output;
if (!_is_user_specified_dir_and_placement) {
      // The complex domain descriptor needs different config values if the
// FFT direction or placement is different.
// Here we check the conditions, and new config values are set and
// re-committed if needed.
if (direction != _direction || is_this_compute_inplace != _is_inplace) {
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
if (direction != _direction) {
swap_distance(_desc_sc);
_direction = direction;
}
if (is_this_compute_inplace != _is_inplace) {
_is_inplace = is_this_compute_inplace;
#ifdef __INTEL_MKL__
if (_is_inplace) {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
} else {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
}
#else
if (_is_inplace) {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
} else {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
}
#endif
}
_desc_sc->commit(*_q);
} else {
if (direction != _direction) {
swap_distance(_desc_dc);
_direction = direction;
}
if (is_this_compute_inplace != _is_inplace) {
_is_inplace = is_this_compute_inplace;
#ifdef __INTEL_MKL__
if (_is_inplace) {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
} else {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
}
#else
if (_is_inplace) {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
} else {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
}
#endif
}
_desc_dc->commit(*_q);
}
}
}
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
COMPUTE(_desc_sc);
} else {
COMPUTE(_desc_dc);
}
}
template <class T, oneapi::mkl::dft::precision Precision>
void compute_real(T *input, T *output) {
bool is_this_compute_inplace = input == output;
if (!_is_user_specified_dir_and_placement) {
      // The real domain descriptor needs different config values if the
// FFT placement is different.
// Here we check the condition, and new config values are set and
// re-committed if needed.
if (is_this_compute_inplace != _is_inplace) {
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
_is_inplace = is_this_compute_inplace;
if (_is_inplace) {
#ifdef __INTEL_MKL__
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
#else
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<true>(_desc_sr);
} else {
#ifdef __INTEL_MKL__
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
#else
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<false>(_desc_sr);
}
_desc_sr->commit(*_q);
} else {
_is_inplace = is_this_compute_inplace;
if (_is_inplace) {
#ifdef __INTEL_MKL__
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
#else
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<true>(_desc_dr);
} else {
#ifdef __INTEL_MKL__
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
#else
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<false>(_desc_dr);
}
_desc_dr->commit(*_q);
}
}
}
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
COMPUTE(_desc_sr);
} else {
COMPUTE(_desc_dr);
}
}
#undef COMPUTE
private:
sycl::queue *_q = nullptr;
int _dim;
std::vector<std::int64_t> _n;
std::vector<std::int64_t> _inembed;
std::int64_t _istride;
std::int64_t _fwd_dist;
library_data_t _input_type;
std::vector<std::int64_t> _onembed;
std::int64_t _ostride;
std::int64_t _bwd_dist;
library_data_t _output_type;
std::int64_t _batch = 1;
bool _is_basic = false;
bool _is_inplace = false;
fft_direction _direction = fft_direction::forward;
bool _is_user_specified_dir_and_placement = false;
bool _use_external_workspace = false;
void *_external_workspace_ptr = nullptr;
size_t _workspace_bytes = 0;
bool _is_estimate_call = false;
size_t _workspace_estimate_bytes = 0;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>>
_desc_sr;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>>
_desc_dr;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::COMPLEX>>
_desc_sc;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::COMPLEX>>
_desc_dc;
};
using fft_engine_ptr = fft_engine *;
} // namespace fft
} // namespace dpct
#endif // __DPCT_FFT_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/lib_common_utils.hpp | //==---- lib_common_utils.hpp ---------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_LIB_COMMON_UTILS_HPP__
#define __DPCT_LIB_COMMON_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
namespace detail {
template <typename T> inline auto get_memory(T *x) {
#ifdef DPCT_USM_LEVEL_NONE
return dpct::get_buffer<std::remove_cv_t<T>>(x);
#else
return x;
#endif
}
template <typename T>
inline typename DataType<T>::T2 get_value(const T *s, sycl::queue &q) {
using Ty = typename DataType<T>::T2;
Ty s_h;
detail::dpct_memcpy(q, (void *)&s_h, (void *)s, sizeof(T), automatic).wait();
return s_h;
}
} // namespace detail
enum class version_field : int {
major,
minor,
update,
patch
};
/// Returns the requested field of Intel(R) oneAPI Math Kernel Library version.
/// \param field The version information field (major, minor, update or patch).
/// \param result The result value.
inline void mkl_get_version(version_field field, int *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
MKLVersion version;
mkl_get_version(&version);
if (version_field::major == field) {
*result = version.MajorVersion;
} else if (version_field::minor == field) {
*result = version.MinorVersion;
} else if (version_field::update == field) {
*result = version.UpdateVersion;
} else if (version_field::patch == field) {
*result = 0;
} else {
throw std::runtime_error("unknown field");
}
#endif
}
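// Usage sketch (illustrative only; `major` is a local variable introduced for
// illustration):
//   int major = 0;
//   dpct::mkl_get_version(dpct::version_field::major, &major);
// When built against the oneMKL Interfaces Project (__INTEL_MKL__ not
// defined), the call throws std::runtime_error instead.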
enum class library_data_t : unsigned char {
real_float = 0,
complex_float,
real_double,
complex_double,
real_half,
complex_half,
real_bfloat16,
complex_bfloat16,
real_int4,
complex_int4,
real_uint4,
complex_uint4,
real_int8,
complex_int8,
real_uint8,
complex_uint8,
real_int16,
complex_int16,
real_uint16,
complex_uint16,
real_int32,
complex_int32,
real_uint32,
complex_uint32,
real_int64,
complex_int64,
real_uint64,
complex_uint64,
real_int8_4,
real_int8_32,
real_uint8_4,
library_data_t_size
};
namespace detail {
template <typename ArgT>
inline constexpr std::uint64_t get_type_combination_id(ArgT Val) {
static_assert((unsigned char)library_data_t::library_data_t_size <=
std::numeric_limits<unsigned char>::max() &&
"library_data_t size exceeds limit.");
static_assert(std::is_same_v<ArgT, library_data_t>, "Unsupported ArgT");
return (std::uint64_t)Val;
}
template <typename FirstT, typename... RestT>
inline constexpr std::uint64_t get_type_combination_id(FirstT FirstVal,
RestT... RestVal) {
static_assert((std::uint8_t)library_data_t::library_data_t_size <=
std::numeric_limits<unsigned char>::max() &&
"library_data_t size exceeds limit.");
static_assert(sizeof...(RestT) <= 8 && "Too many parameters");
static_assert(std::is_same_v<FirstT, library_data_t>, "Unsupported FirstT");
return get_type_combination_id(RestVal...) << 8 | ((std::uint64_t)FirstVal);
}
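// Usage note (illustrative only): each library_data_t value occupies one
// byte, so a two-element combination such as (real_int32, real_float) packs
// as
//   get_type_combination_id(library_data_t::real_int32,
//                           library_data_t::real_float)
//     == ((std::uint64_t)library_data_t::real_float << 8) |
//        (std::uint64_t)library_data_t::real_int32
// which is the constant the switch statements elsewhere in these headers
// compare against.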
inline constexpr std::size_t library_data_size[] = {
8 * sizeof(float), // real_float
8 * sizeof(std::complex<float>), // complex_float
8 * sizeof(double), // real_double
8 * sizeof(std::complex<double>), // complex_double
8 * sizeof(sycl::half), // real_half
8 * sizeof(std::complex<sycl::half>), // complex_half
16, // real_bfloat16
16 * 2, // complex_bfloat16
4, // real_int4
4 * 2, // complex_int4
4, // real_uint4
4 * 2, // complex_uint4
8, // real_int8
8 * 2, // complex_int8
8, // real_uint8
8 * 2, // complex_uint8
16, // real_int16
16 * 2, // complex_int16
16, // real_uint16
16 * 2, // complex_uint16
32, // real_int32
32 * 2, // complex_int32
32, // real_uint32
32 * 2, // complex_uint32
64, // real_int64
64 * 2, // complex_int64
64, // real_uint64
64 * 2, // complex_uint64
8, // real_int8_4
8, // real_int8_32
8 // real_uint8_4
};
} // namespace detail
} // namespace dpct
#endif // __DPCT_LIB_COMMON_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/sparse_utils.hpp | //==---- sparse_utils.hpp -------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_SPARSE_UTILS_HPP__
#define __DPCT_SPARSE_UTILS_HPP__
#include "lib_common_utils.hpp"
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
namespace dpct {
namespace sparse {
/// Describes properties of a sparse matrix.
/// The properties are matrix type, diag, uplo and index base.
class matrix_info {
public:
/// Matrix types are:
/// ge: General matrix
/// sy: Symmetric matrix
/// he: Hermitian matrix
/// tr: Triangular matrix
enum class matrix_type : int { ge = 0, sy, he, tr };
auto get_matrix_type() const { return _matrix_type; }
auto get_diag() const { return _diag; }
auto get_uplo() const { return _uplo; }
auto get_index_base() const { return _index_base; }
void set_matrix_type(matrix_type mt) { _matrix_type = mt; }
void set_diag(oneapi::mkl::diag d) { _diag = d; }
void set_uplo(oneapi::mkl::uplo u) { _uplo = u; }
void set_index_base(oneapi::mkl::index_base ib) { _index_base = ib; }
private:
matrix_type _matrix_type = matrix_type::ge;
oneapi::mkl::diag _diag = oneapi::mkl::diag::nonunit;
oneapi::mkl::uplo _uplo = oneapi::mkl::uplo::upper;
oneapi::mkl::index_base _index_base = oneapi::mkl::index_base::zero;
};
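// Usage sketch (illustrative only; the variable name `info` is a
// placeholder): describing a general CSR matrix with zero-based indices for
// the csrmv/csrmm wrappers below.
//   auto info = std::make_shared<dpct::sparse::matrix_info>();
//   info->set_matrix_type(dpct::sparse::matrix_info::matrix_type::ge);
//   info->set_index_base(oneapi::mkl::index_base::zero);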
/// Computes a CSR format sparse matrix-dense vector product.
/// y = alpha * op(A) * x + beta * y
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the matrix A.
/// \param [in] num_rows Number of rows of the matrix A.
/// \param [in] num_cols Number of columns of the matrix A.
/// \param [in] alpha Scaling factor for the matrix A.
/// \param [in] info Matrix info of the matrix A.
/// \param [in] val An array containing the non-zero elements of the matrix A.
/// \param [in] row_ptr An array of length \p num_rows + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] x Data of the vector x.
/// \param [in] beta Scaling factor for the vector y.
/// \param [in, out] y Data of the vector y.
template <typename T>
void csrmv(sycl::queue &queue, oneapi::mkl::transpose trans, int num_rows,
int num_cols, const T *alpha,
const std::shared_ptr<matrix_info> info, const T *val,
const int *row_ptr, const int *col_ind, const T *x, const T *beta,
T *y) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename dpct::DataType<T>::T2;
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle =
new oneapi::mkl::sparse::matrix_handle_t;
oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle);
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, num_rows,
num_cols, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
auto data_x =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(x)));
auto data_y = dpct::detail::get_memory(reinterpret_cast<Ty *>(y));
switch (info->get_matrix_type()) {
case matrix_info::matrix_type::ge: {
oneapi::mkl::sparse::optimize_gemv(queue, trans, *sparse_matrix_handle);
oneapi::mkl::sparse::gemv(queue, trans, alpha_value, *sparse_matrix_handle,
data_x, beta_value, data_y);
break;
}
case matrix_info::matrix_type::sy: {
oneapi::mkl::sparse::symv(queue, info->get_uplo(), alpha_value,
*sparse_matrix_handle, data_x, beta_value,
data_y);
break;
}
case matrix_info::matrix_type::tr: {
oneapi::mkl::sparse::optimize_trmv(queue, info->get_uplo(), trans,
info->get_diag(), *sparse_matrix_handle);
oneapi::mkl::sparse::trmv(queue, info->get_uplo(), trans, info->get_diag(),
alpha_value, *sparse_matrix_handle, data_x,
beta_value, data_y);
break;
}
default:
throw std::runtime_error(
"the spmv does not support matrix_info::matrix_type::he");
}
sycl::event e =
oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle);
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete sparse_matrix_handle; });
});
#endif
}
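// Usage sketch (illustrative only; `q` is a sycl::queue and `val`, `row_ptr`,
// `col_ind`, `x`, `y` are placeholders for device-accessible data): a
// single-precision y = alpha * A * x + beta * y call using the matrix_info
// prepared as above.
//   float alpha = 1.0f, beta = 0.0f;
//   dpct::sparse::csrmv(q, oneapi::mkl::transpose::nontrans, num_rows,
//                       num_cols, &alpha, info, val, row_ptr, col_ind, x,
//                       &beta, y);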
/// Computes a CSR format sparse matrix-dense matrix product.
/// C = alpha * op(A) * B + beta * C
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the matrix A.
/// \param [in] sparse_rows Number of rows of the matrix A.
/// \param [in] dense_cols Number of columns of the matrix B or C.
/// \param [in] sparse_cols Number of columns of the matrix A.
/// \param [in] alpha Scaling factor for the matrix A.
/// \param [in] info Matrix info of the matrix A.
/// \param [in] val An array containing the non-zero elements of the matrix A.
/// \param [in] row_ptr An array of length \p sparse_rows + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] b Data of the matrix B.
/// \param [in] ldb Leading dimension of the matrix B.
/// \param [in] beta Scaling factor for the matrix C.
/// \param [in, out] c Data of the matrix C.
/// \param [in] ldc Leading dimension of the matrix C.
template <typename T>
void csrmm(sycl::queue &queue, oneapi::mkl::transpose trans, int sparse_rows,
int dense_cols, int sparse_cols, const T *alpha,
const std::shared_ptr<matrix_info> info, const T *val,
const int *row_ptr, const int *col_ind, const T *b, int ldb,
const T *beta, T *c, int ldc) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename dpct::DataType<T>::T2;
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle =
new oneapi::mkl::sparse::matrix_handle_t;
oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle);
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, sparse_rows,
sparse_cols, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
auto data_b =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(b)));
auto data_c = dpct::detail::get_memory(reinterpret_cast<Ty *>(c));
switch (info->get_matrix_type()) {
case matrix_info::matrix_type::ge: {
oneapi::mkl::sparse::gemm(queue, oneapi::mkl::layout::row_major, trans,
oneapi::mkl::transpose::nontrans, alpha_value,
*sparse_matrix_handle, data_b, dense_cols, ldb,
beta_value, data_c, ldc);
break;
}
default:
throw std::runtime_error(
"the csrmm does not support matrix_info::matrix_type::sy, "
"matrix_info::matrix_type::tr and matrix_info::matrix_type::he");
}
sycl::event e =
oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle);
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete sparse_matrix_handle; });
});
#endif
}
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Stores the optimization information for solving a system of linear
/// equations.
class optimize_info {
public:
/// Constructor
optimize_info() { oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle); }
/// Destructor
~optimize_info() {
oneapi::mkl::sparse::release_matrix_handle(get_default_queue(),
&_matrix_handle, _deps)
.wait();
}
/// Add dependency for the destructor.
/// \param [in] e The event which the destructor depends on.
void add_dependency(sycl::event e) { _deps.push_back(e); }
/// Get the internal saved matrix handle.
/// \return Returns the matrix handle.
oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept {
return _matrix_handle;
}
private:
oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr;
std::vector<sycl::event> _deps;
};
#endif
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Performs internal optimizations for solving a system of linear equations for
/// a CSR format sparse matrix.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the sparse matrix.
/// \param [in] row_col Number of rows and columns of the sparse matrix.
/// \param [in] info Matrix info of the sparse matrix.
/// \param [in] val An array containing the non-zero elements of the sparse matrix.
/// \param [in] row_ptr An array of length \p row_col + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [out] optimize_info The result of the optimizations.
template <typename T>
void optimize_csrsv(sycl::queue &queue, oneapi::mkl::transpose trans,
int row_col, const std::shared_ptr<matrix_info> info,
const T *val, const int *row_ptr, const int *col_ind,
std::shared_ptr<optimize_info> optimize_info) {
using Ty = typename dpct::DataType<T>::T2;
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, optimize_info->get_matrix_handle(),
row_col, row_col, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
if (info->get_matrix_type() != matrix_info::matrix_type::tr)
return;
#ifndef DPCT_USM_LEVEL_NONE
sycl::event e;
e =
#endif
oneapi::mkl::sparse::optimize_trsv(queue, info->get_uplo(), trans,
info->get_diag(),
optimize_info->get_matrix_handle());
#ifndef DPCT_USM_LEVEL_NONE
optimize_info->add_dependency(e);
#endif
}
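// Usage sketch (illustrative only; `q`, `n`, `val`, `row_ptr`, `col_ind` and
// `info` are placeholders): preparing the optimization info for a
// lower-triangular CSR matrix before a triangular solve.
//   auto opt = std::make_shared<dpct::sparse::optimize_info>();
//   info->set_matrix_type(dpct::sparse::matrix_info::matrix_type::tr);
//   info->set_uplo(oneapi::mkl::uplo::lower);
//   dpct::sparse::optimize_csrsv(q, oneapi::mkl::transpose::nontrans, n, info,
//                                val, row_ptr, col_ind, opt);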
#endif
class sparse_matrix_desc;
using sparse_matrix_desc_t = std::shared_ptr<sparse_matrix_desc>;
/// Structure describing a dense vector
class dense_vector_desc {
public:
dense_vector_desc(std::int64_t ele_num, void *value,
library_data_t value_type)
: _ele_num(ele_num), _value(value), _value_type(value_type) {}
void get_desc(std::int64_t *ele_num, const void **value,
library_data_t *value_type) const noexcept {
*ele_num = _ele_num;
*value = _value;
*value_type = _value_type;
}
void get_desc(std::int64_t *ele_num, void **value,
library_data_t *value_type) const noexcept {
get_desc(ele_num, const_cast<const void **>(value), value_type);
}
void *get_value() const noexcept { return _value; }
void set_value(void *value) { _value = value; }
private:
std::int64_t _ele_num;
void *_value;
library_data_t _value_type;
};
/// Structure describing a dense matrix
class dense_matrix_desc {
public:
dense_matrix_desc(std::int64_t row_num, std::int64_t col_num,
std::int64_t leading_dim, void *value,
library_data_t value_type, oneapi::mkl::layout layout)
: _row_num(row_num), _col_num(col_num), _leading_dim(leading_dim),
_value(value), _value_type(value_type), _layout(layout) {}
void get_desc(std::int64_t *row_num, std::int64_t *col_num,
std::int64_t *leading_dim, void **value,
library_data_t *value_type,
oneapi::mkl::layout *layout) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*leading_dim = _leading_dim;
*value = _value;
*value_type = _value_type;
*layout = _layout;
}
void *get_value() const noexcept { return _value; }
void set_value(void *value) { _value = value; }
std::int64_t get_col_num() const noexcept { return _col_num; }
std::int64_t get_leading_dim() const noexcept { return _leading_dim; }
oneapi::mkl::layout get_layout() const noexcept { return _layout; }
private:
std::int64_t _row_num;
std::int64_t _col_num;
std::int64_t _leading_dim;
void *_value;
library_data_t _value_type;
oneapi::mkl::layout _layout;
};
/// Sparse matrix data format
enum matrix_format : int {
csr = 1,
};
/// Sparse matrix attribute
enum matrix_attribute : int { uplo = 0, diag };
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Structure describing a sparse matrix
class sparse_matrix_desc {
public:
/// Constructor
/// \param [in] row_num Number of rows of the sparse matrix.
  /// \param [in] col_num Number of columns of the sparse matrix.
/// \param [in] nnz Non-zero elements in the sparse matrix.
/// \param [in] row_ptr An array of length \p row_num + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] value An array containing the non-zero elements of the sparse matrix.
/// \param [in] row_ptr_type Data type of the \p row_ptr .
/// \param [in] col_ind_type Data type of the \p col_ind .
/// \param [in] base Indicates how input arrays are indexed.
/// \param [in] value_type Data type of the \p value .
/// \param [in] data_format The matrix data format.
sparse_matrix_desc(std::int64_t row_num, std::int64_t col_num,
std::int64_t nnz, void *row_ptr, void *col_ind,
void *value, library_data_t row_ptr_type,
library_data_t col_ind_type, oneapi::mkl::index_base base,
library_data_t value_type, matrix_format data_format)
: _row_num(row_num), _col_num(col_num), _nnz(nnz), _row_ptr(row_ptr),
_col_ind(col_ind), _value(value), _row_ptr_type(row_ptr_type),
_col_ind_type(col_ind_type), _base(base), _value_type(value_type),
_data_format(data_format) {
if (_data_format != matrix_format::csr) {
throw std::runtime_error("the sparse matrix data format is unsupported");
}
oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle);
construct();
}
/// Destructor
~sparse_matrix_desc() {
oneapi::mkl::sparse::release_matrix_handle(get_default_queue(),
&_matrix_handle, _deps)
.wait();
}
  /// Add a dependency for the destructor.
  /// \param [in] e The event which the destructor depends on.
void add_dependency(sycl::event e) { _deps.push_back(e); }
/// Get the internal saved matrix handle.
/// \return Returns the matrix handle.
oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept {
return _matrix_handle;
}
/// Get the values saved in the descriptor
/// \param [out] row_num Number of rows of the sparse matrix.
  /// \param [out] col_num Number of columns of the sparse matrix.
/// \param [out] nnz Non-zero elements in the sparse matrix.
/// \param [out] row_ptr An array of length \p row_num + 1.
/// \param [out] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [out] value An array containing the non-zero elements of the sparse matrix.
/// \param [out] row_ptr_type Data type of the \p row_ptr .
/// \param [out] col_ind_type Data type of the \p col_ind .
/// \param [out] base Indicates how input arrays are indexed.
/// \param [out] value_type Data type of the \p value .
void get_desc(int64_t *row_num, int64_t *col_num, int64_t *nnz,
void **row_ptr, void **col_ind, void **value,
library_data_t *row_ptr_type, library_data_t *col_ind_type,
oneapi::mkl::index_base *base,
library_data_t *value_type) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*nnz = _nnz;
*row_ptr = _row_ptr;
*col_ind = _col_ind;
*value = _value;
*row_ptr_type = _row_ptr_type;
*col_ind_type = _col_ind_type;
*base = _base;
*value_type = _value_type;
}
/// Get the sparse matrix data format of this descriptor
  /// \param [out] data_format The matrix data format result
void get_format(matrix_format *data_format) const noexcept {
*data_format = _data_format;
}
/// Get the index base of this descriptor
/// \param [out] base The index base result
void get_base(oneapi::mkl::index_base *base) const noexcept { *base = _base; }
/// Get the value pointer of this descriptor
/// \param [out] value The value pointer result
void get_value(void **value) const noexcept { *value = _value; }
/// Set the value pointer of this descriptor
/// \param [in] value The input value pointer
void set_value(void *value) {
// Assume the new data is different from the old data
_value = value;
construct();
}
/// Get the size of the sparse matrix
/// \param [out] row_num Number of rows of the sparse matrix.
  /// \param [out] col_num Number of columns of the sparse matrix.
/// \param [out] nnz Non-zero elements in the sparse matrix.
void get_size(int64_t *row_num, int64_t *col_num,
int64_t *nnz) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*nnz = _nnz;
}
/// Set the sparse matrix attribute
/// \param [in] attribute The attribute type
/// \param [in] data The attribute value
/// \param [in] data_size The data size of the attribute value
void set_attribute(matrix_attribute attribute, const void *data,
size_t data_size) {
if (attribute == matrix_attribute::diag) {
const oneapi::mkl::diag *diag_ptr =
reinterpret_cast<const oneapi::mkl::diag *>(data);
if (*diag_ptr == oneapi::mkl::diag::unit) {
_diag = oneapi::mkl::diag::unit;
} else if (*diag_ptr == oneapi::mkl::diag::nonunit) {
_diag = oneapi::mkl::diag::nonunit;
} else {
throw std::runtime_error("unsupported diag value");
}
} else if (attribute == matrix_attribute::uplo) {
const oneapi::mkl::uplo *uplo_ptr =
reinterpret_cast<const oneapi::mkl::uplo *>(data);
if (*uplo_ptr == oneapi::mkl::uplo::upper) {
_uplo = oneapi::mkl::uplo::upper;
} else if (*uplo_ptr == oneapi::mkl::uplo::lower) {
_uplo = oneapi::mkl::uplo::lower;
} else {
throw std::runtime_error("unsupported uplo value");
}
} else {
throw std::runtime_error("unsupported attribute");
}
}
/// Get the sparse matrix attribute
/// \param [out] attribute The attribute type
/// \param [out] data The attribute value
/// \param [out] data_size The data size of the attribute value
void get_attribute(matrix_attribute attribute, void *data,
size_t data_size) const {
if (attribute == matrix_attribute::diag) {
oneapi::mkl::diag *diag_ptr = reinterpret_cast<oneapi::mkl::diag *>(data);
if (_diag.has_value()) {
*diag_ptr = _diag.value();
} else {
*diag_ptr = oneapi::mkl::diag::nonunit;
}
} else if (attribute == matrix_attribute::uplo) {
oneapi::mkl::uplo *uplo_ptr = reinterpret_cast<oneapi::mkl::uplo *>(data);
if (_uplo.has_value()) {
*uplo_ptr = _uplo.value();
} else {
*uplo_ptr = oneapi::mkl::uplo::lower;
}
} else {
throw std::runtime_error("unsupported attribute");
}
}
/// Set the pointers for describing the sparse matrix
/// \param [in] row_ptr An array of length \p row_num + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] value An array containing the non-zero elements of the sparse matrix.
void set_pointers(void *row_ptr, void *col_ind, void *value) {
// Assume the new data is different from the old data
_row_ptr = row_ptr;
_col_ind = col_ind;
_value = value;
construct();
}
/// Get the diag attribute
/// \return diag value
std::optional<oneapi::mkl::diag> get_diag() const noexcept { return _diag; }
/// Get the uplo attribute
/// \return uplo value
std::optional<oneapi::mkl::uplo> get_uplo() const noexcept { return _uplo; }
private:
template <typename index_t, typename value_t> void set_data() {
auto data_row_ptr =
dpct::detail::get_memory(reinterpret_cast<index_t *>(_row_ptr));
auto data_col_ind =
dpct::detail::get_memory(reinterpret_cast<index_t *>(_col_ind));
auto data_value =
dpct::detail::get_memory(reinterpret_cast<value_t *>(_value));
oneapi::mkl::sparse::set_csr_data(get_default_queue(), _matrix_handle,
_row_num, _col_num, _base, data_row_ptr,
data_col_ind, data_value);
get_default_queue().wait();
}
void construct() {
std::uint64_t key = dpct::detail::get_type_combination_id(
_row_ptr_type, _col_ind_type, _value_type);
switch (key) {
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::real_float): {
set_data<std::int32_t, float>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::real_double): {
set_data<std::int32_t, double>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::complex_float): {
set_data<std::int32_t, std::complex<float>>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int32, library_data_t::real_int32,
library_data_t::complex_double): {
set_data<std::int32_t, std::complex<double>>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::real_float): {
set_data<std::int64_t, float>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::real_double): {
set_data<std::int64_t, double>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::complex_float): {
set_data<std::int64_t, std::complex<float>>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int64, library_data_t::real_int64,
library_data_t::complex_double): {
set_data<std::int64_t, std::complex<double>>();
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
std::int64_t _row_num;
std::int64_t _col_num;
std::int64_t _nnz;
void *_row_ptr;
void *_col_ind;
void *_value;
library_data_t _row_ptr_type;
library_data_t _col_ind_type;
oneapi::mkl::index_base _base;
library_data_t _value_type;
oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr;
std::vector<sycl::event> _deps;
matrix_format _data_format;
std::optional<oneapi::mkl::uplo> _uplo;
std::optional<oneapi::mkl::diag> _diag;
};
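// Usage sketch (illustrative only; `rows`, `cols`, `nnz` and the pointer
// arguments are placeholders for device-accessible int32 index arrays and
// float values): building a CSR descriptor for the generic spmv/spmm
// wrappers below.
//   dpct::sparse::sparse_matrix_desc_t a =
//       std::make_shared<dpct::sparse::sparse_matrix_desc>(
//           rows, cols, nnz, row_ptr, col_ind, values,
//           dpct::library_data_t::real_int32,
//           dpct::library_data_t::real_int32, oneapi::mkl::index_base::zero,
//           dpct::library_data_t::real_float,
//           dpct::sparse::matrix_format::csr);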
namespace detail {
#ifdef DPCT_USM_LEVEL_NONE
#define SPARSE_CALL(X) \
do { \
X; \
} while (0)
#else
#define SPARSE_CALL(X) \
do { \
sycl::event e = X; \
a->add_dependency(e); \
} while (0)
#endif
template <typename Ty>
inline void spmv_impl(sycl::queue queue, oneapi::mkl::transpose trans,
const void *alpha, sparse_matrix_desc_t a,
std::shared_ptr<dense_vector_desc> x, const void *beta,
std::shared_ptr<dense_vector_desc> y) {
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
auto data_x =
dpct::detail::get_memory(reinterpret_cast<Ty *>(x->get_value()));
auto data_y =
dpct::detail::get_memory(reinterpret_cast<Ty *>(y->get_value()));
if (a->get_diag().has_value() && a->get_uplo().has_value()) {
oneapi::mkl::sparse::optimize_trmv(queue, a->get_uplo().value(), trans,
a->get_diag().value(),
a->get_matrix_handle());
SPARSE_CALL(oneapi::mkl::sparse::trmv(
queue, a->get_uplo().value(), trans, a->get_diag().value(), alpha_value,
a->get_matrix_handle(), data_x, beta_value, data_y));
} else {
oneapi::mkl::sparse::optimize_gemv(queue, trans, a->get_matrix_handle());
SPARSE_CALL(oneapi::mkl::sparse::gemv(queue, trans, alpha_value,
a->get_matrix_handle(), data_x,
beta_value, data_y));
}
}
template <typename Ty>
inline void spmm_impl(sycl::queue queue, oneapi::mkl::transpose trans_a,
oneapi::mkl::transpose trans_b, const void *alpha,
sparse_matrix_desc_t a,
std::shared_ptr<dense_matrix_desc> b, const void *beta,
std::shared_ptr<dense_matrix_desc> c) {
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
auto data_b =
dpct::detail::get_memory(reinterpret_cast<Ty *>(b->get_value()));
auto data_c =
dpct::detail::get_memory(reinterpret_cast<Ty *>(c->get_value()));
SPARSE_CALL(oneapi::mkl::sparse::gemm(
queue, b->get_layout(), trans_a, trans_b, alpha_value,
a->get_matrix_handle(), data_b, b->get_col_num(), b->get_leading_dim(),
beta_value, data_c, c->get_leading_dim()));
}
#undef SPARSE_CALL
} // namespace detail
/// Computes a sparse matrix-dense vector product: y = alpha * op(a) * x + beta * y.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans Specifies operation on input matrix.
/// \param [in] alpha Specifies the scalar alpha.
/// \param [in] a Specifies the sparse matrix a.
/// \param [in] x Specifies the dense vector x.
/// \param [in] beta Specifies the scalar beta.
/// \param [in, out] y Specifies the dense vector y.
/// \param [in] data_type Specifies the data type of \p a , \p x and \p y .
inline void spmv(sycl::queue queue, oneapi::mkl::transpose trans,
const void *alpha, sparse_matrix_desc_t a,
std::shared_ptr<dense_vector_desc> x, const void *beta,
std::shared_ptr<dense_vector_desc> y,
library_data_t data_type) {
switch (data_type) {
case library_data_t::real_float: {
detail::spmv_impl<float>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::real_double: {
detail::spmv_impl<double>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::complex_float: {
detail::spmv_impl<std::complex<float>>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::complex_double: {
detail::spmv_impl<std::complex<double>>(queue, trans, alpha, a, x, beta, y);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
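// Usage sketch (illustrative only; `q`, `a`, `x_desc` and `y_desc` are
// placeholders, where `x_desc`/`y_desc` are std::shared_ptr<dense_vector_desc>
// objects wrapping float data of length `cols` and `rows` respectively):
//   float alpha = 1.0f, beta = 0.0f;
//   dpct::sparse::spmv(q, oneapi::mkl::transpose::nontrans, &alpha, a,
//                      x_desc, &beta, y_desc,
//                      dpct::library_data_t::real_float);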
/// Computes a sparse matrix-dense matrix product: c = alpha * op(a) * op(b) + beta * c.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans_a Specifies operation on input matrix a.
/// \param [in] trans_b Specifies operation on input matrix b.
/// \param [in] alpha Specifies the scalar alpha.
/// \param [in] a Specifies the sparse matrix a.
/// \param [in] b Specifies the dense matrix b.
/// \param [in] beta Specifies the scalar beta.
/// \param [in, out] c Specifies the dense matrix c.
/// \param [in] data_type Specifies the data type of \p a , \p b and \p c .
inline void spmm(sycl::queue queue, oneapi::mkl::transpose trans_a,
oneapi::mkl::transpose trans_b, const void *alpha,
sparse_matrix_desc_t a, std::shared_ptr<dense_matrix_desc> b,
const void *beta, std::shared_ptr<dense_matrix_desc> c,
library_data_t data_type) {
if (b->get_layout() != c->get_layout())
throw std::runtime_error("the layout of b and c are different");
switch (data_type) {
case library_data_t::real_float: {
detail::spmm_impl<float>(queue, trans_a, trans_b, alpha, a, b, beta, c);
break;
}
case library_data_t::real_double: {
detail::spmm_impl<double>(queue, trans_a, trans_b, alpha, a, b, beta, c);
break;
}
case library_data_t::complex_float: {
detail::spmm_impl<std::complex<float>>(queue, trans_a, trans_b, alpha, a, b,
beta, c);
break;
}
case library_data_t::complex_double: {
detail::spmm_impl<std::complex<double>>(queue, trans_a, trans_b, alpha, a,
b, beta, c);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
#endif
} // namespace sparse
} // namespace dpct
#endif // __DPCT_SPARSE_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/device.hpp | //==---- device.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DEVICE_HPP__
#define __DPCT_DEVICE_HPP__
#include <sycl/sycl.hpp>
#include <algorithm>
#include <array>
#include <cstring>
#include <iostream>
#include <mutex>
#include <set>
#include <sstream>
#include <map>
#include <vector>
#include <thread>
#if defined(__linux__)
#include <unistd.h>
#include <sys/syscall.h>
#endif
#if defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#endif
namespace dpct {
/// SYCL default exception handler
inline auto exception_handler = [](sycl::exception_list exceptions) {
for (std::exception_ptr const &e : exceptions) {
try {
std::rethrow_exception(e);
} catch (sycl::exception const &e) {
std::cerr << "Caught asynchronous SYCL exception:" << std::endl
<< e.what() << std::endl
<< "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
}
}
};
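// Usage sketch (illustrative only; `dev` stands for any sycl::device):
// attaching the handler above to a user-created queue so asynchronous SYCL
// errors are reported.
//   sycl::queue q(dev, dpct::exception_handler,
//                 sycl::property::queue::in_order());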
typedef sycl::event *event_ptr;
typedef sycl::queue *queue_ptr;
typedef char *device_ptr;
/// Destroy the memory pointed to by \p event.
///
/// \param event Pointer to the sycl::event address.
static void destroy_event(event_ptr event) {
delete event;
}
class device_info {
public:
// get interface
const char *get_name() const { return _name; }
char *get_name() { return _name; }
template <typename WorkItemSizesTy = sycl::id<3>,
std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> ||
std::is_same_v<WorkItemSizesTy, int *>,
int> = 0>
auto get_max_work_item_sizes() const {
if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>)
return _max_work_item_sizes;
else
return _max_work_item_sizes_i;
}
template <typename WorkItemSizesTy = sycl::id<3>,
std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> ||
std::is_same_v<WorkItemSizesTy, int *>,
int> = 0>
auto get_max_work_item_sizes() {
if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>)
return _max_work_item_sizes;
else
return _max_work_item_sizes_i;
}
bool get_host_unified_memory() const { return _host_unified_memory; }
int get_major_version() const { return _major; }
int get_minor_version() const { return _minor; }
int get_integrated() const { return _integrated; }
int get_max_clock_frequency() const { return _frequency; }
int get_max_compute_units() const { return _max_compute_units; }
int get_max_work_group_size() const { return _max_work_group_size; }
int get_max_sub_group_size() const { return _max_sub_group_size; }
int get_max_work_items_per_compute_unit() const {
return _max_work_items_per_compute_unit;
}
int get_max_register_size_per_work_group() const {
return _max_register_size_per_work_group;
}
template <typename NDRangeSizeTy = size_t *,
std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> ||
std::is_same_v<NDRangeSizeTy, int *>,
int> = 0>
auto get_max_nd_range_size() const {
if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>)
return _max_nd_range_size;
else
return _max_nd_range_size_i;
}
template <typename NDRangeSizeTy = size_t *,
std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> ||
std::is_same_v<NDRangeSizeTy, int *>,
int> = 0>
auto get_max_nd_range_size() {
if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>)
return _max_nd_range_size;
else
return _max_nd_range_size_i;
}
size_t get_global_mem_size() const { return _global_mem_size; }
size_t get_local_mem_size() const { return _local_mem_size; }
  /// Returns the maximum clock rate of the device's global memory in kHz. If
  /// the compiler does not support this API, the default value of 3200000 kHz
  /// is returned.
unsigned int get_memory_clock_rate() const { return _memory_clock_rate; }
  /// Returns the maximum bus width between the device and memory in bits. If
  /// the compiler does not support this API, the default value of 64 bits is
  /// returned.
unsigned int get_memory_bus_width() const { return _memory_bus_width; }
uint32_t get_device_id() const { return _device_id; }
std::array<unsigned char, 16> get_uuid() const { return _uuid; }
// set interface
void set_name(const char* name) {
size_t length = strlen(name);
if (length < 256) {
std::memcpy(_name, name, length + 1);
} else {
std::memcpy(_name, name, 255);
_name[255] = '\0';
}
}
void set_max_work_item_sizes(const sycl::id<3> max_work_item_sizes) {
_max_work_item_sizes = max_work_item_sizes;
for (int i = 0; i < 3; ++i)
_max_work_item_sizes_i[i] = max_work_item_sizes[i];
}
void set_host_unified_memory(bool host_unified_memory) {
_host_unified_memory = host_unified_memory;
}
void set_major_version(int major) { _major = major; }
void set_minor_version(int minor) { _minor = minor; }
void set_integrated(int integrated) { _integrated = integrated; }
void set_max_clock_frequency(int frequency) { _frequency = frequency; }
void set_max_compute_units(int max_compute_units) {
_max_compute_units = max_compute_units;
}
void set_global_mem_size(size_t global_mem_size) {
_global_mem_size = global_mem_size;
}
void set_local_mem_size(size_t local_mem_size) {
_local_mem_size = local_mem_size;
}
void set_max_work_group_size(int max_work_group_size) {
_max_work_group_size = max_work_group_size;
}
void set_max_sub_group_size(int max_sub_group_size) {
_max_sub_group_size = max_sub_group_size;
}
void
set_max_work_items_per_compute_unit(int max_work_items_per_compute_unit) {
_max_work_items_per_compute_unit = max_work_items_per_compute_unit;
}
void set_max_nd_range_size(int max_nd_range_size[]) {
for (int i = 0; i < 3; i++) {
_max_nd_range_size[i] = max_nd_range_size[i];
_max_nd_range_size_i[i] = max_nd_range_size[i];
}
}
void set_memory_clock_rate(unsigned int memory_clock_rate) {
_memory_clock_rate = memory_clock_rate;
}
void set_memory_bus_width(unsigned int memory_bus_width) {
_memory_bus_width = memory_bus_width;
}
void
set_max_register_size_per_work_group(int max_register_size_per_work_group) {
_max_register_size_per_work_group = max_register_size_per_work_group;
}
void set_device_id(uint32_t device_id) {
_device_id = device_id;
}
void set_uuid(std::array<unsigned char, 16> uuid) {
_uuid = std::move(uuid);
}
private:
char _name[256];
sycl::id<3> _max_work_item_sizes;
int _max_work_item_sizes_i[3];
bool _host_unified_memory = false;
int _major;
int _minor;
int _integrated = 0;
int _frequency;
// Set estimated value 3200000 kHz as default value.
unsigned int _memory_clock_rate = 3200000;
// Set estimated value 64 bits as default value.
unsigned int _memory_bus_width = 64;
int _max_compute_units;
int _max_work_group_size;
int _max_sub_group_size;
int _max_work_items_per_compute_unit;
int _max_register_size_per_work_group;
size_t _global_mem_size;
size_t _local_mem_size;
size_t _max_nd_range_size[3];
int _max_nd_range_size_i[3];
uint32_t _device_id;
std::array<unsigned char, 16> _uuid;
};
/// dpct device extension
class device_ext : public sycl::device {
typedef std::mutex mutex_type;
public:
device_ext() : sycl::device(), _ctx(*this) {}
~device_ext() {
std::lock_guard<mutex_type> lock(m_mutex);
clear_queues();
}
device_ext(const sycl::device &base) : sycl::device(base), _ctx(*this) {
std::lock_guard<mutex_type> lock(m_mutex);
init_queues();
}
int is_native_atomic_supported() { return 0; }
int get_major_version() const {
int major, minor;
get_version(major, minor);
return major;
}
int get_minor_version() const {
int major, minor;
get_version(major, minor);
return minor;
}
int get_max_compute_units() const {
return get_device_info().get_max_compute_units();
}
  /// Returns the maximum clock frequency of this device in kHz.
int get_max_clock_frequency() const {
return get_device_info().get_max_clock_frequency();
}
int get_integrated() const { return get_device_info().get_integrated(); }
int get_max_sub_group_size() const {
return get_device_info().get_max_sub_group_size();
}
int get_max_register_size_per_work_group() const {
return get_device_info().get_max_register_size_per_work_group();
}
int get_max_work_group_size() const {
return get_device_info().get_max_work_group_size();
}
int get_mem_base_addr_align() const {
return get_info<sycl::info::device::mem_base_addr_align>();
}
size_t get_global_mem_size() const {
return get_device_info().get_global_mem_size();
}
/// Get the number of bytes of free and total memory on the SYCL device.
/// \param [out] free_memory The number of bytes of free memory on the SYCL device.
/// \param [out] total_memory The number of bytes of total memory on the SYCL device.
void get_memory_info(size_t &free_memory, size_t &total_memory) {
#if (defined(__SYCL_COMPILER_VERSION) && __SYCL_COMPILER_VERSION >= 20221105)
if (!has(sycl::aspect::ext_intel_free_memory)) {
std::cerr << "get_memory_info: ext_intel_free_memory is not supported." << std::endl;
free_memory = 0;
} else {
free_memory = get_info<sycl::ext::intel::info::device::free_memory>();
}
#else
std::cerr << "get_memory_info: ext_intel_free_memory is not supported." << std::endl;
free_memory = 0;
#if defined(_MSC_VER) && !defined(__clang__)
#pragma message("Querying the number of bytes of free memory is not supported")
#else
#warning "Querying the number of bytes of free memory is not supported"
#endif
#endif
total_memory = get_device_info().get_global_mem_size();
}
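  // Usage sketch (illustrative only; `dev` stands for any dpct::device_ext
  // instance): querying the free/total device memory in bytes through this
  // extension. free_memory falls back to 0 when the ext_intel_free_memory
  // aspect is unavailable.
  //   size_t free_mem = 0, total_mem = 0;
  //   dev.get_memory_info(free_mem, total_mem);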
void get_device_info(device_info &out) const {
device_info prop;
prop.set_name(get_info<sycl::info::device::name>().c_str());
int major, minor;
get_version(major, minor);
prop.set_major_version(major);
prop.set_minor_version(minor);
prop.set_max_work_item_sizes(
#if (__SYCL_COMPILER_VERSION && __SYCL_COMPILER_VERSION<20220902)
// oneAPI DPC++ compiler older than 2022/09/02, where max_work_item_sizes is an enum class element
get_info<sycl::info::device::max_work_item_sizes>());
#else
// SYCL 2020-conformant code, max_work_item_sizes is a struct templated by an int
get_info<sycl::info::device::max_work_item_sizes<3>>());
#endif
prop.set_host_unified_memory(
this->has(sycl::aspect::usm_host_allocations));
prop.set_max_clock_frequency(
get_info<sycl::info::device::max_clock_frequency>() * 1000);
prop.set_max_compute_units(
get_info<sycl::info::device::max_compute_units>());
prop.set_max_work_group_size(
get_info<sycl::info::device::max_work_group_size>());
prop.set_global_mem_size(
get_info<sycl::info::device::global_mem_size>());
prop.set_local_mem_size(get_info<sycl::info::device::local_mem_size>());
#if (defined(SYCL_EXT_INTEL_DEVICE_INFO) && SYCL_EXT_INTEL_DEVICE_INFO >= 6)
if (this->has(sycl::aspect::ext_intel_memory_clock_rate)) {
unsigned int tmp =
this->get_info<sycl::ext::intel::info::device::memory_clock_rate>();
if (tmp != 0)
prop.set_memory_clock_rate(1000 * tmp);
}
if (this->has(sycl::aspect::ext_intel_memory_bus_width)) {
prop.set_memory_bus_width(
this->get_info<sycl::ext::intel::info::device::memory_bus_width>());
}
if (this->has(sycl::aspect::ext_intel_device_id)) {
prop.set_device_id(
this->get_info<sycl::ext::intel::info::device::device_id>());
}
if (this->has(sycl::aspect::ext_intel_device_info_uuid)) {
prop.set_uuid(
this->get_info<sycl::ext::intel::info::device::uuid>());
}
#elif defined(_MSC_VER) && !defined(__clang__)
#pragma message("get_device_info: querying memory_clock_rate and \
memory_bus_width are not supported by the compiler used. \
Use 3200000 kHz as memory_clock_rate default value. \
Use 64 bits as memory_bus_width default value.")
#else
#warning "get_device_info: querying memory_clock_rate and \
memory_bus_width are not supported by the compiler used. \
Use 3200000 kHz as memory_clock_rate default value. \
Use 64 bits as memory_bus_width default value."
#endif
size_t max_sub_group_size = 1;
std::vector<size_t> sub_group_sizes =
get_info<sycl::info::device::sub_group_sizes>();
for (const auto &sub_group_size : sub_group_sizes) {
if (max_sub_group_size < sub_group_size)
max_sub_group_size = sub_group_size;
}
prop.set_max_sub_group_size(max_sub_group_size);
prop.set_max_work_items_per_compute_unit(
get_info<sycl::info::device::max_work_group_size>());
int max_nd_range_size[] = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
prop.set_max_nd_range_size(max_nd_range_size);
// Estimated max register size per work group; feel free to update the value
// according to device properties.
prop.set_max_register_size_per_work_group(65536);
out = prop;
}
device_info get_device_info() const {
device_info prop;
get_device_info(prop);
return prop;
}
void reset() {
std::lock_guard<mutex_type> lock(m_mutex);
clear_queues();
init_queues();
}
sycl::queue &in_order_queue() { return *_q_in_order; }
sycl::queue &out_of_order_queue() { return *_q_out_of_order; }
sycl::queue &default_queue() {
#ifdef DPCT_USM_LEVEL_NONE
return out_of_order_queue();
#else
return in_order_queue();
#endif // DPCT_USM_LEVEL_NONE
}
void queues_wait_and_throw() {
std::unique_lock<mutex_type> lock(m_mutex);
std::vector<std::shared_ptr<sycl::queue>> current_queues(
_queues);
lock.unlock();
for (const auto &q : current_queues) {
q->wait_and_throw();
}
// Guard the destruct of current_queues to make sure the ref count is safe.
lock.lock();
}
sycl::queue *create_queue(bool enable_exception_handler = false) {
#ifdef DPCT_USM_LEVEL_NONE
return create_out_of_order_queue(enable_exception_handler);
#else
return create_in_order_queue(enable_exception_handler);
#endif // DPCT_USM_LEVEL_NONE
}
sycl::queue *create_in_order_queue(bool enable_exception_handler = false) {
std::lock_guard<mutex_type> lock(m_mutex);
return create_queue_impl(enable_exception_handler,
sycl::property::queue::in_order());
}
sycl::queue *create_out_of_order_queue(bool enable_exception_handler = false) {
std::lock_guard<mutex_type> lock(m_mutex);
return create_queue_impl(enable_exception_handler);
}
void destroy_queue(sycl::queue *&queue) {
std::lock_guard<mutex_type> lock(m_mutex);
_queues.erase(std::remove_if(_queues.begin(), _queues.end(),
[=](const std::shared_ptr<sycl::queue> &q) -> bool {
return q.get() == queue;
}),
_queues.end());
queue = nullptr;
}
void set_saved_queue(sycl::queue* q) {
std::lock_guard<mutex_type> lock(m_mutex);
_saved_queue = q;
}
sycl::queue *get_saved_queue() const {
std::lock_guard<mutex_type> lock(m_mutex);
return _saved_queue;
}
sycl::context get_context() const { return _ctx; }
private:
void clear_queues() {
_queues.clear();
_q_in_order = _q_out_of_order = _saved_queue = nullptr;
}
void init_queues() {
_q_in_order = create_queue_impl(true, sycl::property::queue::in_order());
_q_out_of_order = create_queue_impl(true);
_saved_queue = &default_queue();
}
/// Caller should acquire resource \p m_mutex before calling this function.
template <class... Properties>
sycl::queue *create_queue_impl(bool enable_exception_handler,
Properties... properties) {
sycl::async_handler eh = {};
if (enable_exception_handler) {
eh = exception_handler;
}
_queues.push_back(std::make_shared<sycl::queue>(
_ctx, *this, eh,
sycl::property_list(
#ifdef DPCT_PROFILING_ENABLED
sycl::property::queue::enable_profiling(),
#endif
properties...)));
return _queues.back().get();
}
void get_version(int &major, int &minor) const {
// Version string has the following format:
// a. OpenCL<space><major.minor><space><vendor-specific-information>
// b. <major.minor>
std::string ver;
ver = get_info<sycl::info::device::version>();
std::string::size_type i = 0;
while (i < ver.size()) {
if (isdigit(ver[i]))
break;
i++;
}
major = std::stoi(&(ver[i]));
while (i < ver.size()) {
if (ver[i] == '.')
break;
i++;
}
i++;
minor = std::stoi(&(ver[i]));
}
sycl::queue *_q_in_order, *_q_out_of_order;
sycl::queue *_saved_queue;
sycl::context _ctx;
std::vector<std::shared_ptr<sycl::queue>> _queues;
mutable mutex_type m_mutex;
};
static inline unsigned int get_tid() {
#if defined(__linux__)
return syscall(SYS_gettid);
#elif defined(_WIN64)
return GetCurrentThreadId();
#else
#error "Only support Windows and Linux."
#endif
}
/// device manager
class dev_mgr {
public:
device_ext ¤t_device() {
unsigned int dev_id=current_device_id();
check_id(dev_id);
return *_devs[dev_id];
}
device_ext &cpu_device() const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
if (_cpu_device == -1) {
throw std::runtime_error("no valid cpu device");
} else {
return *_devs[_cpu_device];
}
}
device_ext &get_device(unsigned int id) const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
check_id(id);
return *_devs[id];
}
unsigned int current_device_id() const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
auto it=_thread2dev_map.find(get_tid());
if(it != _thread2dev_map.end())
return it->second;
return DEFAULT_DEVICE_ID;
}
/// Select device with a device ID.
/// \param [in] id The id of the device which can
/// be obtained through get_device_id(const sycl::device).
void select_device(unsigned int id) {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
check_id(id);
_thread2dev_map[get_tid()]=id;
}
unsigned int device_count() { return _devs.size(); }
unsigned int get_device_id(const sycl::device &dev) {
unsigned int id = 0;
for(auto dev_item : _devs) {
if (*dev_item == dev) {
break;
}
id++;
}
return id;
}
template <class DeviceSelector>
std::enable_if_t<
std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>>
select_device(const DeviceSelector &selector = sycl::gpu_selector_v) {
sycl::device selected_device = sycl::device(selector);
unsigned int selected_device_id = get_device_id(selected_device);
select_device(selected_device_id);
}
/// Returns the instance of device manager singleton.
static dev_mgr &instance() {
static dev_mgr d_m;
return d_m;
}
dev_mgr(const dev_mgr &) = delete;
dev_mgr &operator=(const dev_mgr &) = delete;
dev_mgr(dev_mgr &&) = delete;
dev_mgr &operator=(dev_mgr &&) = delete;
private:
mutable std::recursive_mutex m_mutex;
dev_mgr() {
sycl::device default_device =
sycl::device(sycl::default_selector_v);
_devs.push_back(std::make_shared<device_ext>(default_device));
std::vector<sycl::device> sycl_all_devs =
sycl::device::get_devices(sycl::info::device_type::all);
// Collect other devices except for the default device.
if (default_device.is_cpu())
_cpu_device = 0;
for (auto &dev : sycl_all_devs) {
if (dev == default_device) {
continue;
}
_devs.push_back(std::make_shared<device_ext>(dev));
if (_cpu_device == -1 && dev.is_cpu()) {
_cpu_device = _devs.size() - 1;
}
}
}
void check_id(unsigned int id) const {
if (id >= _devs.size()) {
throw std::runtime_error("invalid device id");
}
}
std::vector<std::shared_ptr<device_ext>> _devs;
/// DEFAULT_DEVICE_ID is used if current_device_id() cannot find the current
/// thread id in _thread2dev_map, which means the default device should be
/// used for the current thread.
const unsigned int DEFAULT_DEVICE_ID = 0;
/// thread-id to device-id map.
std::map<unsigned int, unsigned int> _thread2dev_map;
int _cpu_device = -1;
};
/// Util function to get the default queue of the currently selected device,
/// depending on the USM config. Returns the default out-of-order queue when
/// USM-none is enabled, otherwise returns the default in-order queue.
static inline sycl::queue &get_default_queue() {
return dev_mgr::instance().current_device().default_queue();
}
/// Util function to get the default in-order queue of the current device in
/// the dpct device manager.
static inline sycl::queue &get_in_order_queue() {
return dev_mgr::instance().current_device().in_order_queue();
}
/// Util function to get the default out-of-order queue of the current device
/// in the dpct device manager.
static inline sycl::queue &get_out_of_order_queue() {
return dev_mgr::instance().current_device().out_of_order_queue();
}
/// Util function to get the id of the current device in the dpct device
/// manager.
static inline unsigned int get_current_device_id() {
return dev_mgr::instance().current_device_id();
}
/// Util function to get the current device.
static inline device_ext &get_current_device() {
return dev_mgr::instance().current_device();
}
/// Util function to get a device by id.
static inline device_ext &get_device(unsigned int id) {
return dev_mgr::instance().get_device(id);
}
/// Util function to get the context of the default queue of current
/// device in dpct device manager.
static inline sycl::context get_default_context() {
return dpct::get_current_device().get_context();
}
/// Util function to get a CPU device.
static inline device_ext &cpu_device() {
return dev_mgr::instance().cpu_device();
}
static inline unsigned int select_device(unsigned int id) {
dev_mgr::instance().select_device(id);
return id;
}
template <class DeviceSelector>
static inline std::enable_if_t<
std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>>
select_device(const DeviceSelector &selector = sycl::gpu_selector_v) {
dev_mgr::instance().select_device(selector);
}
static inline unsigned int get_device_id(const sycl::device &dev){
return dev_mgr::instance().get_device_id(dev);
}
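// Usage sketch (illustrative, not part of the original header): typical
// device selection and queue retrieval with the helpers above.
//
//   dpct::select_device(0);                      // bind device 0 to this thread
//   dpct::device_ext &dev = dpct::get_current_device();
//   int cu = dev.get_max_compute_units();
//   sycl::queue &q = dpct::get_default_queue();  // in-order unless DPCT_USM_LEVEL_NONE
//   unsigned int id = dpct::get_device_id(q.get_device());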
/// Util function to check whether a device supports the given sycl::aspect
/// values; throws if any aspect is missing.
inline void
has_capability_or_fail(const sycl::device &dev,
const std::initializer_list<sycl::aspect> &props) {
for (const auto &it : props) {
if (dev.has(it))
continue;
switch (it) {
case sycl::aspect::fp64:
throw std::runtime_error("'double' is not supported in '" +
dev.get_info<sycl::info::device::name>() +
"' device");
break;
case sycl::aspect::fp16:
throw std::runtime_error("'half' is not supported in '" +
dev.get_info<sycl::info::device::name>() +
"' device");
break;
default:
#define __SYCL_ASPECT(ASPECT, ID) \
case sycl::aspect::ASPECT: \
return #ASPECT;
#define __SYCL_ASPECT_DEPRECATED(ASPECT, ID, MESSAGE) __SYCL_ASPECT(ASPECT, ID)
#define __SYCL_ASPECT_DEPRECATED_ALIAS(ASPECT, ID, MESSAGE)
auto getAspectNameStr = [](sycl::aspect AspectNum) -> std::string {
switch (AspectNum) {
#include <sycl/info/aspects.def>
#include <sycl/info/aspects_deprecated.def>
default:
return "unknown aspect";
}
};
#undef __SYCL_ASPECT_DEPRECATED_ALIAS
#undef __SYCL_ASPECT_DEPRECATED
#undef __SYCL_ASPECT
throw std::runtime_error(
"'" + getAspectNameStr(it) + "' is not supported in '" +
dev.get_info<sycl::info::device::name>() + "' device");
}
break;
}
}
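// Usage sketch (illustrative): fail early when a device lacks fp64 support,
// before submitting kernels that use 'double'.
//
//   sycl::queue &q = dpct::get_default_queue();
//   dpct::has_capability_or_fail(q.get_device(), {sycl::aspect::fp64});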
} // namespace dpct
#endif // __DPCT_DEVICE_HPP__
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/memory.hpp | hpp
//==---- memory.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_HPP__
#define __DPCT_MEMORY_HPP__
#include "device.hpp"
#include <sycl/sycl.hpp>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <unordered_map>
#include <map>
#include <utility>
#include <thread>
#include <type_traits>
#if defined(__linux__)
#include <sys/mman.h>
#elif defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#else
#error "Only support Windows and Linux."
#endif
namespace dpct {
enum memcpy_direction {
host_to_host,
host_to_device,
device_to_host,
device_to_device,
automatic
};
enum memory_region {
global = 0, // device global memory
constant, // device constant memory
local, // device local memory
shared, // memory which can be accessed by host and device
};
typedef uint8_t byte_t;
/// Buffer type to be used in Memory Management runtime.
typedef sycl::buffer<byte_t> buffer_t;
/// Pitched 2D/3D memory data.
class pitched_data {
public:
pitched_data() : pitched_data(nullptr, 0, 0, 0) {}
pitched_data(void *data, size_t pitch, size_t x, size_t y)
: _data(data), _pitch(pitch), _x(x), _y(y) {}
void *get_data_ptr() { return _data; }
void set_data_ptr(void *data) { _data = data; }
size_t get_pitch() { return _pitch; }
void set_pitch(size_t pitch) { _pitch = pitch; }
size_t get_x() { return _x; }
void set_x(size_t x) { _x = x; };
size_t get_y() { return _y; }
void set_y(size_t y) { _y = y; }
private:
void *_data;
size_t _pitch, _x, _y;
};
namespace detail {
class mem_mgr {
mem_mgr() {
// Reserved address space, no real memory allocation happens here.
#if defined(__linux__)
mapped_address_space =
(byte_t *)mmap(nullptr, mapped_region_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#elif defined(_WIN64)
mapped_address_space = (byte_t *)VirtualAlloc(
NULL, // NULL specified as the base address parameter
mapped_region_size, // Size of allocation
MEM_RESERVE, // Allocate reserved pages
PAGE_NOACCESS); // Protection = no access
#else
#error "Only support Windows and Linux."
#endif
next_free = mapped_address_space;
};
public:
using buffer_id_t = int;
struct allocation {
buffer_t buffer;
byte_t *alloc_ptr;
size_t size;
};
~mem_mgr() {
#if defined(__linux__)
munmap(mapped_address_space, mapped_region_size);
#elif defined(_WIN64)
VirtualFree(mapped_address_space, 0, MEM_RELEASE);
#else
#error "Only support Windows and Linux."
#endif
};
mem_mgr(const mem_mgr &) = delete;
mem_mgr &operator=(const mem_mgr &) = delete;
mem_mgr(mem_mgr &&) = delete;
mem_mgr &operator=(mem_mgr &&) = delete;
/// Allocate
void *mem_alloc(size_t size) {
if (!size)
return nullptr;
std::lock_guard<std::mutex> lock(m_mutex);
if (next_free + size > mapped_address_space + mapped_region_size) {
throw std::runtime_error("dpct_malloc: out of memory for virtual memory pool");
}
// Allocation
sycl::range<1> r(size);
buffer_t buf(r);
allocation A{buf, next_free, size};
// Map allocation to device pointer
void *result = next_free;
m_map.emplace(next_free + size, A);
// Update pointer to the next free space.
next_free += (size + extra_padding + alignment - 1) & ~(alignment - 1);
return result;
}
/// Deallocate
void mem_free(const void *ptr) {
if (!ptr)
return;
std::lock_guard<std::mutex> lock(m_mutex);
auto it = get_map_iterator(ptr);
m_map.erase(it);
}
/// map: device pointer -> allocation(buffer, alloc_ptr, size)
allocation translate_ptr(const void *ptr) {
std::lock_guard<std::mutex> lock(m_mutex);
auto it = get_map_iterator(ptr);
return it->second;
}
/// Check whether the pointer is a device pointer.
bool is_device_ptr(const void *ptr) const {
std::lock_guard<std::mutex> lock(m_mutex);
return (mapped_address_space <= ptr) &&
(ptr < mapped_address_space + mapped_region_size);
}
/// Returns the instance of memory manager singleton.
static mem_mgr &instance() {
static mem_mgr m;
return m;
}
private:
std::map<byte_t *, allocation> m_map;
mutable std::mutex m_mutex;
byte_t *mapped_address_space;
byte_t *next_free;
const size_t mapped_region_size = 128ull * 1024 * 1024 * 1024;
const size_t alignment = 256;
/// This padding may be set to a positive value to help debug
/// out-of-bounds accesses.
const size_t extra_padding = 0;
std::map<byte_t *, allocation>::iterator get_map_iterator(const void *ptr) {
auto it = m_map.upper_bound((byte_t *)ptr);
if (it == m_map.end()) {
// Not a virtual pointer.
throw std::runtime_error("can not get buffer from non-virtual pointer");
}
const allocation &alloc = it->second;
if (ptr < alloc.alloc_ptr) {
// Out of bound.
// This may happen if there's a gap between allocations due to alignment
// or extra padding and pointer points to this gap.
throw std::runtime_error("invalid virtual pointer");
}
return it;
}
};
template <class T, memory_region Memory, size_t Dimension> class accessor;
template <memory_region Memory, class T = byte_t> class memory_traits {
public:
static constexpr sycl::access::target target =
sycl::access::target::device;
static constexpr sycl::access_mode mode =
(Memory == constant) ? sycl::access_mode::read
: sycl::access_mode::read_write;
static constexpr size_t type_size = sizeof(T);
using element_t =
typename std::conditional<Memory == constant, const T, T>::type;
using value_t = typename std::remove_cv<T>::type;
template <size_t Dimension = 1>
using accessor_t = typename std::conditional<
Memory == local, sycl::local_accessor<value_t, Dimension>,
sycl::accessor<T, Dimension, mode, target>>::type;
using pointer_t = T *;
};
static inline void *dpct_malloc(size_t size, sycl::queue &q) {
#ifdef DPCT_USM_LEVEL_NONE
return mem_mgr::instance().mem_alloc(size * sizeof(byte_t));
#else
return sycl::malloc_device(size, q.get_device(), q.get_context());
#endif // DPCT_USM_LEVEL_NONE
}
#define PITCH_DEFAULT_ALIGN(x) (((x) + 31) & ~(0x1F))
static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y, size_t z,
sycl::queue &q) {
pitch = PITCH_DEFAULT_ALIGN(x);
return dpct_malloc(pitch * y * z, q);
}
/// Set \p value to the first \p size bytes starting from \p dev_ptr in \p q.
///
/// \param q The queue in which the operation is done.
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \returns An event representing the memset operation.
static inline sycl::event dpct_memset(sycl::queue &q, void *dev_ptr,
int value, size_t size) {
#ifdef DPCT_USM_LEVEL_NONE
auto &mm = mem_mgr::instance();
assert(mm.is_device_ptr(dev_ptr));
auto alloc = mm.translate_ptr(dev_ptr);
size_t offset = (byte_t *)dev_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.fill(acc, (byte_t)value);
});
#else
return q.memset(dev_ptr, value, size);
#endif // DPCT_USM_LEVEL_NONE
}
/// Set \p value to the 3D memory region pointed by \p data in \p q. \p size
/// specifies the 3D memory size to set.
///
/// \param q The queue in which the operation is done.
/// \param data Pointer to the device memory region.
/// \param value Value to be set.
/// \param size Memory region size.
/// \returns An event list representing the memset operations.
static inline std::vector<sycl::event>
dpct_memset(sycl::queue &q, pitched_data data, int value,
sycl::range<3> size) {
std::vector<sycl::event> event_list;
size_t slice = data.get_pitch() * data.get_y();
unsigned char *data_surface = (unsigned char *)data.get_data_ptr();
for (size_t z = 0; z < size.get(2); ++z) {
unsigned char *data_ptr = data_surface;
for (size_t y = 0; y < size.get(1); ++y) {
event_list.push_back(dpct_memset(q, data_ptr, value, size.get(0)));
data_ptr += data.get_pitch();
}
data_surface += slice;
}
return event_list;
}
/// memset 2D matrix with pitch.
static inline std::vector<sycl::event>
dpct_memset(sycl::queue &q, void *ptr, size_t pitch, int val, size_t x,
size_t y) {
return dpct_memset(q, pitched_data(ptr, pitch, x, 1), val,
sycl::range<3>(x, y, 1));
}
enum class pointer_access_attribute {
host_only = 0,
device_only,
host_device,
end
};
static pointer_access_attribute get_pointer_attribute(sycl::queue &q,
const void *ptr) {
#ifdef DPCT_USM_LEVEL_NONE
return mem_mgr::instance().is_device_ptr(ptr)
? pointer_access_attribute::device_only
: pointer_access_attribute::host_only;
#else
switch (sycl::get_pointer_type(ptr, q.get_context())) {
case sycl::usm::alloc::unknown:
return pointer_access_attribute::host_only;
case sycl::usm::alloc::device:
return pointer_access_attribute::device_only;
case sycl::usm::alloc::shared:
case sycl::usm::alloc::host:
return pointer_access_attribute::host_device;
}
#endif
}
static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr,
const void *from_ptr,
memcpy_direction dir) {
switch (dir) {
case memcpy_direction::host_to_host:
case memcpy_direction::host_to_device:
case memcpy_direction::device_to_host:
case memcpy_direction::device_to_device:
return dir;
case memcpy_direction::automatic: {
// table[to_attribute][from_attribute]
static const memcpy_direction
direction_table[static_cast<unsigned>(pointer_access_attribute::end)]
[static_cast<unsigned>(pointer_access_attribute::end)] =
{{memcpy_direction::host_to_host,
memcpy_direction::device_to_host,
memcpy_direction::host_to_host},
{memcpy_direction::host_to_device,
memcpy_direction::device_to_device,
memcpy_direction::device_to_device},
{memcpy_direction::host_to_host,
memcpy_direction::device_to_device,
memcpy_direction::device_to_device}};
return direction_table[static_cast<unsigned>(get_pointer_attribute(
q, to_ptr))][static_cast<unsigned>(get_pointer_attribute(q, from_ptr))];
}
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
}
static sycl::event
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction,
const std::vector<sycl::event> &dep_events = {}) {
if (!size)
return sycl::event{};
#ifdef DPCT_USM_LEVEL_NONE
auto &mm = mem_mgr::instance();
auto real_direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
switch (real_direction) {
case host_to_host:
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
cgh.host_task([=] { std::memcpy(to_ptr, from_ptr, size); });
});
case host_to_device: {
auto alloc = mm.translate_ptr(to_ptr);
size_t offset = (byte_t *)to_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.copy(from_ptr, acc);
});
}
case device_to_host: {
auto alloc = mm.translate_ptr(from_ptr);
size_t offset = (byte_t *)from_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.copy(acc, to_ptr);
});
}
case device_to_device: {
auto to_alloc = mm.translate_ptr(to_ptr);
auto from_alloc = mm.translate_ptr(from_ptr);
size_t to_offset = (byte_t *)to_ptr - to_alloc.alloc_ptr;
size_t from_offset = (byte_t *)from_ptr - from_alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto to_o = sycl::id<1>(to_offset);
auto from_o = sycl::id<1>(from_offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
to_acc(to_alloc.buffer, cgh, r, to_o);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
from_acc(from_alloc.buffer, cgh, r, from_o);
cgh.copy(from_acc, to_acc);
});
}
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
#else
return q.memcpy(to_ptr, from_ptr, size, dep_events);
#endif // DPCT_USM_LEVEL_NONE
}
// Get actual copy range and make sure it will not exceed range.
static inline size_t get_copy_range(sycl::range<3> size, size_t slice,
size_t pitch) {
return slice * (size.get(2) - 1) + pitch * (size.get(1) - 1) + size.get(0);
}
static inline size_t get_offset(sycl::id<3> id, size_t slice,
size_t pitch) {
return slice * id.get(2) + pitch * id.get(1) + id.get(0);
}
/// copy 3D matrix specified by \p size from 3D matrix specified by \p from_ptr
/// and \p from_range to another specified by \p to_ptr and \p to_range.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
sycl::range<3> to_range, sycl::range<3> from_range,
sycl::id<3> to_id, sycl::id<3> from_id,
sycl::range<3> size, memcpy_direction direction,
const std::vector<sycl::event> &dep_events = {}) {
// RAII for host pointer
class host_buffer {
void *_buf;
size_t _size;
sycl::queue &_q;
const std::vector<sycl::event> &_deps; // free operation depends
public:
host_buffer(size_t size, sycl::queue &q,
const std::vector<sycl::event> &deps)
: _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {}
void *get_ptr() const { return _buf; }
size_t get_size() const { return _size; }
~host_buffer() {
if (_buf) {
_q.submit([&](sycl::handler &cgh) {
cgh.depends_on(_deps);
cgh.host_task([buf = _buf] { std::free(buf); });
});
}
}
};
std::vector<sycl::event> event_list;
size_t to_slice = to_range.get(1) * to_range.get(0),
from_slice = from_range.get(1) * from_range.get(0);
unsigned char *to_surface =
(unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0));
const unsigned char *from_surface =
(const unsigned char *)from_ptr +
get_offset(from_id, from_slice, from_range.get(0));
if (to_slice == from_slice && to_slice == size.get(1) * size.get(0)) {
return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2),
direction, dep_events)};
}
direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
size_t size_slice = size.get(1) * size.get(0);
switch (direction) {
case host_to_host:
for (size_t z = 0; z < size.get(2); ++z) {
unsigned char *to_ptr = to_surface;
const unsigned char *from_ptr = from_surface;
if (to_range.get(0) == from_range.get(0) &&
to_range.get(0) == size.get(0)) {
event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice,
direction, dep_events));
} else {
for (size_t y = 0; y < size.get(1); ++y) {
event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0),
direction, dep_events));
to_ptr += to_range.get(0);
from_ptr += from_range.get(0);
}
}
to_surface += to_slice;
from_surface += from_slice;
}
break;
case host_to_device: {
host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q,
event_list);
std::vector<sycl::event> host_events;
if (to_slice == size_slice) {
// Copy host data to a temp host buffer with the shape of target.
host_events =
dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size,
host_to_host, dep_events);
} else {
// Copy host data to a temp host buffer with the shape of target.
host_events = dpct_memcpy(
q, buf.get_ptr(), from_surface, to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
// The destination may contain padding data that is not known to be unused,
// so fill the temp buffer with the existing device contents first.
std::vector<sycl::event>{
dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(),
device_to_host, dep_events)});
}
// Copy from temp host buffer to device with only one submit.
event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(),
buf.get_size(), host_to_device,
host_events));
break;
}
case device_to_host: {
host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q,
event_list);
// Copy from host temp buffer to host target with reshaping.
event_list = dpct_memcpy(
q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0),
sycl::id<3>(0, 0, 0), size, host_to_host,
// Copy from device to temp host buffer with only one submit.
std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface,
buf.get_size(),
device_to_host, dep_events)});
break;
}
case device_to_device:
#ifdef DPCT_USM_LEVEL_NONE
{
auto &mm = mem_mgr::instance();
auto to_alloc = mm.translate_ptr(to_surface);
auto from_alloc = mm.translate_ptr(from_surface);
size_t to_offset = (byte_t *)to_surface - to_alloc.alloc_ptr;
size_t from_offset = (byte_t *)from_surface - from_alloc.alloc_ptr;
event_list.push_back(q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto to_o = sycl::id<1>(to_offset);
auto from_o = sycl::id<1>(from_offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
to_acc(to_alloc.buffer, cgh,
get_copy_range(size, to_slice, to_range.get(0)), to_o);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
from_acc(from_alloc.buffer, cgh,
get_copy_range(size, from_slice, from_range.get(0)), from_o);
cgh.parallel_for<class dpct_memcpy_3d_detail_usmnone>(
size,
[=](sycl::id<3> id) {
to_acc[get_offset(id, to_slice, to_range.get(0))] =
from_acc[get_offset(id, from_slice, from_range.get(0))];
});
}));
}
#else
event_list.push_back(q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
cgh.parallel_for<class dpct_memcpy_3d_detail>(
size,
[=](sycl::id<3> id) {
to_surface[get_offset(id, to_slice, to_range.get(0))] =
from_surface[get_offset(id, from_slice, from_range.get(0))];
});
}));
#endif
break;
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
return event_list;
}
/// memcpy 2D/3D matrix specified by pitched_data.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id,
pitched_data from, sycl::id<3> from_id, sycl::range<3> size,
memcpy_direction direction = automatic) {
return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(),
sycl::range<3>(to.get_pitch(), to.get_y(), 1),
sycl::range<3>(from.get_pitch(), from.get_y(), 1), to_id, from_id,
size, direction);
}
/// memcpy 2D matrix with pitch.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
size_t to_pitch, size_t from_pitch, size_t x, size_t y,
memcpy_direction direction = automatic) {
return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1),
sycl::range<3>(from_pitch, y, 1),
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0),
sycl::range<3>(x, y, 1), direction);
}
namespace deprecated {
template <typename T, sycl::usm::alloc AllocKind>
class usm_allocator {
private:
using Alloc = sycl::usm_allocator<T, AllocKind>;
Alloc _impl;
public:
using value_type = typename std::allocator_traits<Alloc>::value_type;
using pointer = typename std::allocator_traits<Alloc>::pointer;
using const_pointer = typename std::allocator_traits<Alloc>::const_pointer;
using void_pointer = typename std::allocator_traits<Alloc>::void_pointer;
using const_void_pointer =
typename std::allocator_traits<Alloc>::const_void_pointer;
using reference = typename std::allocator_traits<Alloc>::value_type &;
using const_reference =
const typename std::allocator_traits<Alloc>::value_type &;
using difference_type =
typename std::allocator_traits<Alloc>::difference_type;
using size_type = typename std::allocator_traits<Alloc>::size_type;
using propagate_on_container_copy_assignment = typename std::allocator_traits<
Alloc>::propagate_on_container_copy_assignment;
using propagate_on_container_move_assignment = typename std::allocator_traits<
Alloc>::propagate_on_container_move_assignment;
using propagate_on_container_swap =
typename std::allocator_traits<Alloc>::propagate_on_container_swap;
using is_always_equal =
typename std::allocator_traits<Alloc>::is_always_equal;
template <typename U> struct rebind {
typedef usm_allocator<U, AllocKind> other;
};
usm_allocator() : _impl(dpct::get_default_queue()) {}
~usm_allocator() {}
usm_allocator(const usm_allocator &other) : _impl(other._impl) {}
usm_allocator(usm_allocator &&other) : _impl(std::move(other._impl)) {}
pointer address(reference r) { return &r; }
const_pointer address(const_reference r) { return &r; }
pointer allocate(size_type cnt, const_void_pointer hint = nullptr) {
return std::allocator_traits<Alloc>::allocate(_impl, cnt, hint);
}
void deallocate(pointer p, size_type cnt) {
std::allocator_traits<Alloc>::deallocate(_impl, p, cnt);
}
size_type max_size() const {
return std::allocator_traits<Alloc>::max_size(_impl);
}
bool operator==(const usm_allocator &other) const { return _impl == other._impl; }
bool operator!=(const usm_allocator &other) const { return _impl != other._impl; }
};
} // namespace deprecated
inline void dpct_free(void *ptr,
const sycl::queue &q) {
if (ptr) {
#ifdef DPCT_USM_LEVEL_NONE
detail::mem_mgr::instance().mem_free(ptr);
#else
sycl::free(ptr, q.get_context());
#endif // DPCT_USM_LEVEL_NONE
}
}
} // namespace detail
#ifdef DPCT_USM_LEVEL_NONE
/// Check whether the pointer \p ptr is a device pointer.
///
/// \param ptr The pointer to be checked.
/// \returns true if \p ptr is a device pointer.
template<class T>
static inline bool is_device_ptr(T ptr) {
if constexpr (std::is_pointer<T>::value) {
return detail::mem_mgr::instance().is_device_ptr(ptr);
}
return false;
}
#endif
/// Get the buffer and the offset of a piece of memory pointed to by \p ptr.
///
/// \param ptr Pointer to a piece of memory.
/// If NULL is passed as an argument, an exception will be thrown.
/// \returns a pair containing both the buffer and the offset.
static std::pair<buffer_t, size_t> get_buffer_and_offset(const void *ptr) {
if (ptr) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
size_t offset = (byte_t *)ptr - alloc.alloc_ptr;
return std::make_pair(alloc.buffer, offset);
} else {
throw std::runtime_error(
"NULL pointer argument in get_buffer_and_offset function is invalid");
}
}
/// Get the data pointed from \p ptr as a 1D buffer reinterpreted as type T.
template <typename T> static sycl::buffer<T> get_buffer(const void *ptr) {
if (!ptr)
return sycl::buffer<T>(sycl::range<1>(0));
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
return alloc.buffer.reinterpret<T>(
sycl::range<1>(alloc.size / sizeof(T)));
}
/// Get the buffer of a piece of memory pointed to by \p ptr.
///
/// \param ptr Pointer to a piece of memory.
/// \returns the buffer.
static buffer_t get_buffer(const void *ptr) {
return detail::mem_mgr::instance().translate_ptr(ptr).buffer;
}
/// A wrapper class contains an accessor and an offset.
template <typename dataT,
sycl::access_mode accessMode = sycl::access_mode::read_write>
class access_wrapper {
sycl::accessor<byte_t, 1, accessMode> accessor;
size_t offset;
public:
/// Construct the accessor wrapper for memory pointed by \p ptr.
///
/// \param ptr Pointer to memory.
/// \param cgh The command group handler.
access_wrapper(const void *ptr, sycl::handler &cgh)
: accessor(get_buffer(ptr).get_access<accessMode>(cgh)), offset(0) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
offset = (byte_t *)ptr - alloc.alloc_ptr;
}
/// Get the device pointer.
///
/// \returns a device pointer with offset.
dataT get_raw_pointer() const { return (dataT)(&accessor[0] + offset); }
};
/// Get the accessor for memory pointed by \p ptr.
///
/// \param ptr Pointer to memory.
/// If NULL is passed as an argument, an exception will be thrown.
/// \param cgh The command group handler.
/// \returns an accessor.
template <sycl::access_mode accessMode = sycl::access_mode::read_write>
static sycl::accessor<byte_t, 1, accessMode>
get_access(const void *ptr, sycl::handler &cgh) {
if (ptr) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
return alloc.buffer.get_access<accessMode>(cgh);
} else {
throw std::runtime_error(
"NULL pointer argument in get_access function is invalid");
}
}
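// Usage sketch (illustrative): with DPCT_USM_LEVEL_NONE defined, 'dev_ptr' is a
// hypothetical virtual pointer returned by dpct_malloc; access_wrapper turns it
// into a raw device pointer inside the kernel.
//
//   q.submit([&](sycl::handler &cgh) {
//     dpct::access_wrapper<float *> acc(dev_ptr, cgh);
//     cgh.parallel_for(sycl::range<1>(n), [=](sycl::id<1> i) {
//       float *raw = acc.get_raw_pointer();
//       raw[i] = 0.f;
//     });
//   });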
/// Allocate memory block on the device.
/// \param num_bytes Number of bytes to allocate.
/// \param q Queue to execute the allocate task.
/// \returns A pointer to the newly allocated memory.
template <typename T>
static inline void *dpct_malloc(T num_bytes,
sycl::queue &q = get_default_queue()) {
return detail::dpct_malloc(static_cast<size_t>(num_bytes), q);
}
/// Get the host pointer from a buffer that is mapped to virtual pointer ptr.
/// \param ptr Virtual Pointer mapped to device buffer
/// \returns A host pointer
template <typename T> static inline T *get_host_ptr(const void *ptr) {
auto BufferOffset = get_buffer_and_offset(ptr);
auto host_ptr =
BufferOffset.first.get_host_access()
.get_pointer();
return (T *)(host_ptr + BufferOffset.second);
}
/// Allocate memory block for 3D array on the device.
/// \param size Size of the memory block, in bytes.
/// \param q Queue to execute the allocate task.
/// \returns A pitched_data object which stores the memory info.
static inline pitched_data
dpct_malloc(sycl::range<3> size, sycl::queue &q = get_default_queue()) {
pitched_data pitch(nullptr, 0, size.get(0), size.get(1));
size_t pitch_size;
pitch.set_data_ptr(detail::dpct_malloc(pitch_size, size.get(0), size.get(1),
size.get(2), q));
pitch.set_pitch(pitch_size);
return pitch;
}
/// Allocate memory block for 2D array on the device.
/// \param [out] pitch Aligned size of x in bytes.
/// \param x Range in dim x.
/// \param y Range in dim y.
/// \param q Queue to execute the allocate task.
/// \returns A pointer to the newly allocated memory.
static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y,
sycl::queue &q = get_default_queue()) {
return detail::dpct_malloc(pitch, x, y, 1, q);
}
/// free
/// \param ptr Pointer to free.
/// \param q Queue to execute the free task.
/// \returns no return value.
static inline void dpct_free(void *ptr,
sycl::queue &q = get_default_queue()) {
detail::dpct_free(ptr, q);
}
/// Free the device memory pointed by a batch of pointers in \p pointers which
/// are related to \p q after \p events completed.
///
/// \param pointers The pointers point to the device memory requested to be freed.
/// \param events The events to be waited.
/// \param q The sycl::queue the memory relates to.
inline void async_dpct_free(const std::vector<void *> &pointers,
const std::vector<sycl::event> &events,
sycl::queue &q = get_default_queue()) {
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(events);
cgh.host_task([=] {
for (auto p : pointers)
if (p) {
detail::dpct_free(p, q);
}
});
});
}
/// Synchronously copies \p size bytes from the address specified by \p from_ptr
/// to the address specified by \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device,
/// \a device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction).wait();
}
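// Usage sketch (illustrative): allocate device memory, copy a host vector into
// it, and release it. 'n' and 'h_src' are hypothetical host-side names.
//
//   sycl::queue &q = dpct::get_default_queue();
//   float *d_data = (float *)dpct::dpct_malloc(n * sizeof(float), q);
//   dpct::dpct_memcpy(d_data, h_src.data(), n * sizeof(float),
//                     dpct::host_to_device, q);
//   dpct::dpct_free(d_data, q);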
/// Asynchronously copies \p size bytes from the address specified by \p
/// from_ptr to the address specified by \p to_ptr. The value of \p direction is
/// used to set the copy direction, it can be \a host_to_host, \a
/// host_to_device, \a device_to_host, \a device_to_device or \a automatic. The
/// return of the function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void async_dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction);
}
/// Synchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while \p
/// from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(void *to_ptr, size_t to_pitch,
const void *from_ptr, size_t from_pitch,
size_t x, size_t y,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
sycl::event::wait(detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch,
from_pitch, x, y, direction));
}
/// Asynchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while \p
/// \p from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The return of the
/// function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void
async_dpct_memcpy(void *to_ptr, size_t to_pitch, const void *from_ptr,
size_t from_pitch, size_t x, size_t y,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch, from_pitch, x, y,
direction);
}
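// Usage sketch (illustrative): allocate a pitched 2D region and copy an
// x-by-y matrix from contiguous host memory into it. 'h_mat', 'x' and 'y'
// are hypothetical names; x is given in elements here.
//
//   size_t pitch = 0;
//   void *d_mat = dpct::dpct_malloc(pitch, x * sizeof(float), y);
//   dpct::dpct_memcpy(d_mat, pitch, h_mat, x * sizeof(float),
//                     x * sizeof(float), y, dpct::host_to_device);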
/// Synchronously copies a subset of the 3D matrix specified by \p from to
/// another 3D matrix specified by \p to. The source and destination positions
/// are specified by \p from_pos and \p to_pos. The copied matrix size is
/// specified by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The function will return after the copy is completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of source.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(pitched_data to, sycl::id<3> to_pos,
pitched_data from, sycl::id<3> from_pos,
sycl::range<3> size,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
sycl::event::wait(
detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction));
}
/// Asynchronously copies a subset of the 3D matrix specified by \p from to
/// another 3D matrix specified by \p to. The source and destination positions
/// are specified by \p from_pos and \p to_pos. The copied matrix size is
/// specified by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The return of the function does NOT guarantee the copy is
/// completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of source.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void
async_dpct_memcpy(pitched_data to, sycl::id<3> to_pos, pitched_data from,
sycl::id<3> from_pos, sycl::range<3> size,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction);
}
/// Synchronously sets \p value to the first \p size bytes starting from \p
/// dev_ptr. The function will return after the memset operation is completed.
///
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static void dpct_memset(void *dev_ptr, int value, size_t size,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, dev_ptr, value, size).wait();
}
/// Asynchronously sets \p value to the first \p size bytes starting from \p
/// dev_ptr. The return of the function does NOT guarantee the memset operation
/// is completed.
///
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \returns no return value.
static void async_dpct_memset(void *dev_ptr, int value, size_t size,
sycl::queue &q = dpct::get_default_queue()) {
detail::dpct_memset(q, dev_ptr, value, size);
}
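// Usage sketch (illustrative): zero-initialize a device allocation, either
// blocking or non-blocking.
//
//   dpct::dpct_memset(d_data, 0, n * sizeof(float));        // waits for completion
//   dpct::async_dpct_memset(d_data, 0, n * sizeof(float));  // returns immediately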
/// Sets \p value to the 2D memory region pointed to by \p ptr in \p q. \p x and
/// \p y specify the 2D memory size to set. \p pitch is the bytes in the linear
/// dimension, including padding bytes. The function will return after the
/// memset operation is completed.
///
/// \param ptr Pointer to the device memory region.
/// \param pitch Bytes in linear dimension, including padding bytes.
/// \param value Value to be set.
/// \param x The memory size to set in the linear dimension.
/// \param y The memory size to set in the second dimension.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void dpct_memset(void *ptr, size_t pitch, int val, size_t x,
size_t y,
sycl::queue &q = get_default_queue()) {
sycl::event::wait(detail::dpct_memset(q, ptr, pitch, val, x, y));
}
/// Sets \p value to the 2D memory region pointed to by \p ptr in \p q. \p x and
/// \p y specify the 2D memory size to set. \p pitch is the bytes in the linear
/// dimension, including padding bytes. The return of the function does NOT
/// guarantee the memset operation is completed.
///
/// \param ptr Pointer to the device memory region.
/// \param pitch Bytes in linear dimension, including padding bytes.
/// \param value Value to be set.
/// \param x The memory size to set in the linear dimension.
/// \param y The memory size to set in the second dimension.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(void *ptr, size_t pitch, int val, size_t x,
size_t y,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, ptr, pitch, val, x, y);
}
/// Sets \p value to the 3D memory region specified by \p pitch in \p q. \p size
/// specifies the 3D memory size to set. The function will return after the
/// memset operation is completed.
///
/// \param pitch Specify the 3D memory region.
/// \param value Value to be set.
/// \param size The 3D memory size to set.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void dpct_memset(pitched_data pitch, int val,
sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
sycl::event::wait(detail::dpct_memset(q, pitch, val, size));
}
/// Sets \p value to the 3D memory region specified by \p pitch in \p q. \p size
/// specifies the 3D memory size to set. The return of the function does NOT
/// guarantee the memset operation is completed.
///
/// \param pitch Specify the 3D memory region.
/// \param value Value to be set.
/// \param size The 3D memory size to set.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(pitched_data pitch, int val,
sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, pitch, val, size);
}
/// dpct accessor used as device function parameter.
template <class T, memory_region Memory, size_t Dimension> class accessor;
template <class T, memory_region Memory> class accessor<T, Memory, 3> {
public:
using memory_t = detail::memory_traits<Memory, T>;
using element_t = typename memory_t::element_t;
using pointer_t = typename memory_t::pointer_t;
using accessor_t = typename memory_t::template accessor_t<3>;
accessor(pointer_t data, const sycl::range<3> &in_range)
: _data(data), _range(in_range) {}
template <memory_region M = Memory>
accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
: accessor(acc, acc.get_range()) {}
accessor(const accessor_t &acc, const sycl::range<3> &in_range)
: accessor(acc.get_pointer(), in_range) {}
accessor<T, Memory, 2> operator[](size_t index) const {
sycl::range<2> sub(_range.get(1), _range.get(2));
return accessor<T, Memory, 2>(_data + index * sub.size(), sub);
}
pointer_t get_ptr() const { return _data; }
private:
pointer_t _data;
sycl::range<3> _range;
};
template <class T, memory_region Memory> class accessor<T, Memory, 2> {
public:
using memory_t = detail::memory_traits<Memory, T>;
using element_t = typename memory_t::element_t;
using pointer_t = typename memory_t::pointer_t;
using accessor_t = typename memory_t::template accessor_t<2>;
accessor(pointer_t data, const sycl::range<2> &in_range)
: _data(data), _range(in_range) {}
template <memory_region M = Memory>
accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
: accessor(acc, acc.get_range()) {}
accessor(const accessor_t &acc, const sycl::range<2> &in_range)
: accessor(acc.get_pointer(), in_range) {}
pointer_t operator[](size_t index) const {
return _data + _range.get(1) * index;
}
pointer_t get_ptr() const { return _data; }
private:
pointer_t _data;
sycl::range<2> _range;
};
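// Usage sketch (illustrative): wrap a hypothetical device pointer 'd_ptr' in a
// 2-D dpct::accessor so a device function can index it as a matrix.
//
//   dpct::accessor<float, dpct::global, 2> view(d_ptr, sycl::range<2>(rows, cols));
//   // inside a kernel: view[r][c] addresses row r, column c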
namespace detail {
/// Device variable with address space of shared, global or constant.
template <class T, memory_region Memory, size_t Dimension>
class device_memory {
public:
using accessor_t =
typename detail::memory_traits<Memory, T>::template accessor_t<Dimension>;
using value_t = typename detail::memory_traits<Memory, T>::value_t;
using dpct_accessor_t = dpct::accessor<T, Memory, Dimension>;
device_memory() : device_memory(sycl::range<Dimension>(1)) {}
/// Constructor of 1-D array with initializer list
device_memory(
const sycl::range<Dimension> &in_range,
std::initializer_list<value_t> &&init_list)
: device_memory(in_range) {
assert(init_list.size() <= in_range.size());
_host_ptr = (value_t *)std::malloc(_size);
std::memset(_host_ptr, 0, _size);
std::memcpy(_host_ptr, init_list.begin(), init_list.size() * sizeof(T));
}
/// Constructor of 2-D array with initializer list
template <size_t D = Dimension>
device_memory(
const typename std::enable_if<D == 2, sycl::range<2>>::type &in_range,
std::initializer_list<std::initializer_list<value_t>> &&init_list)
: device_memory(in_range) {
assert(init_list.size() <= in_range[0]);
_host_ptr = (value_t *)std::malloc(_size);
std::memset(_host_ptr, 0, _size);
auto tmp_data = _host_ptr;
for (auto sub_list : init_list) {
assert(sub_list.size() <= in_range[1]);
std::memcpy(tmp_data, sub_list.begin(), sub_list.size() * sizeof(T));
tmp_data += in_range[1];
}
}
/// Constructor with range
device_memory(const sycl::range<Dimension> &range_in)
: _size(range_in.size() * sizeof(T)), _range(range_in), _reference(false),
_host_ptr(nullptr), _device_ptr(nullptr) {
static_assert(
(Memory == global) || (Memory == constant) || (Memory == shared),
"device memory region should be global, constant or shared");
// Make sure that singleton class mem_mgr and dev_mgr will destruct later
// than this.
detail::mem_mgr::instance();
dev_mgr::instance();
}
/// Constructor with range
template <class... Args>
device_memory(Args... Arguments)
: device_memory(sycl::range<Dimension>(Arguments...)) {}
~device_memory() {
if (_device_ptr && !_reference)
dpct::dpct_free(_device_ptr);
if (_host_ptr)
std::free(_host_ptr);
}
/// Allocate memory with the default queue, and initialize it if an initial value was provided.
void init() {
init(dpct::get_default_queue());
}
/// Allocate memory with the specified queue, and initialize it if an initial value was provided.
void init(sycl::queue &q) {
if (_device_ptr)
return;
if (!_size)
return;
allocate_device(q);
if (_host_ptr)
detail::dpct_memcpy(q, _device_ptr, _host_ptr, _size, host_to_device);
}
/// The variable is assigned to a device pointer.
void assign(value_t *src, size_t size) {
this->~device_memory();
new (this) device_memory(src, size);
}
/// Get the memory pointer of the memory object, which is a virtual pointer
/// when USM is not used and a device pointer when USM is used.
value_t *get_ptr() {
return get_ptr(get_default_queue());
}
/// Get the memory pointer of the memory object, which is a virtual pointer
/// when USM is not used and a device pointer when USM is used.
value_t *get_ptr(sycl::queue &q) {
init(q);
return _device_ptr;
}
/// Get the device memory object size in bytes.
size_t get_size() { return _size; }
template <size_t D = Dimension>
typename std::enable_if<D == 1, T>::type &operator[](size_t index) {
init();
#ifdef DPCT_USM_LEVEL_NONE
return dpct::get_buffer<typename std::enable_if<D == 1, T>::type>(
_device_ptr)
.template get_access<sycl::access_mode::read_write>()[index];
#else
return _device_ptr[index];
#endif // DPCT_USM_LEVEL_NONE
}
#ifdef DPCT_USM_LEVEL_NONE
/// Get sycl::accessor for the device memory object when usm is not used.
accessor_t get_access(sycl::handler &cgh) {
return get_buffer(_device_ptr)
.template reinterpret<T, Dimension>(_range)
.template get_access<detail::memory_traits<Memory, T>::mode,
detail::memory_traits<Memory, T>::target>(cgh);
}
#else
/// Get dpct::accessor with dimension info for the device memory object
/// when usm is used and dimension is greater than 1.
template <size_t D = Dimension>
typename std::enable_if<D != 1, dpct_accessor_t>::type
get_access(sycl::handler &cgh) {
return dpct_accessor_t((T *)_device_ptr, _range);
}
#endif // DPCT_USM_LEVEL_NONE
private:
device_memory(value_t *memory_ptr, size_t size)
: _size(size), _range(size / sizeof(T)), _reference(true),
_device_ptr(memory_ptr) {}
void allocate_device(sycl::queue &q) {
#ifndef DPCT_USM_LEVEL_NONE
if (Memory == shared) {
_device_ptr = (value_t *)sycl::malloc_shared(
_size, q.get_device(), q.get_context());
return;
}
#endif
_device_ptr = (value_t *)detail::dpct_malloc(_size, q);
}
size_t _size;
sycl::range<Dimension> _range;
bool _reference;
value_t *_host_ptr;
value_t *_device_ptr;
};
template <class T, memory_region Memory>
class device_memory<T, Memory, 0> : public device_memory<T, Memory, 1> {
public:
using base = device_memory<T, Memory, 1>;
using value_t = typename base::value_t;
using accessor_t =
typename detail::memory_traits<Memory, T>::template accessor_t<0>;
/// Constructor with initial value.
device_memory(const value_t &val) : base(sycl::range<1>(1), {val}) {}
/// Default constructor
device_memory() : base(1) {}
#ifdef DPCT_USM_LEVEL_NONE
/// Get sycl::accessor for the device memory object when usm is not used.
accessor_t get_access(sycl::handler &cgh) {
auto buf = get_buffer(base::get_ptr())
.template reinterpret<T, 1>(sycl::range<1>(1));
return accessor_t(buf, cgh);
}
#endif // DPCT_USM_LEVEL_NONE
};
}
template <class T, size_t Dimension>
using global_memory = detail::device_memory<T, global, Dimension>;
template <class T, size_t Dimension>
using constant_memory = detail::device_memory<T, constant, Dimension>;
template <class T, size_t Dimension>
using shared_memory = detail::device_memory<T, shared, Dimension>;
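// Usage sketch (illustrative): declare a 1-D constant-memory array at namespace
// scope and fetch its device pointer when submitting work; the first get_ptr()
// call allocates the memory and uploads the initial values.
//
//   static dpct::constant_memory<float, 1> coeffs(sycl::range<1>(16), {1.f, 2.f});
//   float *d_coeffs = coeffs.get_ptr(q);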
// dpct::deprecated:: is for functionality that was introduced for compatibility
// purposes, but relies on deprecated C++ features, which either have been
// removed or will be removed in future standards.
// Direct use of deprecated functionality in this namespace should be avoided.
namespace deprecated {
template <typename T>
using usm_host_allocator = detail::deprecated::usm_allocator<T, sycl::usm::alloc::host>;
template <typename T>
using usm_device_allocator = detail::deprecated::usm_allocator<T, sycl::usm::alloc::shared>;
} // namespace deprecated
class pointer_attributes {
public:
void init(const void *ptr,
sycl::queue &q = dpct::get_default_queue()) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error(
"dpct::pointer_attributes: only works for USM pointer.");
#else
memory_type = sycl::get_pointer_type(ptr, q.get_context());
device_pointer = (memory_type !=
sycl::usm::alloc::unknown) ? ptr : nullptr;
host_pointer = (memory_type !=
sycl::usm::alloc::unknown) &&
(memory_type != sycl::usm::alloc::device) ? ptr : nullptr;
sycl::device device_obj = sycl::get_pointer_device(ptr, q.get_context());
device_id = dpct::dev_mgr::instance().get_device_id(device_obj);
#endif
}
sycl::usm::alloc get_memory_type() {
return memory_type;
}
const void *get_device_pointer() {
return device_pointer;
}
const void *get_host_pointer() {
return host_pointer;
}
bool is_memory_shared() {
return memory_type == sycl::usm::alloc::shared;
}
unsigned int get_device_id() {
return device_id;
}
private:
sycl::usm::alloc memory_type = sycl::usm::alloc::unknown;
const void *device_pointer = nullptr;
const void *host_pointer = nullptr;
unsigned int device_id = 0;
};
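// Illustrative usage sketch (editorial addition, not part of the original
// header): querying the attributes of a USM allocation, analogous to
// cudaPointerGetAttributes. The pointer below is hypothetical, and init()
// throws when DPCT_USM_LEVEL_NONE is defined.
//
//   sycl::queue q = dpct::get_default_queue();
//   float *p = sycl::malloc_device<float>(256, q);
//   dpct::pointer_attributes attrs;
//   attrs.init(p, q);
//   bool is_shared = attrs.is_memory_shared();   // false for a device allocation
//   unsigned dev_id = attrs.get_device_id();
//   sycl::free(p, q);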
} // namespace dpct
#endif // __DPCT_MEMORY_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/dpl_utils.hpp | //==---- dpl_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DPL_UTILS_HPP__
#define __DPCT_DPL_UTILS_HPP__
#define ONEDPL_USE_DPCPP_BACKEND 1
#define __USE_DPCT 1
#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/numeric>
#include "dpl_extras/memory.h"
#include "dpl_extras/algorithm.h"
#include "dpl_extras/numeric.h"
#include "dpl_extras/iterators.h"
#include "dpl_extras/vector.h"
#include "dpl_extras/dpcpp_extensions.h"
#endif // __DPCT_DPL_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/math.hpp | //==---- math.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MATH_HPP__
#define __DPCT_MATH_HPP__
#include <sycl/sycl.hpp>
namespace dpct {
namespace detail {
template <typename VecT, class BinaryOperation, class = void>
class vectorized_binary {
public:
inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) {
VecT v4;
for (size_t i = 0; i < v4.size(); ++i) {
v4[i] = binary_op(a[i], b[i]);
}
return v4;
}
};
template <typename VecT, class BinaryOperation>
class vectorized_binary<
VecT, BinaryOperation,
std::void_t<std::invoke_result_t<BinaryOperation, VecT, VecT>>> {
public:
inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) {
return binary_op(a, b).template as<VecT>();
}
};
template <typename T> bool isnan(const T a) { return sycl::isnan(a); }
// TODO: Need add more specialization such as bfloat16 version.
} // namespace detail
/// Compute fast_length for variable-length array
/// \param [in] a The array
/// \param [in] len Length of the array
/// \returns The computed fast_length
inline float fast_length(const float *a, int len) {
switch (len) {
case 1:
return a[0];
case 2:
return sycl::fast_length(sycl::float2(a[0], a[1]));
case 3:
return sycl::fast_length(sycl::float3(a[0], a[1], a[2]));
case 4:
return sycl::fast_length(sycl::float4(a[0], a[1], a[2], a[3]));
case 0:
return 0;
default:
float f = 0;
for (int i = 0; i < len; ++i)
f += a[i] * a[i];
return sycl::sqrt(f);
}
}
/// Calculate the Euclidean length (L2 norm) of the input array.
/// \param [in] a The array pointer
/// \param [in] len Length of the array
/// \returns The Euclidean length of the array
template <typename T> inline T length(const T *a, const int len) {
switch (len) {
case 1:
return a[0];
case 2:
return sycl::length(sycl::vec<T, 2>(a[0], a[1]));
case 3:
return sycl::length(sycl::vec<T, 3>(a[0], a[1], a[2]));
case 4:
return sycl::length(sycl::vec<T, 4>(a[0], a[1], a[2], a[3]));
default:
T ret = 0;
for (int i = 0; i < len; ++i)
ret += a[i] * a[i];
return sycl::sqrt(ret);
}
}
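// Illustrative usage sketch (editorial addition, not part of the original
// header): both helpers take a pointer plus an element count; fast_length maps
// to the lower-precision sycl::fast_length path for len <= 4.
//
//   float v[3] = {3.0f, 4.0f, 12.0f};
//   float approx = dpct::fast_length(v, 3); // ~13.0f
//   float exact  = dpct::length(v, 3);      // 13.0f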
/// Performs comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool>
compare(const T a, const T b, const BinaryOperation binary_op) {
return binary_op(a, b);
}
template <typename T>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<std::not_equal_to<>, T, T>, bool>, bool>
compare(const T a, const T b, const std::not_equal_to<> binary_op) {
return !detail::isnan(a) && !detail::isnan(b) && binary_op(a, b);
}
/// Performs unordered comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool>
unordered_compare(const T a, const T b, const BinaryOperation binary_op) {
return detail::isnan(a) || detail::isnan(b) || binary_op(a, b);
}
/// Performs a 2-element comparison and returns true if both results are true.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, bool>
compare_both(const T a, const T b, const BinaryOperation binary_op) {
return compare(a[0], b[0], binary_op) && compare(a[1], b[1], binary_op);
}
/// Performs a 2-element unordered comparison and returns true if both results
/// are true.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, bool>
unordered_compare_both(const T a, const T b, const BinaryOperation binary_op) {
return unordered_compare(a[0], b[0], binary_op) &&
unordered_compare(a[1], b[1], binary_op);
}
/// Performs an element-wise comparison of two 2-element values.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, T>
compare(const T a, const T b, const BinaryOperation binary_op) {
return {compare(a[0], b[0], binary_op), compare(a[1], b[1], binary_op)};
}
/// Performs a 2-element comparison; the result for each element is 0 (false)
/// or 0xffff (true). Returns an unsigned int composed from the two per-element
/// results.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline unsigned compare_mask(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b,
const BinaryOperation binary_op) {
return sycl::vec<short, 2>(-compare(a[0], b[0], binary_op),
-compare(a[1], b[1], binary_op))
.as<sycl::vec<unsigned, 1>>();
}
/// Performs an element-wise unordered comparison of two 2-element values.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, T>
unordered_compare(const T a, const T b, const BinaryOperation binary_op) {
return {unordered_compare(a[0], b[0], binary_op),
unordered_compare(a[1], b[1], binary_op)};
}
/// Performs a 2-element unordered comparison; the result for each element is
/// 0 (false) or 0xffff (true). Returns an unsigned int composed from the two
/// per-element results.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline unsigned unordered_compare_mask(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b,
const BinaryOperation binary_op) {
return sycl::vec<short, 2>(-unordered_compare(a[0], b[0], binary_op),
-unordered_compare(a[1], b[1], binary_op))
.as<sycl::vec<unsigned, 1>>();
}
/// Determine, element-wise, whether a 2-element value is NaN.
/// \param [in] a The input value
/// \returns the comparison result
template <typename T>
inline std::enable_if_t<T::size() == 2, T> isnan(const T a) {
return {detail::isnan(a[0]), detail::isnan(a[1])};
}
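// Illustrative usage sketch (editorial addition, not part of the original
// header): ordered vs. unordered comparison of 2-element vectors. The values
// below are hypothetical; with std::less<>() the NaN lane differs between the
// two flavors.
//
//   sycl::float2 a{1.0f, NAN}, b{2.0f, 2.0f};
//   auto lt  = dpct::compare(a, b, std::less<>());           // {1.0f, 0.0f}
//   auto ult = dpct::unordered_compare(a, b, std::less<>()); // {1.0f, 1.0f}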
// min function overloads.
// For floating-point types, `float` or `double` arguments are acceptable.
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or
// `std::int64_t` type arguments are acceptable.
inline double min(const double a, const float b) {
return sycl::fmin(a, static_cast<double>(b));
}
inline double min(const float a, const double b) {
return sycl::fmin(static_cast<double>(a), b);
}
inline float min(const float a, const float b) { return sycl::fmin(a, b); }
inline double min(const double a, const double b) { return sycl::fmin(a, b); }
inline std::uint32_t min(const std::uint32_t a, const std::int32_t b) {
return sycl::min(a, static_cast<std::uint32_t>(b));
}
inline std::uint32_t min(const std::int32_t a, const std::uint32_t b) {
return sycl::min(static_cast<std::uint32_t>(a), b);
}
inline std::int32_t min(const std::int32_t a, const std::int32_t b) {
return sycl::min(a, b);
}
inline std::uint32_t min(const std::uint32_t a, const std::uint32_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::int64_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::int64_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
inline std::int64_t min(const std::int64_t a, const std::int64_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::uint64_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::int32_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::int32_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
inline std::uint64_t min(const std::uint64_t a, const std::uint32_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::uint32_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
// max function overloads.
// For floating-point types, `float` or `double` arguments are acceptable.
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or
// `std::int64_t` type arguments are acceptable.
inline double max(const double a, const float b) {
return sycl::fmax(a, static_cast<double>(b));
}
inline double max(const float a, const double b) {
return sycl::fmax(static_cast<double>(a), b);
}
inline float max(const float a, const float b) { return sycl::fmax(a, b); }
inline double max(const double a, const double b) { return sycl::fmax(a, b); }
inline std::uint32_t max(const std::uint32_t a, const std::int32_t b) {
return sycl::max(a, static_cast<std::uint32_t>(b));
}
inline std::uint32_t max(const std::int32_t a, const std::uint32_t b) {
return sycl::max(static_cast<std::uint32_t>(a), b);
}
inline std::int32_t max(const std::int32_t a, const std::int32_t b) {
return sycl::max(a, b);
}
inline std::uint32_t max(const std::uint32_t a, const std::uint32_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::int64_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::int64_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
inline std::int64_t max(const std::int64_t a, const std::int64_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::uint64_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::int32_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::int32_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
inline std::uint64_t max(const std::uint64_t a, const std::uint32_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::uint32_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
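// Illustrative usage sketch (editorial addition, not part of the original
// header): mixed signed/unsigned arguments resolve to the unsigned overloads,
// so a negative value wraps before the comparison, mirroring the device-side
// min/max overloads these wrappers replace.
//
//   auto lo = dpct::min(5u, -1); // -1 becomes 0xFFFFFFFF, so lo == 5u
//   auto hi = dpct::max(5u, -1); // hi == 0xFFFFFFFFu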
/// Performs relu saturation.
/// \param [in] a The input value
/// \returns the relu saturation result
template <typename T> inline T relu(const T a) {
if (!detail::isnan(a) && a < 0.f)
return 0.f;
return a;
}
template <class T> inline sycl::vec<T, 2> relu(const sycl::vec<T, 2> a) {
return {relu(a[0]), relu(a[1])};
}
/// Performs complex number multiply addition.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] c The third value
/// \returns the operation result
template <typename T>
inline sycl::vec<T, 2> complex_mul_add(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b,
const sycl::vec<T, 2> c) {
return sycl::vec<T, 2>{a[0] * b[0] - a[1] * b[1] + c[0],
a[0] * b[1] + a[1] * b[0] + c[1]};
}
/// Compares two values and returns the larger one. If either of the inputs is
/// NaN, NaN is returned.
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns the bigger value
template <typename T> inline T fmax_nan(const T a, const T b) {
if (detail::isnan(a) || detail::isnan(b))
return NAN;
return sycl::fmax(a, b);
}
template <typename T>
inline sycl::vec<T, 2> fmax_nan(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b) {
return {fmax_nan(a[0], b[0]), fmax_nan(a[1], b[1])};
}
/// Compares two values and returns the smaller one. If either of the inputs is
/// NaN, NaN is returned.
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns the smaller value
template <typename T> inline T fmin_nan(const T a, const T b) {
if (detail::isnan(a) || detail::isnan(b))
return NAN;
return sycl::fmin(a, b);
}
template <typename T>
inline sycl::vec<T, 2> fmin_nan(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b) {
return {fmin_nan(a[0], b[0]), fmin_nan(a[1], b[1])};
}
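// Illustrative usage sketch (editorial addition, not part of the original
// header): unlike sycl::fmax/sycl::fmin, these variants propagate NaN instead
// of ignoring it.
//
//   float m1 = sycl::fmax(2.0f, NAN);     // 2.0f (the NaN operand is dropped)
//   float m2 = dpct::fmax_nan(2.0f, NAN); // NaN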
/// A sycl::abs wrapper functor.
struct abs {
template <typename T> auto operator()(const T x) const {
return sycl::abs(x);
}
};
/// A sycl::abs_diff wrapper functor.
struct abs_diff {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::abs_diff(x, y);
}
};
/// A sycl::add_sat wrapper functor.
struct add_sat {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::add_sat(x, y);
}
};
/// A sycl::rhadd wrapper functor.
struct rhadd {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::rhadd(x, y);
}
};
/// A sycl::hadd wrapper functor.
struct hadd {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::hadd(x, y);
}
};
/// A sycl::max wrapper functor.
struct maximum {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::max(x, y);
}
};
/// A sycl::min wrapper functor.
struct minimum {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::min(x, y);
}
};
/// A sycl::sub_sat wrapper functor.
struct sub_sat {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::sub_sat(x, y);
}
};
/// Compute vectorized binary operation value for two values, with each value
/// treated as a vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \tparam [in] BinaryOperation The binary operation class
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized binary operation value of the two values
template <typename VecT, class BinaryOperation>
inline unsigned vectorized_binary(unsigned a, unsigned b,
const BinaryOperation binary_op) {
sycl::vec<unsigned, 1> v0{a}, v1{b};
auto v2 = v0.as<VecT>();
auto v3 = v1.as<VecT>();
auto v4 =
detail::vectorized_binary<VecT, BinaryOperation>()(v2, v3, binary_op);
v0 = v4.template as<sycl::vec<unsigned, 1>>();
return v0;
}
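// Illustrative usage sketch (editorial addition, not part of the original
// header): per-byte saturating addition of two packed 32-bit values, using the
// add_sat functor defined above.
//
//   unsigned a = 0x7F010203, b = 0x01010101;
//   unsigned r = dpct::vectorized_binary<sycl::char4>(a, b, dpct::add_sat());
//   // the 0x7F byte saturates at 0x7F instead of wrapping to 0x80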
/// Compute vectorized isgreater for two values, with each value treated as a
/// vector type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized greater than of the two values
template <typename S, typename T> inline T vectorized_isgreater(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = v2 > v3;
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized max for two values, with each value treated as a vector
/// type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized max of the two values
template <typename S, typename T> inline T vectorized_max(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = sycl::max(v2, v3);
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized min for two values, with each value treated as a vector
/// type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized min of the two values
template <typename S, typename T> inline T vectorized_min(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = sycl::min(v2, v3);
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized unary operation for a value, with the value treated as a
/// vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \tparam [in] UnaryOperation The unary operation class
/// \param [in] a The input value
/// \returns The vectorized unary operation value of the input value
template <typename VecT, class UnaryOperation>
inline unsigned vectorized_unary(unsigned a, const UnaryOperation unary_op) {
sycl::vec<unsigned, 1> v0{a};
auto v1 = v0.as<VecT>();
auto v2 = unary_op(v1);
v0 = v2.template as<sycl::vec<unsigned, 1>>();
return v0;
}
/// Compute the sum of the element-wise absolute differences of two values
/// without modulo overflow, with each value treated as a vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The sum of the element-wise absolute differences of the two values
template <typename VecT>
inline unsigned vectorized_sum_abs_diff(unsigned a, unsigned b) {
sycl::vec<unsigned, 1> v0{a}, v1{b};
auto v2 = v0.as<VecT>();
auto v3 = v1.as<VecT>();
auto v4 = sycl::abs_diff(v2, v3);
unsigned sum = 0;
for (size_t i = 0; i < v4.size(); ++i) {
sum += v4[i];
}
return sum;
}
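// Illustrative usage sketch (editorial addition, not part of the original
// header): sum of absolute differences over the four bytes of two packed
// values, treating each unsigned as a sycl::uchar4.
//
//   unsigned a = 0x04030201, b = 0x01020304;
//   unsigned sad = dpct::vectorized_sum_abs_diff<sycl::uchar4>(a, b); // 3+1+1+3 = 8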
} // namespace dpct
#endif // __DPCT_MATH_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/blas_utils.hpp | //==---- blas_utils.hpp----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_BLAS_UTILS_HPP__
#define __DPCT_BLAS_UTILS_HPP__
#include "memory.hpp"
#include "util.hpp"
#include "lib_common_utils.hpp"
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include <utility>
#include <vector>
#include <thread>
namespace dpct {
/// Get the value of \p s.
/// Copy the data to the host synchronously, then return the data.
/// \param [in] s The pointer pointing to the data.
/// \param [in] q The queue where the memory copy should be executed.
template <typename T>
inline auto get_value(const T *s, sycl::queue &q) {
return detail::get_value(s, q);
}
namespace detail {
inline void mem_free(sycl::queue *exec_queue,
std::vector<void *> pointers_array, sycl::event e) {
e.wait();
for (auto p : pointers_array)
sycl::free(p, *exec_queue);
}
inline int stride_for(int num_elems, int mem_align_in_elems) {
return ((num_elems - 1) / mem_align_in_elems + 1) * mem_align_in_elems;
}
#ifndef DPCT_USM_LEVEL_NONE
template<typename T>
class working_memory {
T *_input_ptr;
T *_temp_ptr;
bool _is_sycl_malloced = false;
bool _is_scalar_value = false;
sycl::queue _q;
sycl::event _e;
public:
working_memory(size_t size, sycl::queue q) : _q(q) {
_is_scalar_value = false;
_temp_ptr = (T *)sycl::malloc_device(size, q);
}
working_memory(T *result_ptr, sycl::queue q) : _input_ptr(result_ptr), _q(q) {
_is_scalar_value = true;
_is_sycl_malloced = sycl::get_pointer_type(_input_ptr, _q.get_context()) !=
sycl::usm::alloc::unknown;
if (!_is_sycl_malloced)
_temp_ptr = sycl::malloc_shared<T>(1, _q);
}
auto get_ptr() {
if (_is_scalar_value && _is_sycl_malloced)
return _input_ptr;
return _temp_ptr;
}
void set_event(sycl::event e) { _e = e; }
~working_memory() {
if (_is_scalar_value) {
if (!_is_sycl_malloced) {
_q.memcpy(_input_ptr, _temp_ptr, sizeof(T)).wait();
sycl::free(_temp_ptr, _q);
}
} else {
std::vector<void *> ptrs{_temp_ptr};
dpct::async_dpct_free(ptrs, {_e});
}
}
};
#endif
template <typename Tx, typename Tr>
inline void nrm2_impl(sycl::queue &q, int n, const void *x, int incx,
void *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
#ifdef DPCT_USM_LEVEL_NONE
auto x_buffer = dpct::get_buffer<Tx>(x);
auto r_buffer =
sycl::buffer<Tr, 1>(reinterpret_cast<Tr *>(result), sycl::range<1>(1));
if (dpct::is_device_ptr(result))
r_buffer = dpct::get_buffer<Tr>(result);
oneapi::mkl::blas::column_major::nrm2(q, n, x_buffer, incx, r_buffer);
#else
working_memory<Tr> res_mem(reinterpret_cast<Tr *>(result), q);
oneapi::mkl::blas::column_major::nrm2(q, n, reinterpret_cast<const Tx *>(x),
incx, res_mem.get_ptr());
#endif
#endif
}
template <bool is_conjugate, class Txy, class Tr>
inline void dotuc_impl(sycl::queue &q, int n, const Txy *x, int incx,
const Txy *y, int incy, Tr *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
#ifdef DPCT_USM_LEVEL_NONE
auto x_buffer = dpct::get_buffer<Txy>(x);
auto y_buffer = dpct::get_buffer<Txy>(y);
auto r_buffer = sycl::buffer<Tr, 1>((Tr *)result, sycl::range<1>(1));
if (dpct::is_device_ptr(result))
r_buffer = dpct::get_buffer<Tr>(result);
if constexpr (std::is_same_v<Txy, std::complex<float>> ||
std::is_same_v<Txy, std::complex<double>>) {
if constexpr (is_conjugate)
oneapi::mkl::blas::column_major::dotc(q, n, x_buffer, incx, y_buffer,
incy, r_buffer);
else
oneapi::mkl::blas::column_major::dotu(q, n, x_buffer, incx, y_buffer,
incy, r_buffer);
} else
oneapi::mkl::blas::column_major::dot(q, n, x_buffer, incx, y_buffer, incy,
r_buffer);
#else
working_memory<Tr> res_mem(result, q);
if constexpr (std::is_same_v<Txy, std::complex<float>> ||
std::is_same_v<Txy, std::complex<double>>) {
if constexpr (is_conjugate)
oneapi::mkl::blas::column_major::dotc(q, n, x, incx, y, incy, res_mem.get_ptr());
else
oneapi::mkl::blas::column_major::dotu(q, n, x, incx, y, incy, res_mem.get_ptr());
} else
oneapi::mkl::blas::column_major::dot(q, n, x, incx, y, incy, res_mem.get_ptr());
#endif
#endif
}
template <bool is_conjugate>
inline void dotuc(sycl::queue &q, int n, const void *x,
library_data_t x_type, int incx, const void *y,
library_data_t y_type, int incy, void *result,
library_data_t result_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, y_type, result_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const float *>(x), incx,
reinterpret_cast<const float *>(y), incy,
reinterpret_cast<float *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const double *>(x), incx,
reinterpret_cast<const double *>(y), incy,
reinterpret_cast<double *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float,
library_data_t::complex_float): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const std::complex<float> *>(x), incx,
reinterpret_cast<const std::complex<float> *>(y), incy,
reinterpret_cast<std::complex<float> *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double,
library_data_t::complex_double): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const std::complex<double> *>(x), incx,
reinterpret_cast<const std::complex<double> *>(y), incy,
reinterpret_cast<std::complex<double> *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const sycl::half *>(x), incx,
reinterpret_cast<const sycl::half *>(y), incy,
reinterpret_cast<sycl::half *>(result));
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
template <class Tx, class Te>
inline void scal_impl(sycl::queue &q, int n, const void *alpha, void *x,
int incx) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q);
auto data_x = get_memory(reinterpret_cast<Tx *>(x));
oneapi::mkl::blas::column_major::scal(q, n, alpha_val,
data_x, incx);
#endif
}
template <class Txy, class Te>
inline void axpy_impl(sycl::queue &q, int n, const void *alpha, const void *x,
int incx, void *y, int incy) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q);
auto data_x = get_memory(reinterpret_cast<const Txy *>(x));
auto data_y = get_memory(reinterpret_cast<Txy *>(y));
oneapi::mkl::blas::column_major::axpy(q, n, alpha_val,
data_x, incx,
data_y, incy);
#endif
}
template <class Txy, class Tc, class Ts>
inline void rot_impl(sycl::queue &q, int n, void *x, int incx, void *y,
int incy, const void *c, const void *s) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Tc c_value = dpct::get_value(reinterpret_cast<const Tc *>(c), q);
Ts s_value = dpct::get_value(reinterpret_cast<const Ts *>(s), q);
auto data_x = get_memory(reinterpret_cast<Txy *>(x));
auto data_y = get_memory(reinterpret_cast<Txy *>(y));
oneapi::mkl::blas::column_major::rot(q, n, data_x, incx,
data_y, incy, c_value,
s_value);
#endif
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, int lda, const void *b,
int ldb, const void *beta, void *c, int ldc) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
auto data_a = get_memory(reinterpret_cast<const Ta *>(a));
auto data_b = get_memory(reinterpret_cast<const Tb *>(b));
auto data_c = get_memory(reinterpret_cast<Tc *>(c));
oneapi::mkl::blas::column_major::gemm(
q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda,
data_b, ldb, beta_value, data_c, ldc);
#endif
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void **a, int lda,
const void **b, int ldb, const void *beta, void **c,
int ldc, int batch_size) {
struct matrix_info_t {
oneapi::mkl::transpose transpose_info[2];
Ts value_info[2];
std::int64_t size_info[3];
std::int64_t ld_info[3];
std::int64_t groupsize_info;
};
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->transpose_info[0] = a_trans;
matrix_info->transpose_info[1] = b_trans;
matrix_info->value_info[0] = alpha_value;
matrix_info->value_info[1] = beta_value;
matrix_info->size_info[0] = m;
matrix_info->size_info[1] = n;
matrix_info->size_info[2] = k;
matrix_info->ld_info[0] = lda;
matrix_info->ld_info[1] = ldb;
matrix_info->ld_info[2] = ldc;
matrix_info->groupsize_info = batch_size;
sycl::event e = oneapi::mkl::blas::column_major::gemm_batch(
q, matrix_info->transpose_info, matrix_info->transpose_info + 1,
matrix_info->size_info, matrix_info->size_info + 1,
matrix_info->size_info + 2, matrix_info->value_info,
reinterpret_cast<const Ta **>(a), matrix_info->ld_info,
reinterpret_cast<const Tb **>(b), matrix_info->ld_info + 1,
matrix_info->value_info + 1, reinterpret_cast<Tc **>(c),
matrix_info->ld_info + 2, 1, &(matrix_info->groupsize_info));
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { std::free(matrix_info); });
});
}
template <class Ta, class Tb, class Tc, class Ts>
inline void
gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n,
int k, const void *alpha, const void *a, int lda,
long long int stride_a, const void *b, int ldb,
long long int stride_b, const void *beta, void *c,
int ldc, long long int stride_c, int batch_size) {
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
auto data_a = get_memory(reinterpret_cast<const Ta *>(a));
auto data_b = get_memory(reinterpret_cast<const Tb *>(b));
auto data_c = get_memory(reinterpret_cast<Tc *>(c));
oneapi::mkl::blas::column_major::gemm_batch(
q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda,
stride_a, data_b, ldb, stride_b, beta_value,
data_c, ldc, stride_c, batch_size);
}
template <bool is_hermitian, class T, class Tbeta>
inline void rk_impl(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k,
const T *alpha, const T *a, int lda, const T *b,
int ldb, const Tbeta *beta, T *c, int ldc) {
  // For a symmetric matrix, this function performs: C = alpha*OP(A)*(OP(B))^T + beta*C
  // For a Hermitian matrix, this function performs: C = alpha*OP(A)*(OP(B))^H + beta*C
  // The gemmt() function performs: C = alpha*OPA(A)*OPB(B) + beta*C
  // So OPB needs to be adjusted before we call gemmt().
using Ty = typename dpct::DataType<T>::T2;
using Ts = typename dpct::DataType<Tbeta>::T2;
Ty alpha_value = dpct::get_value(reinterpret_cast<const Ty *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
oneapi::mkl::transpose trans_A = trans, trans_B = trans;
int origin_b_rows = trans == oneapi::mkl::transpose::nontrans ? n : k;
int origin_b_cols = trans == oneapi::mkl::transpose::nontrans ? k : n;
if ((is_hermitian && trans == oneapi::mkl::transpose::trans) ||
(!is_hermitian && !std::is_floating_point_v<Ty> && trans == oneapi::mkl::transpose::conjtrans)) {
    // In this case, OPB needs to be a conjugate (non-transposing) operation,
    // but only nontrans, conjtrans and trans are available.
    // So we perform a conjtrans copy first, then apply a trans operation in gemmt.
trans_B = oneapi::mkl::transpose::trans;
auto data_a = get_memory(reinterpret_cast<const Ty *>(a));
auto data_c = get_memory(reinterpret_cast<Ty *>(c));
#ifdef DPCT_USM_LEVEL_NONE
auto new_B_buffer = sycl::buffer<Ty, 1>(sycl::range<1>(origin_b_rows * origin_b_cols));
auto from_buffer = dpct::get_buffer<Ty>(b);
oneapi::mkl::blas::column_major::omatcopy_batch(
q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols,
Ts(1.0), from_buffer, ldb, origin_b_rows * ldb, new_B_buffer,
origin_b_cols, origin_b_rows * origin_b_cols, 1);
oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value,
data_a, lda, new_B_buffer, origin_b_cols, beta_value, data_c, ldc);
#else
working_memory<T> new_B(origin_b_rows * origin_b_cols * sizeof(T), q);
oneapi::mkl::blas::column_major::omatcopy_batch(
q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols,
Ts(1.0), reinterpret_cast<const Ty *>(b), ldb, origin_b_rows * ldb,
reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols,
origin_b_rows * origin_b_cols, 1);
sycl::event e = oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value,
data_a, lda, reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols,
beta_value, data_c, ldc);
new_B.set_event(e);
#endif
} else {
if constexpr (is_hermitian) {
trans_B = trans == oneapi::mkl::transpose::nontrans
? oneapi::mkl::transpose::conjtrans
: oneapi::mkl::transpose::nontrans;
} else {
trans_B = trans == oneapi::mkl::transpose::nontrans
? oneapi::mkl::transpose::trans
: oneapi::mkl::transpose::nontrans;
}
auto data_a = get_memory(reinterpret_cast<const Ty *>(a));
auto data_b = get_memory(reinterpret_cast<const Ty *>(b));
auto data_c = get_memory(reinterpret_cast<Ty *>(c));
oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value,
data_a, lda, data_b, ldb, beta_value, data_c, ldc);
}
}
template <class Ta, class Tb, class Ts>
inline void
trsm_batch_impl(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n, const void *alpha,
const void **a, int lda, void **b, int ldb, int batch_size) {
struct matrix_info_t {
matrix_info_t(oneapi::mkl::side side_info, oneapi::mkl::uplo uplo_info,
oneapi::mkl::transpose transpose_info,
oneapi::mkl::diag diag_info, Ts value_info, std::int64_t m,
std::int64_t n, std::int64_t lda, std::int64_t ldb,
std::int64_t groupsize_info)
: side_info(side_info), uplo_info(uplo_info),
transpose_info(transpose_info), diag_info(diag_info),
value_info(value_info), groupsize_info(groupsize_info) {
size_info[0] = m;
size_info[1] = n;
ld_info[0] = lda;
ld_info[1] = ldb;
}
oneapi::mkl::side side_info;
oneapi::mkl::uplo uplo_info;
oneapi::mkl::transpose transpose_info;
oneapi::mkl::diag diag_info;
Ts value_info;
std::int64_t size_info[2];
std::int64_t ld_info[2];
std::int64_t groupsize_info;
};
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
matrix_info_t *matrix_info =
new matrix_info_t(left_right, upper_lower, trans, unit_diag, alpha_value,
m, n, lda, ldb, batch_size);
sycl::event e = oneapi::mkl::blas::column_major::trsm_batch(
q, &(matrix_info->side_info), &(matrix_info->uplo_info),
&(matrix_info->transpose_info), &(matrix_info->diag_info),
matrix_info->size_info, matrix_info->size_info + 1,
&(matrix_info->value_info), reinterpret_cast<const Ta **>(a),
matrix_info->ld_info, reinterpret_cast<Tb **>(b),
matrix_info->ld_info + 1, 1, &(matrix_info->groupsize_info));
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete matrix_info; });
});
}
template <typename T>
inline void getrfnp_batch_wrapper(sycl::queue &exec_queue, int n, T *a[],
int lda, int *info, int batch_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
std::int64_t stride_a = n * lda;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getrfnp_batch_scratchpad_size<Ty>(
exec_queue, n, n, lda, stride_a, batch_size);
Ty *a_strided_mem =
(Ty *)dpct::dpct_malloc(stride_a * batch_size * sizeof(Ty), exec_queue);
T **host_a = (T **)malloc(batch_size * sizeof(T *));
dpct::dpct_memcpy(host_a, a, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct::dpct_memcpy(a_strided_mem + i * stride_a, host_a[i],
n * lda * sizeof(T));
#ifdef DPCT_USM_LEVEL_NONE
{
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
auto a_buffer = get_buffer<Ty>(a_strided_mem);
oneapi::mkl::lapack::getrfnp_batch(exec_queue, n, n, a_buffer, lda,
stride_a, batch_size, scratchpad,
scratchpad_size);
}
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_strided_mem + i * stride_a,
n * lda * sizeof(T), automatic));
#else
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
sycl::event e = oneapi::mkl::lapack::getrfnp_batch(
exec_queue, n, n, a_strided_mem, lda, stride_a, batch_size, scratchpad,
scratchpad_size);
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_strided_mem + i * stride_a,
n * lda * sizeof(T), automatic, {e}));
std::vector<void *> ptrs{scratchpad, a_strided_mem};
dpct::async_dpct_free(ptrs, events, exec_queue);
#endif
exec_queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(events);
cgh.host_task([=] { free(host_a); });
});
#endif
}
} // namespace detail
inline oneapi::mkl::transpose get_transpose(int t) {
if (t == 0) {
return oneapi::mkl::transpose::nontrans;
} else if (t == 1) {
return oneapi::mkl::transpose::trans;
} else {
return oneapi::mkl::transpose::conjtrans;
}
}
/// Computes the LU factorizations of a batch of general matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] n The order of the matrices.
/// \param [in, out] a Array of pointers to matrices. These matrices will be
/// overwritten by their lower triangular factors (with unit diagonal elements)
/// and upper triangular factors.
/// \param [in] lda The leading dimension of the matrices.
/// \param [out] ipiv An array that stores the pivot indices. If \p ipiv is
/// nullptr, non-pivoting LU factorization is computed.
/// \param [out] info An array that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getrf_batch_wrapper(sycl::queue &exec_queue, int n, T *a[],
int lda, int *ipiv, int *info, int batch_size) {
if (ipiv == nullptr) {
detail::getrfnp_batch_wrapper(exec_queue, n, a, lda, info, batch_size);
return;
}
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>(
exec_queue, n, n, lda, stride_a, stride_ipiv, batch_size);
T *a_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
{
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
oneapi::mkl::lapack::getrf_batch(exec_queue, n, n, a_buffer, lda, stride_a,
ipiv_buf, stride_ipiv, batch_size, scratchpad,
scratchpad_size);
auto to_buffer = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = ipiv_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = to_buffer.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * n + id.get(1)] =
static_cast<int>(from_acc[id.get(0) * stride_ipiv + id.get(1)]);
});
});
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_buffer_ptr + i * stride_a,
n * lda * sizeof(T), automatic));
std::vector<void *> ptrs{host_a};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t m_int64 = n;
std::int64_t n_int64 = n;
std::int64_t lda_int64 = lda;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>(
exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *)).wait();
for (std::int64_t i = 0; i < batch_size; ++i)
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
oneapi::mkl::lapack::getrf_batch(exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64,
ipiv_int64_ptr, 1, &group_sizes, scratchpad,
scratchpad_size);
sycl::event e = exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv[idx] = static_cast<int>(ipiv_int64[idx]);
});
});
std::vector<void *> ptrs{scratchpad, ipiv_int64, ipiv_int64_ptr, a_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
/// Solves a system of linear equations with a batch of LU-factored square
/// coefficient matrices, with multiple right-hand sides.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] trans Indicates the form of the linear equations.
/// \param [in] n The order of the matrices.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a Array of pointers to matrices.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [in] ipiv An array that stores the pivots.
/// \param [in, out] b Array of pointers to matrices, whose columns are
/// the right-hand sides for the systems of equations.
/// \param [in] ldb The leading dimension of the matrices in \p b.
/// \param [out] info A value that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getrs_batch_wrapper(sycl::queue &exec_queue,
oneapi::mkl::transpose trans, int n, int nrhs,
const T *a[], int lda, int *ipiv, T *b[],
int ldb, int *info, int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info value to 0
*info = 0;
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_b = nrhs * ldb;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>(
exec_queue, trans, n, nrhs, lda, stride_a, stride_ipiv, ldb, stride_b,
batch_size);
T *a_buffer_ptr, *b_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_b = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_b, b, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i) {
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
dpct_memcpy(b_buffer_ptr + i * stride_b, host_b[i], nrhs * ldb * sizeof(T));
}
{
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
auto b_buffer = get_buffer<Ty>(b_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
auto from_buf = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * stride_ipiv + id.get(1)] =
static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]);
});
});
oneapi::mkl::lapack::getrs_batch(exec_queue, trans, n, nrhs, a_buffer, lda,
stride_a, ipiv_buf, stride_ipiv, b_buffer, ldb,
stride_b, batch_size, scratchpad, scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_b[i],
b_buffer_ptr + i * stride_b,
nrhs * ldb * sizeof(T), automatic));
std::vector<void *> ptrs{host_a, host_b};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t n_int64 = n;
std::int64_t nrhs_int64 = nrhs;
std::int64_t lda_int64 = lda;
std::int64_t ldb_int64 = ldb;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>(
exec_queue, &trans, &n_int64, &nrhs_int64, &lda_int64, &ldb_int64, 1,
&group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *));
exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]);
});
}).wait();
for (std::int64_t i = 0; i < batch_size; ++i)
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
sycl::event e = oneapi::mkl::lapack::getrs_batch(
exec_queue, &trans, &n_int64, &nrhs_int64, (Ty **)a_shared, &lda_int64,
ipiv_int64_ptr, (Ty **)b_shared, &ldb_int64, 1, &group_sizes, scratchpad,
scratchpad_size);
std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared, b_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
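// Illustrative usage sketch (editorial addition, not part of the original
// header): factor a batch of n-by-n systems and then solve them. The pointer
// arguments follow the batched cuBLAS conventions of the migrated code; a, b,
// ipiv and info are hypothetical allocations accessible to the queue q, and T
// is assumed to be float here.
//
//   dpct::getrf_batch_wrapper(q, n, a, lda, ipiv, info, batch_size);
//   dpct::getrs_batch_wrapper(q, oneapi::mkl::transpose::nontrans, n, nrhs,
//                             const_cast<const float **>(a), lda, ipiv,
//                             b, ldb, info, batch_size);
//   q.wait();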
/// Computes the inverses of a batch of LU-factored matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] n The order of the matrices.
/// \param [in] a Array of pointers to matrices.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [in] ipiv An array that stores the pivots.
/// \param [out] b Array of pointers to inverse matrices.
/// \param [in] ldb The leading dimension of the matrices in \p b.
/// \param [out] info An array that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getri_batch_wrapper(sycl::queue &exec_queue, int n,
const T *a[], int lda, int *ipiv, T *b[],
int ldb, int *info, int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_b = n * ldb;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>(
exec_queue, n, ldb, stride_b, stride_ipiv, batch_size);
T *b_buffer_ptr;
b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_b = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_b, b, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i) {
// Need to create a copy of input matrices "a" to keep them unchanged.
// Matrices "b" (copy of matrices "a") will be used as input and output
// parameter in oneapi::mkl::lapack::getri_batch call.
matrix_mem_copy(b_buffer_ptr + i * stride_b, host_a[i], ldb, lda, n, n,
dpct::device_to_device, exec_queue);
}
{
auto b_buffer = get_buffer<Ty>(b_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
auto from_buf = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * stride_ipiv + id.get(1)] =
static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]);
});
});
oneapi::mkl::lapack::getri_batch(exec_queue, n, b_buffer, ldb, stride_b, ipiv_buf,
stride_ipiv, batch_size, scratchpad,
scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_b[i],
b_buffer_ptr + i * stride_b,
n * ldb * sizeof(T), automatic));
std::vector<void *> ptrs{host_a, host_b};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t n_int64 = n;
std::int64_t ldb_int64 = ldb;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>(
exec_queue, &n_int64, &ldb_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]);
});
});
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *)).wait();
for (std::int64_t i = 0; i < batch_size; ++i) {
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
// Need to create a copy of input matrices "a" to keep them unchanged.
// Matrices "b" (copy of matrices "a") will be used as input and output
// parameter in oneapi::mkl::lapack::getri_batch call.
matrix_mem_copy(b_shared[i], a_shared[i], ldb, lda, n, n, dpct::device_to_device,
exec_queue);
}
sycl::event e = oneapi::mkl::lapack::getri_batch(
exec_queue, &n_int64, (Ty **)b_shared, &ldb_int64, ipiv_int64_ptr, 1,
&group_sizes, scratchpad, scratchpad_size);
std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared, b_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
/// Computes the QR factorizations of a batch of general matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] m The number of rows in the matrices.
/// \param [in] n The number of columns in the matrices.
/// \param [in, out] a Array of pointers to matrices. These
/// matrices will be overwritten by the factorization data.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [out] tau An array that stores the scalar factors of the elementary
/// reflectors.
/// \param [out] info A value that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void geqrf_batch_wrapper(sycl::queue exec_queue, int m, int n,
T *a[], int lda, T *tau[], int *info,
int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info value to 0
*info = 0;
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_tau = std::max(1, std::min(m, n));
std::int64_t scratchpad_size = oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>(
exec_queue, m, n, lda, stride_a, stride_tau, batch_size);
T *a_buffer_ptr, *tau_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
tau_buffer_ptr = (T *)dpct_malloc(stride_tau * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_tau = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_tau, tau, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
{
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
auto tau_buffer = get_buffer<Ty>(tau_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
oneapi::mkl::lapack::geqrf_batch(exec_queue, m, n, a_buffer, lda, stride_a,
tau_buffer, stride_tau, batch_size, scratchpad,
scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events_a;
std::vector<sycl::event> events_tau;
for (std::int64_t i = 0; i < batch_size; ++i) {
events_a.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_buffer_ptr + i * stride_a,
n * lda * sizeof(T), automatic));
events_tau.push_back(detail::dpct_memcpy(
exec_queue, host_tau[i], tau_buffer_ptr + i * stride_tau,
std::max(1, std::min(m, n)) * sizeof(T), automatic));
}
std::vector<void *> ptr_a{host_a};
std::vector<void *> ptr_tau{host_tau};
std::thread mem_free_thread_a(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptr_a, events_a);
std::thread mem_free_thread_tau(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptr_tau, events_tau);
mem_free_thread_a.detach();
mem_free_thread_tau.detach();
#else
  std::int64_t m_int64 = m;
std::int64_t n_int64 = n;
std::int64_t lda_int64 = lda;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>(
exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **tau_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(tau_shared, tau, batch_size * sizeof(T *)).wait();
sycl::event e = oneapi::mkl::lapack::geqrf_batch(
exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64, (Ty **)tau_shared, 1,
&group_sizes, scratchpad, scratchpad_size);
std::vector<void *> ptrs{scratchpad, a_shared, tau_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
/// Computes the Euclidean norm of a vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void nrm2(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, void *result, library_data_t result_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, result_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::nrm2_impl<float, float>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::nrm2_impl<double, double>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::real_float): {
detail::nrm2_impl<std::complex<float>, float>(
q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::real_double): {
detail::nrm2_impl<std::complex<double>, double>(
q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half): {
detail::nrm2_impl<sycl::half, sycl::half>(
q, n, x, incx, result);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
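// A minimal usage sketch for nrm2 (illustrative only; `q`, `n`, `d_x` and
// `d_result` are placeholder names, with the data kept in USM memory):
//
//   sycl::queue q;
//   int n = 1024;
//   float *d_x = sycl::malloc_device<float>(n, q);
//   float *d_result = sycl::malloc_shared<float>(1, q);
//   /* ... fill d_x ... */
//   dpct::nrm2(q, n, d_x, dpct::library_data_t::real_float, 1, d_result,
//              dpct::library_data_t::real_float);
//   q.wait(); // *d_result now holds the Euclidean norm of d_x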
/// Computes the dot product of two vectors.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in] y Input vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void dot(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
detail::dotuc<false>(q, n, x, x_type, incx, y, y_type, incy, result,
result_type);
}
/// Computes the dot product of two vectors, conjugating the first vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in] y Input vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void dotc(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
detail::dotuc<true>(q, n, x, x_type, incx, y, y_type, incy, result,
result_type);
}
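// A minimal usage sketch for dot on single-precision data (illustrative only;
// `d_x`, `d_y` and `d_result` are placeholder USM pointers holding n, n and 1
// floats respectively):
//
//   dpct::dot(q, n, d_x, dpct::library_data_t::real_float, 1,
//             d_y, dpct::library_data_t::real_float, 1,
//             d_result, dpct::library_data_t::real_float);
//
// dotc is called the same way; it differs only in conjugating x, which matters
// for the complex_float and complex_double combinations.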
/// Computes the product of a vector by a scalar.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] alpha The scale factor alpha.
/// \param [in] alpha_type The data type of alpha.
/// \param [in, out] x Input/Output vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
inline void scal(sycl::queue &q, int n, const void *alpha,
library_data_t alpha_type, void *x, library_data_t x_type,
int incx) {
std::uint64_t key = detail::get_type_combination_id(x_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float): {
detail::scal_impl<float, float>(q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::real_double): {
detail::scal_impl<double, double>(q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float): {
detail::scal_impl<std::complex<float>, std::complex<float>>(q, n, alpha,
x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double): {
detail::scal_impl<std::complex<double>, std::complex<double>>(
q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::real_half): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
    sycl::half alpha_half(alpha_value);
    detail::scal_impl<sycl::half, sycl::half>(q, n, &alpha_half, x, incx);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
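// A minimal usage sketch for scal, scaling a float vector in place
// (illustrative only; `d_x` is a placeholder device pointer; the scalar is
// read through dpct::get_value, so a host-side alpha works here):
//
//   float alpha = 2.0f;
//   dpct::scal(q, n, &alpha, dpct::library_data_t::real_float,
//              d_x, dpct::library_data_t::real_float, 1);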
/// Computes a vector-scalar product and adds the result to a vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] alpha The scale factor alpha.
/// \param [in] alpha_type The data type of alpha.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in, out] y Input/Output vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
inline void axpy(sycl::queue &q, int n, const void *alpha,
library_data_t alpha_type, const void *x, library_data_t x_type,
int incx, void *y, library_data_t y_type, int incy) {
std::uint64_t key = detail::get_type_combination_id(x_type, alpha_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::axpy_impl<float, float>(q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::axpy_impl<double, double>(q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float): {
detail::axpy_impl<std::complex<float>, std::complex<float>>(
q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double): {
detail::axpy_impl<std::complex<double>, std::complex<double>>(
q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
    sycl::half alpha_half(alpha_value);
    detail::axpy_impl<sycl::half, sycl::half>(q, n, &alpha_half, x, incx, y, incy);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
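// A minimal usage sketch for axpy, computing y = alpha * x + y on float data
// (illustrative only; `d_x` and `d_y` are placeholder device pointers and
// alpha is a host-side scalar):
//
//   float alpha = 0.5f;
//   dpct::axpy(q, n, &alpha, dpct::library_data_t::real_float,
//              d_x, dpct::library_data_t::real_float, 1,
//              d_y, dpct::library_data_t::real_float, 1);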
/// Performs rotation of points in the plane.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in, out] x Input/Output vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in, out] y Input/Output vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [in] c Scaling factor.
/// \param [in] s Scaling factor.
/// \param [in] cs_type Data type of the scaling factors.
inline void rot(sycl::queue &q, int n, void *x, library_data_t x_type,
int incx, void *y, library_data_t y_type, int incy,
const void *c, const void *s, library_data_t cs_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, cs_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::rot_impl<float, float, float>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::rot_impl<double, double, double>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::real_float): {
detail::rot_impl<std::complex<float>, float, float>(q, n, x, incx, y, incy, c,
s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::real_double): {
detail::rot_impl<std::complex<double>, double, double>(q, n, x, incx, y, incy, c,
s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float): {
detail::rot_impl<std::complex<float>, float, std::complex<float>>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double): {
detail::rot_impl<std::complex<double>, double, std::complex<double>>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half): {
detail::rot_impl<sycl::half, sycl::half, sycl::half>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_bfloat16,
library_data_t::real_bfloat16): {
detail::rot_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16>(q, n, x, incx, y, incy, c, s);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
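// A minimal usage sketch for rot, applying a Givens rotation with cosine c and
// sine s to two float vectors (illustrative only; `d_x` and `d_y` are
// placeholder device pointers, and c and s are assumed to be host-side
// scalars, mirroring the other scaling factors in this header):
//
//   float c = 0.8f, s = 0.6f;
//   dpct::rot(q, n, d_x, dpct::library_data_t::real_float, 1,
//             d_y, dpct::library_data_t::real_float, 1,
//             &c, &s, dpct::library_data_t::real_float);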
/// Computes matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, library_data_t a_type,
int lda, const void *b, library_data_t b_type, int ldb,
const void *beta, void *c, library_data_t c_type, int ldc,
library_data_t scaling_type) {
bool matched = false;
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, alpha, a,
lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, &alpha_half,
a, lda, b, ldb, &beta_half, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
float alpha_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
float beta_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
detail::gemm_impl<std::int8_t, std::int8_t, std::int32_t, float>(
q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb, &beta_float, c, ldc);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
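// A minimal usage sketch for gemm on single-precision, column-major data,
// computing C = alpha * A * B + beta * C with A (m x k), B (k x n) and
// C (m x n) (illustrative only; `d_a`, `d_b` and `d_c` are placeholder device
// pointers):
//
//   float alpha = 1.0f, beta = 0.0f;
//   dpct::gemm(q, oneapi::mkl::transpose::nontrans,
//              oneapi::mkl::transpose::nontrans, m, n, k,
//              &alpha, d_a, dpct::library_data_t::real_float, /*lda=*/m,
//              d_b, dpct::library_data_t::real_float, /*ldb=*/k,
//              &beta, d_c, dpct::library_data_t::real_float, /*ldc=*/m,
//              dpct::library_data_t::real_float);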
/// Computes a batch of matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] batch_size Specifies the number of matrix multiply operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a[],
library_data_t a_type, int lda, const void *b[],
library_data_t b_type, int ldb, const void *beta,
void *c[], library_data_t c_type, int ldc,
int batch_size, library_data_t scaling_type) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
bool matched = false;
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_batch_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
#ifdef __INTEL_MKL__
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda,
b, ldb, beta, c, ldc, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
float alpha_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
float beta_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t,
float>(q, a_trans, b_trans, m, n, k, &alpha_float,
a, lda, b, ldb, &beta_float, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
#endif
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb, &beta_half, c, ldc,
batch_size);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
#endif
}
/// Computes a batch of matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] stride_a Stride between the different A matrices.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] stride_b Stride between the different B matrices.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] stride_c Stride between the different C matrices.
/// \param [in] batch_size Specifies the number of matrix multiply operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, library_data_t a_type,
int lda, long long int stride_a, const void *b,
library_data_t b_type, int ldb, long long int stride_b,
const void *beta, void *c, library_data_t c_type,
int ldc, long long int stride_c, int batch_size,
library_data_t scaling_type) {
bool matched = false;
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_batch_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
#ifdef __INTEL_MKL__
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda,
stride_a, b, ldb, stride_b, beta, c, ldc,
stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t,
std::int32_t>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
#endif
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, stride_a, b, ldb, stride_b,
&beta_half, c, ldc, stride_c, batch_size);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
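// A minimal usage sketch for the strided gemm_batch overload: batch_size
// square n x n float matrices stored back to back, so every stride equals
// n * n (illustrative only; `d_a`, `d_b` and `d_c` are placeholder device
// pointers sized n * n * batch_size):
//
//   float alpha = 1.0f, beta = 0.0f;
//   long long stride = static_cast<long long>(n) * n;
//   dpct::gemm_batch(q, oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::transpose::nontrans, n, n, n,
//                    &alpha, d_a, dpct::library_data_t::real_float, n, stride,
//                    d_b, dpct::library_data_t::real_float, n, stride,
//                    &beta, d_c, dpct::library_data_t::real_float, n, stride,
//                    batch_size, dpct::library_data_t::real_float);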
/// This routine performs a special rank-k update of a symmetric matrix C by
/// general matrices A and B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] uplo Specifies whether C's data is stored in its upper or lower triangle.
/// \param [in] trans Specifies the operation to apply.
/// \param [in] n The number of rows and columns in C.
/// \param [in] k The inner dimension of matrix multiplications.
/// \param [in] alpha Scaling factor for the rank-k update.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for the rank-k update.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] ldc Leading dimension of C.
template <class T>
inline void syrk(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const T *beta, T *c,
int ldc) {
detail::rk_impl<false, T, T>(q, uplo, trans, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
}
/// This routine performs a special rank-k update of a Hermitian matrix C by
/// general matrices A and B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] uplo Specifies whether C's data is stored in its upper or lower triangle.
/// \param [in] trans Specifies the operation to apply.
/// \param [in] n The number of rows and columns in C.
/// \param [in] k The inner dimension of matrix multiplications.
/// \param [in] alpha Scaling factor for the rank-k update.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for the rank-k update.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] ldc Leading dimension of C.
template <class T, class Tbeta>
inline void herk(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const Tbeta *beta,
T *c, int ldc) {
detail::rk_impl<true, T, Tbeta>(q, uplo, trans, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
}
/// This routine performs a group of trsm operations. Each trsm solves an
/// equation of the form op(A) * X = alpha * B or X * op(A) = alpha * B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] left_right Specifies A multiplies X on the left or on the right.
/// \param [in] upper_lower Specifies A is upper or lower triangular.
/// \param [in] trans Specifies the operation applied to A.
/// \param [in] unit_diag Specifies whether A is unit triangular.
/// \param [in] m Number of rows of the B matrices.
/// \param [in] n Number of columns of the B matrices.
/// \param [in] alpha Scaling factor for the solutions.
/// \param [in] a Input matrices A.
/// \param [in] a_type Data type of the matrices A.
/// \param [in] lda Leading dimension of the matrices A.
/// \param [in, out] b Input and output matrices B.
/// \param [in] b_type Data type of the matrices B.
/// \param [in] ldb Leading dimension of the matrices B.
/// \param [in] batch_size Specifies the number of trsm operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void trsm_batch(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower,
oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n,
const void *alpha, const void **a, library_data_t a_type,
int lda, void **b, library_data_t b_type, int ldb,
int batch_size, library_data_t scaling_type) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float,
library_data_t::real_float): {
detail::trsm_batch_impl<float, float, float>(q, left_right, upper_lower,
trans, unit_diag, m, n, alpha,
a, lda, b, ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double,
library_data_t::real_double): {
detail::trsm_batch_impl<double, double, double>(
q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b,
ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float,
library_data_t::complex_float): {
detail::trsm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>>(q, left_right, upper_lower,
trans, unit_diag, m, n, alpha,
a, lda, b, ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double,
library_data_t::complex_double): {
detail::trsm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>>(q, left_right, upper_lower,
trans, unit_diag, m, n, alpha,
a, lda, b, ldb, batch_size);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
#endif
}
/// Computes a triangular matrix-general matrix product.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] left_right Specifies A is on the left or right side of the
/// multiplication.
/// \param [in] upper_lower Specifies A is upper or lower triangular.
/// \param [in] trans Specifies the operation applied to A.
/// \param [in] unit_diag Specifies whether A is unit triangular.
/// \param [in] m Number of rows of B.
/// \param [in] n Number of columns of B.
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of the matrix A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of the matrix B.
/// \param [out] c Output matrix C.
/// \param [in] ldc Leading dimension of the matrix C.
template <class T>
inline void trmm(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n, const T *alpha,
const T *a, int lda, const T *b, int ldb, T *c, int ldc) {
using Ty = typename DataType<T>::T2;
auto alpha_val = dpct::get_value(alpha, q);
if (b != c) {
dpct::matrix_mem_copy(c, b, ldc, ldb, m, n, dpct::device_to_device, q);
}
auto data_a = detail::get_memory(reinterpret_cast<const Ty *>(a));
auto data_c = detail::get_memory(reinterpret_cast<Ty *>(c));
oneapi::mkl::blas::column_major::trmm(q, left_right, upper_lower, trans,
unit_diag, m, n, alpha_val, data_a, lda,
data_c, ldc);
}
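// A minimal usage sketch for trmm: multiply an m x n float matrix B from the
// left by a lower-triangular m x m matrix A and write the product to C
// (illustrative only; `d_a`, `d_b` and `d_c` are placeholder device pointers
// and alpha is a host-side scalar read through dpct::get_value):
//
//   float alpha = 1.0f;
//   dpct::trmm(q, oneapi::mkl::side::left, oneapi::mkl::uplo::lower,
//              oneapi::mkl::transpose::nontrans, oneapi::mkl::diag::nonunit,
//              m, n, &alpha, d_a, /*lda=*/m, d_b, /*ldb=*/m, d_c, /*ldc=*/m);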
} // namespace dpct
#endif // __DPCT_BLAS_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/atomic.hpp | //==---- atomic.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ATOMIC_HPP__
#define __DPCT_ATOMIC_HPP__
#include <sycl/sycl.hpp>
namespace dpct {
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_add(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_add(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_add(operand);
}
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_add(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_add<T1, addressSpace>(addr, operand, memoryOrder);
}
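// A minimal usage sketch from device code: every work-item atomically adds
// 1.0f into a single accumulator (illustrative only; `d_sum` is a placeholder
// pointer to one float in USM device or shared memory, initialized to 0):
//
//   q.parallel_for(sycl::range<1>(n), [=](sycl::id<1>) {
//     dpct::atomic_fetch_add(d_sum, 1.0f);
//   }).wait();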
/// Atomically subtract the value operand from the value at the addr and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to subtract from the value at \p addr
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_sub(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_sub(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_sub(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_sub(operand);
}
/// Atomically subtract the value operand from the value at the addr and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to subtract from the value at \p addr
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_sub(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_sub(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_sub<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically perform a bitwise AND between the value operand and the value at the addr
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise AND operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_and(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_and(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_and(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_and(operand);
}
/// Atomically perform a bitwise AND between the value operand and the value at the addr
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise AND operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_and(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_and(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_and<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically or the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise OR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_or(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_or(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_or(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_or(operand);
}
/// Atomically or the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise OR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_or(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_or(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_or<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically xor the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise XOR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_xor(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_xor(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_xor(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_xor(operand);
}
/// Atomically xor the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise XOR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_xor(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_xor(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_xor<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically calculate the minimum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_min(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_min(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_min(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_min(operand);
}
/// Atomically calculate the minimum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_min(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_min(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_min<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically calculate the maximum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_max(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_max(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_max(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_max(operand);
}
/// Atomically calculate the maximum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_max(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_max(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_max<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically set the value stored in \p addr to \p operand if the old value stored
/// in \p addr is equal to zero or greater than \p operand, else decrement the value
/// stored in \p addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline unsigned int atomic_fetch_compare_dec(unsigned int *addr,
unsigned int operand) {
auto atm = sycl::atomic_ref<unsigned int, memoryOrder, memoryScope,
addressSpace>(addr[0]);
unsigned int old;
while (true) {
old = atm.load();
if (old == 0 || old > operand) {
if (atm.compare_exchange_strong(old, operand))
break;
} else if (atm.compare_exchange_strong(old, old - 1))
break;
}
return old;
}
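// A minimal usage sketch from device code (illustrative only; `d_count` is a
// placeholder pointer to an unsigned int in global memory): the value is
// decremented on each call and reloaded to 16 whenever it has reached 0 or
// exceeds 16; the previous value is returned.
//
//   unsigned int previous = dpct::atomic_fetch_compare_dec(d_count, 16u);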
/// Atomically increment the value stored in \p addr if the old value stored in
/// \p addr is less than \p operand, else reset the value stored in \p addr to 0.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline unsigned int atomic_fetch_compare_inc(unsigned int *addr,
unsigned int operand) {
auto atm = sycl::atomic_ref<unsigned int, memoryOrder, memoryScope,
addressSpace>(addr[0]);
unsigned int old;
while (true) {
old = atm.load();
if (old >= operand) {
if (atm.compare_exchange_strong(old, 0))
break;
} else if (atm.compare_exchange_strong(old, old + 1))
break;
}
return old;
}
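// A minimal usage sketch from device code (illustrative only; `d_counter` is a
// placeholder pointer to an unsigned int in global memory): the value cycles
// through 0..9, wrapping back to 0 once it has reached 9; the previous value
// is returned.
//
//   unsigned int previous = dpct::atomic_fetch_compare_inc(d_counter, 9u);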
/// Atomically increment the value stored in \p addr if the old value stored in
/// \p addr is less than \p operand, else reset the value stored in \p addr to 0.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline unsigned int
atomic_fetch_compare_inc(unsigned int *addr, unsigned int operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr,
operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr,
operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr,
operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
/// Atomically exchange the value at the address addr with the value operand.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to be exchanged with the value pointed by \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_exchange(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.exchange(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_exchange(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.exchange(operand);
}
/// Atomically exchange the value at the address addr with the value operand.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to be exchanged with the value pointed by \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_exchange(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_exchange<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_exchange<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_exchange<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_exchange(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_exchange<T1, addressSpace>(addr, operand, memoryOrder);
}
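// Example (hedged usage sketch; 'slot' is an illustrative int pointer to
// device-accessible memory): swap in a new value and observe the old one.
//   int old_relaxed = dpct::atomic_exchange(slot, 42);
//   int old_acq_rel = dpct::atomic_exchange(slot, 7,
//                                           sycl::memory_order::acq_rel);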
/// Atomically compare the value at \p addr to the value expected and exchange
/// with the value desired if the value at \p addr is equal to the value expected.
/// Returns the value at the \p addr before the call.
/// \param [in, out] addr The multi_ptr to the data.
/// \param expected The value to compare against the value at \p addr.
/// \param desired The value to assign to \p addr if the value at \p addr is expected.
/// \param success The memory ordering used when comparison succeeds.
/// \param fail The memory ordering used when comparison fails.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
T atomic_compare_exchange_strong(
sycl::multi_ptr<T, addressSpace> addr, T expected, T desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(*addr);
atm.compare_exchange_strong(expected, desired, success, fail);
return expected;
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2, typename T3>
T1 atomic_compare_exchange_strong(
sycl::multi_ptr<T1, addressSpace> addr, T2 expected, T3 desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(*addr);
T1 expected_value = expected;
atm.compare_exchange_strong(expected_value, desired, success, fail);
return expected_value;
}
/// Atomically compare the value at \p addr to the value expected and exchange
/// with the value desired if the value at \p addr is equal to the value expected.
/// Returns the value at the \p addr before the call.
/// \param [in, out] addr The pointer to the data.
/// \param expected The value to compare against the value at \p addr.
/// \param desired The value to assign to \p addr if the value at \p addr is expected.
/// \param success The memory ordering used when comparison succeeds.
/// \param fail The memory ordering used when comparison fails.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
T atomic_compare_exchange_strong(
T *addr, T expected, T desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
atm.compare_exchange_strong(expected, desired, success, fail);
return expected;
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2, typename T3>
T1 atomic_compare_exchange_strong(
T1 *addr, T2 expected, T3 desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
T1 expected_value = expected;
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
atm.compare_exchange_strong(expected_value, desired, success, fail);
return expected_value;
}
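// Example (hedged usage sketch; 'flag' is an illustrative unsigned int pointer
// to device-accessible memory): attempt a 0 -> 1 transition and test success.
//   unsigned int seen = dpct::atomic_compare_exchange_strong(flag, 0u, 1u);
//   bool acquired = (seen == 0u); // the return value is the pre-call value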
/// Atomic extension to implement standard APIs in std::atomic
namespace detail{
template <typename T> struct IsValidAtomicType {
static constexpr bool value =
(std::is_same<T, int>::value || std::is_same<T, unsigned int>::value ||
std::is_same<T, long>::value || std::is_same<T, unsigned long>::value ||
std::is_same<T, long long>::value ||
std::is_same<T, unsigned long long>::value ||
std::is_same<T, float>::value || std::is_same<T, double>::value ||
std::is_pointer<T>::value);
};
} // namespace detail
template <typename T,
sycl::memory_scope DefaultScope = sycl::memory_scope::system,
sycl::memory_order DefaultOrder = sycl::memory_order::seq_cst,
sycl::access::address_space Space =
sycl::access::address_space::generic_space>
class atomic{
static_assert(
detail::IsValidAtomicType<T>::value,
"Invalid atomic type. Valid types are int, unsigned int, long, "
"unsigned long, long long, unsigned long long, float, double "
"and pointer types");
T __d;
public:
/// default memory synchronization order
static constexpr sycl::memory_order default_read_order =
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space>::default_read_order;
static constexpr sycl::memory_order default_write_order =
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space>::default_write_order;
static constexpr sycl::memory_scope default_scope = DefaultScope;
static constexpr sycl::memory_order default_read_modify_write_order =
DefaultOrder;
/// Default constructor.
constexpr atomic() noexcept = default;
/// Constructor with initialize value.
constexpr atomic(T d) noexcept : __d(d){};
/// atomically replaces the value of the referenced object with a non-atomic argument
/// \param operand The value to replace the pointed value.
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
void store(T operand, sycl::memory_order memoryOrder = default_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
atm.store(operand, memoryOrder, memoryScope);
}
/// atomically obtains the value of the referenced object
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object
T load(sycl::memory_order memoryOrder = default_read_order,
sycl::memory_scope memoryScope = default_scope) const noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(
const_cast<T &>(__d));
return atm.load(memoryOrder, memoryScope);
}
/// atomically replaces the value of the referenced object and obtains the value held previously
/// \param operand The value to replace the pointed value.
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T exchange(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.exchange(operand, memoryOrder, memoryScope);
}
/// atomically compares the value of the referenced object with non-atomic argument
/// and performs atomic exchange if equal or atomic load if not
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
  /// \param success The memory ordering for the read-modify-write operation
  /// \param failure The memory ordering for the load operation
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_weak(
T &expected, T desired,
sycl::memory_order success, sycl::memory_order failure,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_weak(expected, desired, success, failure, memoryScope);
}
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param memoryOrder The memory synchronization ordering for operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_weak(T &expected, T desired,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_weak(expected, desired, memoryOrder, memoryScope);
}
/// atomically compares the value of the referenced object with non-atomic argument
/// and performs atomic exchange if equal or atomic load if not
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
  /// \param success The memory ordering for the read-modify-write operation
  /// \param failure The memory ordering for the load operation
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_strong(
T &expected, T desired,
sycl::memory_order success, sycl::memory_order failure,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_strong(expected, desired, success, failure, memoryScope);
}
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param memoryOrder The memory synchronization ordering for operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_strong(T &expected, T desired,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_strong(expected, desired, memoryOrder, memoryScope);
}
/// atomically adds the argument to the value stored in the atomic object and obtains the value held previously
/// \param operand The other argument of arithmetic addition
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T fetch_add(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.fetch_add(operand, memoryOrder, memoryScope);
}
/// atomically subtracts the argument from the value stored in the atomic object and obtains the value held previously
/// \param operand The other argument of arithmetic subtraction
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T fetch_sub(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.fetch_sub(operand, memoryOrder, memoryScope);
}
};
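// Example (hedged usage sketch of the dpct::atomic wrapper; the counter object
// is assumed to live in device-accessible memory when used from a kernel):
//   dpct::atomic<int> counter(0);
//   counter.fetch_add(1);                // seq_cst, system scope by default
//   int snapshot = counter.load(sycl::memory_order::relaxed);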
} // namespace dpct
#endif // __DPCT_ATOMIC_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/rng_utils.hpp | //==---- rng_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_RNG_UTILS_HPP__
#define __DPCT_RNG_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
#include <oneapi/mkl/rng/device.hpp>
#endif
#include "device.hpp"
#include "lib_common_utils.hpp"
namespace dpct {
namespace rng {
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
namespace device {
/// The random number generator on device.
/// \tparam engine_t The device random number generator engine. It can only be
/// oneapi::mkl::rng::device::mrg32k3a<1> or
/// oneapi::mkl::rng::device::mrg32k3a<4> or
/// oneapi::mkl::rng::device::philox4x32x10<1> or
/// oneapi::mkl::rng::device::philox4x32x10<4> or
/// oneapi::mkl::rng::device::mcg59<1>.
template <typename engine_t> class rng_generator {
static_assert(
std::disjunction_v<
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<4>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<4>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>,
"engine_t can only be oneapi::mkl::rng::device::mrg32k3a<1> or "
"oneapi::mkl::rng::device::mrg32k3a<4> or "
"oneapi::mkl::rng::device::philox4x32x10<1> or "
"oneapi::mkl::rng::device::philox4x32x10<4> or "
"oneapi::mkl::rng::device::mcg59<1>.");
static constexpr bool _is_engine_vec_size_one = std::disjunction_v<
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>;
static constexpr std::uint64_t default_seed = 0;
oneapi::mkl::rng::device::bits<std::uint32_t> _distr_bits;
oneapi::mkl::rng::device::uniform_bits<std::uint32_t> _distr_uniform_bits;
oneapi::mkl::rng::device::gaussian<float> _distr_gaussian_float;
oneapi::mkl::rng::device::gaussian<double> _distr_gaussian_double;
oneapi::mkl::rng::device::lognormal<float> _distr_lognormal_float;
oneapi::mkl::rng::device::lognormal<double> _distr_lognormal_double;
oneapi::mkl::rng::device::poisson<std::uint32_t> _distr_poisson;
oneapi::mkl::rng::device::uniform<float> _distr_uniform_float;
oneapi::mkl::rng::device::uniform<double> _distr_uniform_double;
engine_t _engine;
public:
/// Default constructor of rng_generator
rng_generator() { _engine = engine_t(default_seed); }
/// Constructor of rng_generator if engine type is not mcg59
/// \param [in] seed The seed to initialize the engine state.
  /// \param [in] num_to_skip The number of elements that need to be skipped.
/// The number is calculated as: num_to_skip[0] + num_to_skip[1] * 2^64 +
/// num_to_skip[2] * 2^128 + ... + num_to_skip[n-1] * 2^(64*(n-1))
template <typename T = engine_t,
typename std::enable_if<!std::is_same_v<
T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr>
rng_generator(std::uint64_t seed,
std::initializer_list<std::uint64_t> num_to_skip) {
_engine = engine_t(seed, num_to_skip);
}
/// Constructor of rng_generator if engine type is mcg59
/// \param [in] seed The seed to initialize the engine state.
  /// \param [in] num_to_skip The number of elements that need to be skipped.
template <typename T = engine_t,
typename std::enable_if<std::is_same_v<
T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr>
rng_generator(std::uint64_t seed, std::uint64_t num_to_skip) {
_engine = engine_t(seed, num_to_skip);
}
  /// Generate random number(s) that obey the distribution \tparam distr_t.
  /// \tparam distr_t The distribution of the random number. It can only be
/// oneapi::mkl::rng::device::bits<std::uint32_t>,
/// oneapi::mkl::rng::device::uniform_bits<std::uint32_t>,
/// oneapi::mkl::rng::device::gaussian<float>,
/// oneapi::mkl::rng::device::gaussian<double>,
/// oneapi::mkl::rng::device::lognormal<float>,
/// oneapi::mkl::rng::device::lognormal<double>,
/// oneapi::mkl::rng::device::poisson<std::uint32_t>,
/// oneapi::mkl::rng::device::uniform<float> or
/// oneapi::mkl::rng::device::uniform<double>
/// \tparam vec_size The length of the return vector. It can only be 1, 2
/// or 4.
/// \param distr_params The parameter(s) for lognormal or poisson
/// distribution.
/// \return The vector of the random number(s).
template <typename distr_t, int vec_size, class... distr_params_t>
auto generate(distr_params_t... distr_params) {
static_assert(vec_size == 1 || vec_size == 2 || vec_size == 4,
"vec_size is not supported.");
static_assert(
std::disjunction_v<
std::is_same<distr_t,
oneapi::mkl::rng::device::bits<std::uint32_t>>,
std::is_same<distr_t,
oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>,
std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<double>>,
std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<double>>,
std::is_same<distr_t,
oneapi::mkl::rng::device::poisson<std::uint32_t>>,
std::is_same<distr_t, oneapi::mkl::rng::device::uniform<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::uniform<double>>>,
"distribution is not supported.");
if constexpr (std::is_same_v<
distr_t, oneapi::mkl::rng::device::bits<std::uint32_t>>) {
return generate_vec<vec_size>(_distr_bits);
}
if constexpr (std::is_same_v<
distr_t,
oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>) {
return generate_vec<vec_size>(_distr_uniform_bits);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::gaussian<float>>) {
return generate_vec<vec_size>(_distr_gaussian_float);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::gaussian<double>>) {
return generate_vec<vec_size>(_distr_gaussian_double);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::lognormal<float>>) {
return generate_vec<vec_size>(_distr_lognormal_float, distr_params...,
0.0f, 1.0f);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::lognormal<double>>) {
return generate_vec<vec_size>(_distr_lognormal_double, distr_params...,
0.0, 1.0);
}
if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::poisson<
std::uint32_t>>) {
return generate_vec<vec_size>(_distr_poisson, distr_params...);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::uniform<float>>) {
return generate_vec<vec_size>(_distr_uniform_float);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::uniform<double>>) {
return generate_vec<vec_size>(_distr_uniform_double);
}
}
/// Get the random number generator engine.
/// \return The reference of the internal random number generator engine.
engine_t &get_engine() { return _engine; }
private:
template <int vec_size, typename distr_t, class... distr_params_t>
auto generate_vec(distr_t &distr, distr_params_t... distr_params) {
if constexpr (sizeof...(distr_params_t)) {
typename distr_t::param_type pt(distr_params...);
distr.param(pt);
}
if constexpr (vec_size == 4) {
if constexpr (_is_engine_vec_size_one) {
sycl::vec<typename distr_t::result_type, 4> res;
res.x() = oneapi::mkl::rng::device::generate(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate(distr, _engine);
res.z() = oneapi::mkl::rng::device::generate(distr, _engine);
res.w() = oneapi::mkl::rng::device::generate(distr, _engine);
return res;
} else {
return oneapi::mkl::rng::device::generate(distr, _engine);
}
} else if constexpr (vec_size == 1) {
if constexpr (_is_engine_vec_size_one) {
return oneapi::mkl::rng::device::generate(distr, _engine);
} else {
return oneapi::mkl::rng::device::generate_single(distr, _engine);
}
} else if constexpr (vec_size == 2) {
if constexpr (_is_engine_vec_size_one) {
sycl::vec<typename distr_t::result_type, 2> res;
res.x() = oneapi::mkl::rng::device::generate(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate(distr, _engine);
return res;
} else {
sycl::vec<typename distr_t::result_type, 2> res;
res.x() = oneapi::mkl::rng::device::generate_single(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate_single(distr, _engine);
return res;
}
}
}
};
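// Example (hedged usage sketch; 'idx' stands for a work-item linear id and is
// not part of this header): four uniform floats per work-item, with each
// work-item skipping ahead so the streams do not overlap.
//   dpct::rng::device::rng_generator<
//       oneapi::mkl::rng::device::philox4x32x10<4>>
//       gen(1234ULL, {idx * 4});
//   sycl::vec<float, 4> r =
//       gen.generate<oneapi::mkl::rng::device::uniform<float>, 4>();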
} // namespace device
#endif
namespace host {
namespace detail {
class rng_generator_base {
public:
/// Set the seed of host rng_generator.
/// \param seed The engine seed.
virtual void set_seed(const std::uint64_t seed) = 0;
/// Set the dimensions of host rng_generator.
/// \param dimensions The engine dimensions.
virtual void set_dimensions(const std::uint32_t dimensions) = 0;
/// Set the queue of host rng_generator.
/// \param queue The engine queue.
virtual void set_queue(sycl::queue *queue) = 0;
/// Generate unsigned int random number(s) with 'uniform_bits' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform_bits(unsigned int *output,
std::int64_t n) = 0;
/// Generate unsigned long long random number(s) with 'uniform_bits'
/// distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform_bits(unsigned long long *output,
std::int64_t n) = 0;
/// Generate float random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
virtual inline void generate_lognormal(float *output, std::int64_t n, float m,
float s) = 0;
/// Generate double random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
virtual inline void generate_lognormal(double *output, std::int64_t n,
double m, double s) = 0;
/// Generate float random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
virtual inline void generate_gaussian(float *output, std::int64_t n,
float mean, float stddev) = 0;
/// Generate double random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
virtual inline void generate_gaussian(double *output, std::int64_t n,
double mean, double stddev) = 0;
/// Generate unsigned int random number(s) with 'poisson' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param lambda Lambda for the Poisson distribution.
virtual inline void generate_poisson(unsigned int *output, std::int64_t n,
double lambda) = 0;
/// Generate float random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform(float *output, std::int64_t n) = 0;
/// Generate double random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform(double *output, std::int64_t n) = 0;
/// Skip ahead several random number(s).
/// \param num_to_skip The number of random numbers to be skipped.
virtual void skip_ahead(const std::uint64_t num_to_skip) = 0;
protected:
sycl::queue *_queue{&dpct::get_default_queue()};
std::uint64_t _seed{0};
std::uint32_t _dimensions{1};
};
/// The random number generator on host.
template <typename engine_t = oneapi::mkl::rng::philox4x32x10>
class rng_generator : public rng_generator_base {
public:
/// Constructor of rng_generator.
rng_generator() : _engine(create_engine(_queue, _seed, _dimensions)) {}
/// Set the seed of host rng_generator.
/// \param seed The engine seed.
void set_seed(const std::uint64_t seed) {
if (seed == _seed) {
return;
}
_seed = seed;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Set the dimensions of host rng_generator.
/// \param dimensions The engine dimensions.
void set_dimensions(const std::uint32_t dimensions) {
if (dimensions == _dimensions) {
return;
}
_dimensions = dimensions;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Set the queue of host rng_generator.
/// \param queue The engine queue.
void set_queue(sycl::queue *queue) {
if (queue == _queue) {
return;
}
_queue = queue;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Generate unsigned int random number(s) with 'uniform_bits' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform_bits(unsigned int *output, std::int64_t n) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
static_assert(sizeof(unsigned int) == sizeof(std::uint32_t));
generate<oneapi::mkl::rng::uniform_bits<std::uint32_t>>(
(std::uint32_t *)output, n);
#endif
}
/// Generate unsigned long long random number(s) with 'uniform_bits'
/// distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform_bits(unsigned long long *output,
std::int64_t n) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
static_assert(sizeof(unsigned long long) == sizeof(std::uint64_t));
generate<oneapi::mkl::rng::uniform_bits<std::uint64_t>>(
(std::uint64_t *)output, n);
#endif
}
/// Generate float random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
inline void generate_lognormal(float *output, std::int64_t n, float m,
float s) {
generate<oneapi::mkl::rng::lognormal<float>>(output, n, m, s);
}
/// Generate double random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
inline void generate_lognormal(double *output, std::int64_t n, double m,
double s) {
generate<oneapi::mkl::rng::lognormal<double>>(output, n, m, s);
}
/// Generate float random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
inline void generate_gaussian(float *output, std::int64_t n, float mean,
float stddev) {
generate<oneapi::mkl::rng::gaussian<float>>(output, n, mean, stddev);
}
/// Generate double random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
inline void generate_gaussian(double *output, std::int64_t n, double mean,
double stddev) {
generate<oneapi::mkl::rng::gaussian<double>>(output, n, mean, stddev);
}
/// Generate unsigned int random number(s) with 'poisson' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param lambda Lambda for the Poisson distribution.
inline void generate_poisson(unsigned int *output, std::int64_t n,
double lambda) {
generate<oneapi::mkl::rng::poisson<unsigned int>>(output, n, lambda);
}
/// Generate float random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform(float *output, std::int64_t n) {
generate<oneapi::mkl::rng::uniform<float>>(output, n);
}
/// Generate double random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform(double *output, std::int64_t n) {
generate<oneapi::mkl::rng::uniform<double>>(output, n);
}
/// Skip ahead several random number(s).
/// \param num_to_skip The number of random numbers to be skipped.
void skip_ahead(const std::uint64_t num_to_skip) {
#ifndef __INTEL_MKL__
oneapi::mkl::rng::skip_ahead(_engine, num_to_skip);
#else
if constexpr (std::is_same_v<engine_t, oneapi::mkl::rng::mt2203>)
throw std::runtime_error("no skip_ahead method of mt2203 engine.");
else
oneapi::mkl::rng::skip_ahead(_engine, num_to_skip);
#endif
}
private:
static inline engine_t create_engine(sycl::queue *queue,
const std::uint64_t seed,
const std::uint32_t dimensions) {
#ifdef __INTEL_MKL__
return std::is_same_v<engine_t, oneapi::mkl::rng::sobol>
? engine_t(*queue, dimensions)
: engine_t(*queue, seed);
#else
return engine_t(*queue, seed);
#endif
}
template <typename distr_t, typename buffer_t, class... distr_params_t>
void generate(buffer_t *output, const std::int64_t n,
const distr_params_t... distr_params) {
auto output_buf = dpct::detail::get_memory(output);
oneapi::mkl::rng::generate(distr_t(distr_params...), _engine, n,
output_buf);
}
engine_t _engine{};
};
} // namespace detail
} // namespace host
enum class random_engine_type {
philox4x32x10,
mrg32k3a,
mt2203,
mt19937,
sobol,
mcg59
};
typedef std::shared_ptr<rng::host::detail::rng_generator_base> host_rng_ptr;
/// Create a host random number generator.
/// \param type The random engine type.
/// \return The pointer of random number generator.
inline host_rng_ptr create_host_rng(const random_engine_type type) {
switch (type) {
case random_engine_type::philox4x32x10:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::philox4x32x10>>();
case random_engine_type::mrg32k3a:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mrg32k3a>>();
#ifndef __INTEL_MKL__
  default:
    throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
                             "Interfaces Project does not support this API.");
#else
case random_engine_type::mt2203:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mt2203>>();
case random_engine_type::mt19937:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mt19937>>();
case random_engine_type::sobol:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::sobol>>();
case random_engine_type::mcg59:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mcg59>>();
#endif
}
}
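// Example (hedged usage sketch; 'q', 'data' and 'n' are illustrative): create
// a Philox host generator and fill a USM allocation with uniform floats.
//   dpct::rng::host_rng_ptr rng = dpct::rng::create_host_rng(
//       dpct::rng::random_engine_type::philox4x32x10);
//   rng->set_queue(&q);              // q: an existing sycl::queue
//   rng->set_seed(42);
//   rng->generate_uniform(data, n);  // data: float* with room for n values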
} // namespace rng
} // namespace dpct
#endif // __DPCT_RNG_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/dpl_extras/numeric.h | //==---- numeric.h --------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_NUMERIC_H__
#define __DPCT_NUMERIC_H__
namespace dpct {
template <typename Policy, typename InputIt1, typename InputIt2, typename T>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
InputIt2 first2, T init) {
return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
first2, init);
}
template <typename Policy, typename InputIt1, typename InputIt2, typename T,
typename BinaryOperation1, typename BinaryOperation2>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
InputIt2 first2, T init, BinaryOperation1 op1,
BinaryOperation2 op2) {
return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
first2, init, op1, op2);
}
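// Example (hedged usage sketch; the containers and policy are illustrative):
// the basic overload reduces a[i] * b[i] over the range, i.e. a dot product.
//   std::vector<int> a{1, 2, 3}, b{4, 5, 6};
//   int dot = dpct::inner_product(oneapi::dpl::execution::seq,
//                                 a.begin(), a.end(), b.begin(), 0); // 32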
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/dpl_extras/iterators.h | //==---- iterators.h ------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ITERATORS_H__
#define __DPCT_ITERATORS_H__
#include <oneapi/dpl/iterator>
#include "functional.h"
namespace dpct {
namespace internal {
// Wrapper class returned from a dereferenced transform_iterator which was
// created using
// make_transform_output_iterator(). Used to apply the supplied transform
// function when writing into an object of this class.
//
// Example:
// int a[] = {0, 1, 2, 3, 4};
// int* p = a;
// auto f = [](auto v) {return v*v;};
// auto tr_out = dpct::make_transform_output_iterator(p+1, f);
// auto wrap = *tr_out; // wrap is a transform_output_ref_wrapper
// std::cout<<*(p+1)<<std::endl; // '1'
// wrap = 2; // apply function, store 2*2=4
// std::cout<<*(p+1)<<std::endl; // '4'
template <typename T, typename _UnaryFunc> class transform_output_ref_wrapper {
private:
T __my_reference_;
_UnaryFunc __my_unary_func_;
public:
template <typename U>
transform_output_ref_wrapper(U &&__reference, _UnaryFunc __unary_func)
: __my_reference_(std::forward<U>(__reference)),
__my_unary_func_(__unary_func) {}
// When writing to an object of this type, apply the supplied unary function,
// then write to the wrapped reference
template <typename UnaryInputType>
transform_output_ref_wrapper &operator=(const UnaryInputType &e) {
__my_reference_ = __my_unary_func_(e);
return *this;
}
};
// Unary functor to create a transform_output_ref_wrapper when a
// transform_iterator is dereferenced, so that the supplied unary function
// may be applied on write, resulting in a transform_output_iterator
template <typename _UnaryFunc> struct _Unary_Out {
_Unary_Out(_UnaryFunc __f_) : __f(__f_) {}
_UnaryFunc __f;
template <typename T> auto operator()(T &&val) const {
return transform_output_ref_wrapper<T, _UnaryFunc>(std::forward<T>(val),
__f);
}
};
} // end namespace internal
using std::advance;
using std::distance;
template <typename T>
oneapi::dpl::counting_iterator<T> make_counting_iterator(const T &input) {
return oneapi::dpl::counting_iterator<T>(input);
}
template <typename _Tp> class constant_iterator {
public:
typedef std::false_type is_hetero;
typedef std::true_type is_passed_directly;
typedef std::ptrdiff_t difference_type;
typedef _Tp value_type;
typedef _Tp *pointer;
  // There is no storage behind the iterator, so we return a value instead of
  // a reference.
typedef const _Tp reference;
typedef const _Tp const_reference;
typedef std::random_access_iterator_tag iterator_category;
explicit constant_iterator(_Tp __init)
: __my_value_(__init), __my_counter_(0) {}
private:
// used to construct iterator instances with different counter values required
// by arithmetic operators
constant_iterator(const _Tp &__value, const difference_type &__offset)
: __my_value_(__value), __my_counter_(__offset) {}
public:
// non-const variants of access operators are not provided so unintended
// writes are caught at compile time.
const_reference operator*() const { return __my_value_; }
const_reference operator[](difference_type) const { return __my_value_; }
difference_type operator-(const constant_iterator &__it) const {
return __my_counter_ - __it.__my_counter_;
}
constant_iterator &operator+=(difference_type __forward) {
__my_counter_ += __forward;
return *this;
}
constant_iterator &operator-=(difference_type __backward) {
return *this += -__backward;
}
constant_iterator &operator++() { return *this += 1; }
constant_iterator &operator--() { return *this -= 1; }
constant_iterator operator++(int) {
constant_iterator __it(*this);
++(*this);
return __it;
}
constant_iterator operator--(int) {
constant_iterator __it(*this);
--(*this);
return __it;
}
constant_iterator operator-(difference_type __backward) const {
return constant_iterator(__my_value_, __my_counter_ - __backward);
}
constant_iterator operator+(difference_type __forward) const {
return constant_iterator(__my_value_, __my_counter_ + __forward);
}
friend constant_iterator operator+(difference_type __forward,
const constant_iterator __it) {
return __it + __forward;
}
bool operator==(const constant_iterator &__it) const {
return __my_value_ == __it.__my_value_ &&
this->__my_counter_ == __it.__my_counter_;
}
bool operator!=(const constant_iterator &__it) const {
return !(*this == __it);
}
bool operator<(const constant_iterator &__it) const {
return *this - __it < 0;
}
bool operator>(const constant_iterator &__it) const { return __it < *this; }
bool operator<=(const constant_iterator &__it) const {
return !(*this > __it);
}
bool operator>=(const constant_iterator &__it) const {
return !(*this < __it);
}
private:
_Tp __my_value_;
uint64_t __my_counter_;
};
template <typename _Tp>
constant_iterator<_Tp> make_constant_iterator(_Tp __value) {
return constant_iterator<_Tp>(__value);
}
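// Example (hedged usage sketch): a constant_iterator yields the same value at
// every position, so it can stand in for a filled sequence without storage.
//   auto ones = dpct::make_constant_iterator(1);
//   int v0 = ones[0];     // 1
//   int v100 = ones[100]; // still 1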
// key_value_pair class to represent a key and value, specifically a
// dereferenced arg_index_input_iterator
template <typename _KeyTp, typename _ValueTp> class key_value_pair {
public:
key_value_pair() = default;
key_value_pair(const _KeyTp &_key, const _ValueTp &_value)
: key(_key), value(_value) {}
bool operator==(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const {
return (key == _kvp.key) && (value == _kvp.value);
}
bool operator!=(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const {
return (key != _kvp.key) || (value != _kvp.value);
}
_KeyTp key;
_ValueTp value;
};
namespace detail {
template <typename KeyTp, typename _ValueTp> struct make_key_value_pair {
template <typename ValRefTp>
key_value_pair<KeyTp, _ValueTp>
operator()(const oneapi::dpl::__internal::tuple<KeyTp, ValRefTp> &tup) const {
return ::dpct::key_value_pair<KeyTp, _ValueTp>(::std::get<0>(tup),
::std::get<1>(tup));
}
};
template <class T> struct __zip_iterator_impl;
template <class... Ts> struct __zip_iterator_impl<std::tuple<Ts...>> {
using type = oneapi::dpl::zip_iterator<Ts...>;
};
} // end namespace detail
// dpct::zip_iterator can only accept std::tuple type as template argument for
// compatibility purpose. Please use oneapi::dpl::zip_iterator if you want to
// pass iterator's types directly.
template <typename... Ts>
using zip_iterator = typename detail::__zip_iterator_impl<Ts...>::type;
// arg_index_input_iterator is an iterator over an input iterator, with an index.
// When dereferenced, it returns a key_value_pair, which can be interrogated for
// the index key or the value from the input iterator
template <typename InputIteratorT, typename OffsetT = ptrdiff_t,
typename OutputValueT =
typename ::std::iterator_traits<InputIteratorT>::value_type>
class arg_index_input_iterator
: public oneapi::dpl::transform_iterator<
oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>,
InputIteratorT>,
detail::make_key_value_pair<OffsetT, OutputValueT>> {
using arg_index_input_iterator_wrap = oneapi::dpl::transform_iterator<
oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>,
InputIteratorT>,
detail::make_key_value_pair<OffsetT, OutputValueT>>;
public:
typedef OffsetT difference_type;
  // signal to __get_sycl_range that this iterator is a direct pass iterator
using is_zip = ::std::true_type;
arg_index_input_iterator(const arg_index_input_iterator_wrap &__arg_wrap)
: arg_index_input_iterator_wrap(__arg_wrap) {}
arg_index_input_iterator(InputIteratorT __iter)
: arg_index_input_iterator_wrap(
oneapi::dpl::make_zip_iterator(
oneapi::dpl::counting_iterator(OffsetT(0)), __iter),
detail::make_key_value_pair<OffsetT, OutputValueT>()) {}
arg_index_input_iterator &operator=(const arg_index_input_iterator &__input) {
arg_index_input_iterator_wrap::operator=(__input);
return *this;
}
arg_index_input_iterator &operator++() {
arg_index_input_iterator_wrap::operator++();
return *this;
}
arg_index_input_iterator &operator--() {
arg_index_input_iterator_wrap::operator--();
return *this;
}
arg_index_input_iterator operator++(int) {
arg_index_input_iterator __it(*this);
++(*this);
return __it;
}
arg_index_input_iterator operator--(int) {
arg_index_input_iterator __it(*this);
--(*this);
return __it;
}
arg_index_input_iterator operator+(difference_type __forward) const {
return arg_index_input_iterator(
arg_index_input_iterator_wrap::operator+(__forward));
}
arg_index_input_iterator operator-(difference_type __backward) const {
return arg_index_input_iterator(
arg_index_input_iterator_wrap::operator-(__backward));
}
arg_index_input_iterator &operator+=(difference_type __forward) {
arg_index_input_iterator_wrap::operator+=(__forward);
return *this;
}
arg_index_input_iterator &operator-=(difference_type __backward) {
arg_index_input_iterator_wrap::operator-=(__backward);
return *this;
}
friend arg_index_input_iterator
operator+(difference_type __forward, const arg_index_input_iterator &__it) {
return __it + __forward;
}
difference_type operator-(const arg_index_input_iterator &__it) const {
return arg_index_input_iterator_wrap::operator-(__it);
}
bool operator==(const arg_index_input_iterator &__it) const {
return arg_index_input_iterator_wrap::operator==(__it);
}
bool operator!=(const arg_index_input_iterator &__it) const {
return !(*this == __it);
}
bool operator<(const arg_index_input_iterator &__it) const {
return *this - __it < 0;
}
bool operator>(const arg_index_input_iterator &__it) const {
return __it < *this;
}
bool operator<=(const arg_index_input_iterator &__it) const {
return !(*this > __it);
}
bool operator>=(const arg_index_input_iterator &__it) const {
return !(*this < __it);
}
// returns an arg_index_input_iterator with the same iter position, but a
// count reset to 0
arg_index_input_iterator create_normalized() {
return arg_index_input_iterator(
::std::get<1>(arg_index_input_iterator_wrap::base().base()));
}
};
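// Example (hedged usage sketch; 'data' is an illustrative const float pointer):
// dereferencing yields a key_value_pair of {index, value}.
//   dpct::arg_index_input_iterator<const float *> it(data);
//   auto kv = *(it + 3); // kv.key == 3, kv.value == data[3]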
template <typename IterT> struct io_iterator_pair {
inline io_iterator_pair() : selector(false) {}
inline io_iterator_pair(const IterT &first, const IterT &second)
: selector(false) {
iter[0] = first;
iter[1] = second;
}
inline IterT first() const { return selector ? iter[1] : iter[0]; }
inline IterT second() const { return selector ? iter[0] : iter[1]; }
inline void swap() { selector = !selector; }
bool selector;
IterT iter[2];
};
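// Example (hedged usage sketch; 'in_ptr' and 'out_ptr' are illustrative):
// io_iterator_pair models double buffering; swap() flips input and output.
//   dpct::io_iterator_pair<int *> bufs(in_ptr, out_ptr);
//   // pass bufs.first() as input and bufs.second() as output, then:
//   bufs.swap(); // the next pass reads what was just written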
template <typename _Iter, typename _UnaryFunc>
auto make_transform_output_iterator(_Iter __it, _UnaryFunc __unary_func) {
return oneapi::dpl::transform_iterator(
__it, internal::_Unary_Out<_UnaryFunc>(__unary_func));
}
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/dpl_extras/algorithm.h | //==---- algorithm.h ------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ALGORITHM_H__
#define __DPCT_ALGORITHM_H__
#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/numeric>
#include "functional.h"
#include "iterators.h"
#include "vector.h"
namespace dpct {
template <typename Policy, typename Iter1, typename Iter2, typename Pred,
typename T>
void replace_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p,
const T &new_value) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
std::transform(std::forward<Policy>(policy), first, last, mask, first,
internal::replace_if_fun<typename std::iterator_traits<Iter1>::value_type, Pred>(p, new_value));
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred, typename T>
Iter3 replace_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred p, const T &new_value) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return std::transform(std::forward<Policy>(policy), first, last, mask, result,
internal::replace_if_fun<typename std::iterator_traits<Iter3>::value_type, Pred>(p, new_value));
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_hetero_execution_policy<Policy, Iter1>
remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
using policy_type = typename std::decay<Policy>::type;
using internal::__buffer;
using ValueType = typename std::iterator_traits<Iter1>::value_type;
__buffer<ValueType> _tmp(std::distance(first, last));
auto end = std::copy_if(
std::forward<Policy>(policy), make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(_tmp.get(), oneapi::dpl::discard_iterator()),
internal::negate_predicate_key_fun<Pred>(p));
return std::copy(std::forward<Policy>(policy), _tmp.get(),
std::get<0>(end.base()), first);
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
typename std::enable_if<!internal::is_hetero_execution_policy<
typename std::decay<Policy>::type>::value,
Iter1>::type
remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
using policy_type = typename std::decay<Policy>::type;
using ValueType = typename std::iterator_traits<Iter1>::value_type;
std::vector<ValueType> _tmp(std::distance(first, last));
auto end = std::copy_if(
policy, make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(_tmp.begin(), oneapi::dpl::discard_iterator()),
internal::negate_predicate_key_fun<Pred>(p));
return std::copy(policy, _tmp.begin(), std::get<0>(end.base()), first);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred>
Iter3 remove_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
auto ret_val = std::remove_copy_if(
std::forward<Policy>(policy), make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(result, oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
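// Example (hedged usage sketch with host containers; elements whose mask entry
// satisfies the predicate are dropped from the copy):
//   std::vector<int> vals{1, 2, 3, 4}, mask{0, 1, 0, 1}, out(4);
//   auto out_end = dpct::remove_copy_if(
//       oneapi::dpl::execution::seq, vals.begin(), vals.end(), mask.begin(),
//       out.begin(), [](int m) { return m == 1; });
//   // out now begins with {1, 3}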
template <class Policy, class Iter1, class Iter2, class BinaryPred>
std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
BinaryPred binary_pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::unique(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
internal::compare_key_fun<BinaryPred>(binary_pred));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_first, values_first), ret_val);
return std::make_pair(keys_first + n1, values_first + n1);
}
template <class Policy, class Iter1, class Iter2>
std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
return unique(std::forward<Policy>(policy), keys_first, keys_last,
values_first, std::equal_to<T>());
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class BinaryPred>
std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
Iter3 keys_result, Iter4 values_result,
BinaryPred binary_pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::unique_copy(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::unique_fun<BinaryPred>(binary_pred));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4>
std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
Iter3 keys_result, Iter4 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
auto comp = std::equal_to<T>();
return unique_copy(std::forward<Policy>(policy), keys_first, keys_last,
values_first, keys_result, values_result, comp);
}
template <typename Policy, typename Iter, typename Pred>
Iter partition_point(Policy &&policy, Iter first, Iter last, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
if (std::is_partitioned(std::forward<Policy>(policy), first, last, p))
return std::find_if_not(std::forward<Policy>(policy), first, last, p);
else
return first;
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred>
Iter3 copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::copy_if(
std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask),
oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)),
oneapi::dpl::make_zip_iterator(result, oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(pred));
return std::get<0>(ret_val.base());
}
template <class Policy, class Iter1, class Iter2, class UnaryOperation,
class Pred>
Iter2 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 result,
UnaryOperation unary_op, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
const auto n = std::distance(first, last);
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, result),
oneapi::dpl::make_zip_iterator(first, result) + n,
internal::transform_if_fun<T, Pred, UnaryOperation>(pred, unary_op));
return result + n;
}
template <class Policy, class Iter1, class Iter2, class Iter3,
class UnaryOperation, class Pred>
Iter3 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, UnaryOperation unary_op, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
using Ref1 = typename std::iterator_traits<Iter1>::reference;
using Ref2 = typename std::iterator_traits<Iter2>::reference;
const auto n = std::distance(first, last);
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, mask, result),
oneapi::dpl::make_zip_iterator(first, mask, result) + n,
internal::transform_if_unary_zip_mask_fun<T, Pred, UnaryOperation>(
pred, unary_op));
return result + n;
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class BinaryOperation, class Pred>
Iter4 transform_if(Policy &&policy, Iter1 first1, Iter1 last1, Iter2 first2,
Iter3 mask, Iter4 result, BinaryOperation binary_op,
Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
const auto n = std::distance(first1, last1);
using ZipIterator =
typename oneapi::dpl::zip_iterator<Iter1, Iter2, Iter3, Iter4>;
using T = typename std::iterator_traits<ZipIterator>::value_type;
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first1, first2, mask, result),
oneapi::dpl::make_zip_iterator(last1, first2 + n, mask + n, result + n),
internal::transform_if_zip_mask_fun<T, Pred, BinaryOperation>(pred,
binary_op));
return result + n;
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename OutputIter>
void scatter(Policy &&policy, InputIter1 first, InputIter1 last, InputIter2 map,
OutputIter result) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
oneapi::dpl::copy(policy, first, last,
oneapi::dpl::make_permutation_iterator(result, map));
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename OutputIter>
OutputIter gather(Policy &&policy, InputIter1 map_first, InputIter1 map_last,
InputIter2 input_first, OutputIter result) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto perm_begin =
oneapi::dpl::make_permutation_iterator(input_first, map_first);
const int n = ::std::distance(map_first, map_last);
return oneapi::dpl::copy(policy, perm_begin, perm_begin + n, result);
}
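// Illustrative usage sketch (not part of the original header): gather and
// scatter through an index map. `map`, `src`, and `dst` are hypothetical
// device-accessible arrays; `policy` is a oneDPL device policy.
//
//   // Gather: dst[i] = src[map[i]] for i in [0, n)
//   dpct::gather(policy, map, map + n, src, dst);
//   // Scatter: dst[map[i]] = src[i] for i in [0, n)
//   dpct::scatter(policy, src, src + n, map, dst);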
template <typename Policy, typename InputIter1, typename InputIter2,
typename InputIter3, typename OutputIter, typename Predicate>
void scatter_if(Policy &&policy, InputIter1 first, InputIter1 last,
InputIter2 map, InputIter3 mask, OutputIter result,
Predicate pred) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
transform_if(policy, first, last, mask,
oneapi::dpl::make_permutation_iterator(result, map),
[=](auto &&v) { return v; }, [=](auto &&m) { return pred(m); });
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename InputIter3, typename OutputIter, typename Predicate>
OutputIter gather_if(Policy &&policy, InputIter1 map_first, InputIter1 map_last,
InputIter2 mask, InputIter3 input_first, OutputIter result,
Predicate pred) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto perm_begin =
oneapi::dpl::make_permutation_iterator(input_first, map_first);
const int n = std::distance(map_first, map_last);
return transform_if(policy, perm_begin, perm_begin + n, mask, result,
[=](auto &&v) { return v; },
[=](auto &&m) { return pred(m); });
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Iter5, typename Iter6>
std::pair<Iter5, Iter6>
merge(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto n1 = std::distance(keys_first1, keys_last1);
auto n2 = std::distance(keys_first2, keys_last2);
std::merge(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Iter5, typename Iter6, typename Comp>
std::pair<Iter5, Iter6>
merge(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto n1 = std::distance(keys_first1, keys_last1);
auto n2 = std::distance(keys_first2, keys_last2);
std::merge(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2);
}
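// Illustrative usage sketch (not part of the original header): merging two
// sorted key/value ranges. `keys1`, `vals1`, `keys2`, `vals2`, `keys_out`, and
// `vals_out` are hypothetical device-accessible arrays; both key ranges must
// already be sorted with the same ordering.
//
//   auto ends = dpct::merge(policy, keys1, keys1 + n1, keys2, keys2 + n2,
//                           vals1, vals2, keys_out, vals_out);
//   // ends.first  == keys_out + n1 + n2
//   // ends.second == vals_out + n1 + n2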
template <class Policy, class Iter, class T>
void iota(Policy &&policy, Iter first, Iter last, T init, T step) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
std::transform(
std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0),
oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)),
first, internal::sequence_fun<T>(init, step));
}
template <class Policy, class Iter, class T>
void iota(Policy &&policy, Iter first, Iter last, T init) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
iota(std::forward<Policy>(policy), first, last, init, T(1));
}
template <class Policy, class Iter>
void iota(Policy &&policy, Iter first, Iter last) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
iota(std::forward<Policy>(policy), first, last, DiffSize(0), DiffSize(1));
}
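// Illustrative usage sketch (not part of the original header): filling a range
// with an arithmetic sequence. `idx` (int) and `x` (double) are hypothetical
// device-accessible arrays of length n.
//
//   dpct::iota(policy, idx, idx + n);           // 0, 1, 2, ...
//   dpct::iota(policy, idx, idx + n, 5);        // 5, 6, 7, ...
//   dpct::iota(policy, x, x + n, 0.0, 0.5);     // 0.0, 0.5, 1.0, ...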
template <class Policy, class Iter1, class Iter2, class Comp>
void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto first = oneapi::dpl::make_zip_iterator(keys_first, values_first);
auto last = first + std::distance(keys_first, keys_last);
std::sort(std::forward<Policy>(policy), first, last,
internal::compare_key_fun<Comp>(comp));
}
template <class Policy, class Iter1, class Iter2>
void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
sort(std::forward<Policy>(policy), keys_first, keys_last, values_first,
internal::__less());
}
template <class Policy, class Iter1, class Iter2, class Comp>
void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
std::stable_sort(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
internal::compare_key_fun<Comp>(comp));
}
template <class Policy, class Iter1, class Iter2>
void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
stable_sort(std::forward<Policy>(policy), keys_first, keys_last, values_first,
internal::__less());
}
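// Illustrative usage sketch (not part of the original header): sorting values
// by key. `keys` and `vals` are hypothetical device-accessible arrays of
// length n.
//
//   // Reorders both sequences so that `keys` is ascending; `vals` follows.
//   dpct::sort(policy, keys, keys + n, vals);
//   // Same, but elements with equal keys keep their relative order.
//   dpct::stable_sort(policy, keys, keys + n, vals, std::less<>());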
template <class Policy, class Iter, class Operator>
void for_each_index(Policy &&policy, Iter first, Iter last, Operator unary_op) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
std::transform(
std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0),
oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)),
first, unary_op);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5>
std::pair<Iter4, Iter5>
set_intersection(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 keys_result, Iter5 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_intersection(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_last2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Comp>
std::pair<Iter4, Iter5>
set_intersection(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 keys_result, Iter5 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_intersection(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_last2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
std::pair<Iter5, Iter6>
set_symmetric_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2,
Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_symmetric_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
std::pair<Iter5, Iter6>
set_symmetric_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2,
Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_symmetric_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
std::pair<Iter5, Iter6>
set_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
std::pair<Iter5, Iter6> set_difference(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result,
Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>>
set_union(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_union(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>>
set_union(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result, Iter6 values_result,
Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_union(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 out_true, Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::partition_copy(
std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask),
oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)),
oneapi::dpl::make_zip_iterator(out_true, oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(out_false,
oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(p));
return std::make_pair(std::get<0>(ret_val.first.base()),
std::get<0>(ret_val.second.base()));
}
template <typename Policy, typename Iter1, typename Iter3, typename Iter4,
typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter3 out_true,
Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return std::partition_copy(std::forward<Policy>(policy), first, last,
out_true, out_false, p);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 out_true, Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return stable_partition_copy(std::forward<Policy>(policy), first, last, mask,
out_true, out_false, p);
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_hetero_execution_policy<Policy, Iter1>
stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
typedef typename std::decay<Policy>::type policy_type;
internal::__buffer<typename std::iterator_traits<Iter1>::value_type> _tmp(
std::distance(first, last));
std::copy(std::forward<Policy>(policy), mask,
mask + std::distance(first, last), _tmp.get());
auto ret_val =
std::stable_partition(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, _tmp.get()),
oneapi::dpl::make_zip_iterator(
last, _tmp.get() + std::distance(first, last)),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
typename std::enable_if<!internal::is_hetero_execution_policy<
typename std::decay<Policy>::type>::value,
Iter1>::type
stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
typedef typename std::decay<Policy>::type policy_type;
std::vector<typename std::iterator_traits<Iter1>::value_type> _tmp(
std::distance(first, last));
std::copy(std::forward<Policy>(policy), mask,
mask + std::distance(first, last), _tmp.begin());
auto ret_val = std::stable_partition(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, _tmp.begin()),
oneapi::dpl::make_zip_iterator(last,
_tmp.begin() + std::distance(first, last)),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_execution_policy<Policy, Iter1>
partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return stable_partition(std::forward<Policy>(policy), first, last, mask, p);
}
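// Illustrative usage sketch (not part of the original header): partitioning a
// sequence by a separate mask. `data` and `mask` are hypothetical
// device-accessible arrays of length n.
//
//   // Moves elements whose mask entry satisfies the predicate to the front;
//   // returns an iterator to the first element of the "false" group.
//   auto mid = dpct::partition(policy, data, data + n, mask,
//                              [](int m) { return m != 0; });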
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending = false, int begin_bit = 0,
int end_bit =
sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8);
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
int64_t n, bool descending = false, int begin_bit = 0,
int end_bit =
sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8);
namespace internal {
// Transforms each key to a specific bit range and sorts by the transformed key
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename transformed_key_t>
inline void transform_and_sort(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, int64_t n, bool descending,
int begin_bit, int end_bit) {
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
auto trans_key =
translate_key<key_t_value_t, transformed_key_t>(begin_bit, end_bit);
  // Using a comparison operator other than plain std::greater() or std::less()
  // prevents the radix-sort path, which costs some performance, but it is
  // required here to apply the key transformation to the desired bit range.
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out,
keys_out + n, [=](const auto a, const auto b) {
return comp(trans_key(a), trans_key(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<transformed_key_t>());
else
partial_sort_with_comp(::std::less<transformed_key_t>());
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline void sort_only(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, int64_t n, bool descending) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
if constexpr (::std::is_floating_point<key_t_value_t>::value) {
if (descending) {
      // A comparison operator other than std::greater() keeps -0.0 and 0.0
      // stable, at the cost of some performance because radix sort will not
      // be used.
auto comp_descending = [=](const auto a, const auto b) { return a > b; };
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n, comp_descending);
} else {
      // A comparison operator other than std::less() keeps -0.0 and 0.0
      // stable, at the cost of some performance because radix sort will not
      // be used.
auto comp_ascending = [=](const auto a, const auto b) { return a < b; };
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n, comp_ascending);
}
} else {
if (descending) {
oneapi::dpl::partial_sort_copy(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n,
keys_out, keys_out + n, ::std::greater<key_t_value_t>());
} else {
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n);
}
}
}
// Transforms the key of each pair to a specific bit range and sorts the pairs
// by the transformed key
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename transform_key_t, typename value_t, typename value_out_t>
inline void transform_and_sort_pairs(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, value_t values_in,
value_out_t values_out, int64_t n,
bool descending, int begin_bit,
int end_bit) {
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in);
auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out);
auto trans_key =
translate_key<key_t_value_t, transform_key_t>(begin_bit, end_bit);
  // Using a comparison operator other than plain std::greater() or std::less()
  // prevents the radix-sort path, which costs some performance, but it is
  // required here both to apply the key transformation to the desired bit
  // range and to select the key from the zipped pair.
auto load_val = [=](const auto a) { return trans_key(std::get<0>(a)); };
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n,
zip_output, zip_output + n, [=](const auto a, const auto b) {
return comp(load_val(a), load_val(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<key_t_value_t>());
else
partial_sort_with_comp(::std::less<key_t_value_t>());
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline void sort_only_pairs(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, value_t values_in,
value_out_t values_out, int64_t n,
bool descending) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in);
auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out);
  // Using a comparison operator other than plain std::greater() or std::less()
  // prevents the radix-sort path, which costs some performance, but it is
  // required here to select the key from the zipped pair.
auto load_val = [=](const auto a) { return std::get<0>(a); };
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n,
zip_output, zip_output + n, [=](const auto a, const auto b) {
return comp(load_val(a), load_val(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<key_t_value_t>());
else
partial_sort_with_comp(::std::less<key_t_value_t>());
}
// overload for key_out_t != std::nullptr_t
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
typename ::std::enable_if<!::std::is_null_pointer<key_out_t>::value>::type
sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
int clipped_begin_bit = ::std::max(begin_bit, 0);
int clipped_end_bit =
::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8);
int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1;
auto transform_and_sort_pairs_f = [&](auto x) {
using T = typename ::std::decay_t<decltype(x)>;
internal::transform_and_sort_pairs<decltype(policy), key_t, key_out_t, T,
value_t, value_out_t>(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, descending, clipped_begin_bit, clipped_end_bit);
};
if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) {
internal::sort_only_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, values_in, values_out, n, descending);
} else if (num_bytes == 1) {
transform_and_sort_pairs_f.template operator()<uint8_t>(0);
} else if (num_bytes == 2) {
transform_and_sort_pairs_f.template operator()<uint16_t>(0);
} else if (num_bytes <= 4) {
transform_and_sort_pairs_f.template operator()<uint32_t>(0);
} else // if (num_bytes <= 8)
{
transform_and_sort_pairs_f.template operator()<uint64_t>(0);
}
}
// overload for key_out_t == std::nullptr_t
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
typename ::std::enable_if<::std::is_null_pointer<key_out_t>::value>::type
sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
  // Create a temporary keys_out buffer whose contents are discarded. The
  // memory footprint could be reduced with a specialized iterator that exposes
  // a single, unchanging dummy key_t element.
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> temp_keys_out{sycl::range<1>(n)};
internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(temp_keys_out), values_in,
values_out, n, descending, begin_bit, end_bit);
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_parallel_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
    value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
using offset_type =
typename ::std::iterator_traits<OffsetIteratorT>::value_type;
::std::vector<offset_type> host_accessible_offset_starts(nsegments);
::std::vector<offset_type> host_accessible_offset_ends(nsegments);
// make offsets accessible on host
::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
begin_offsets + nsegments, host_accessible_offset_starts.begin());
::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets,
end_offsets + nsegments, host_accessible_offset_ends.begin());
for (::std::uint64_t i = 0; i < nsegments; i++) {
uint64_t segment_begin = host_accessible_offset_starts[i];
uint64_t segment_end =
::std::min(n, (int64_t)host_accessible_offset_ends[i]);
if (segment_begin < segment_end) {
::dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
keys_in + segment_begin, keys_out + segment_begin,
values_in + segment_begin, values_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
}
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_parallel_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
using offset_type =
typename ::std::iterator_traits<OffsetIteratorT>::value_type;
::std::vector<offset_type> host_accessible_offset_starts(nsegments);
::std::vector<offset_type> host_accessible_offset_ends(nsegments);
// make offsets accessible on host
::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
begin_offsets + nsegments, host_accessible_offset_starts.begin());
::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets,
end_offsets + nsegments, host_accessible_offset_ends.begin());
for (::std::uint64_t i = 0; i < nsegments; i++) {
uint64_t segment_begin = host_accessible_offset_starts[i];
uint64_t segment_end =
::std::min(n, (int64_t)host_accessible_offset_ends[i]);
if (segment_begin < segment_end) {
::dpct::sort_keys(::std::forward<_ExecutionPolicy>(policy),
keys_in + segment_begin, keys_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
}
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_parallel_for_of_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
uint64_t segment_begin = begin_offsets[i];
uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]);
if (segment_begin == segment_end) {
return;
}
::dpct::sort_pairs(::std::execution::seq, keys_in + segment_begin,
keys_out + segment_begin, values_in + segment_begin,
values_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
});
});
policy.queue().wait();
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_parallel_for_of_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
uint64_t segment_begin = begin_offsets[i];
uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]);
if (segment_begin == segment_end) {
return;
}
::dpct::sort_keys(::std::execution::seq, keys_in + segment_begin,
keys_out + segment_begin, segment_end - segment_begin,
descending, begin_bit, end_bit);
});
});
policy.queue().wait();
}
template <typename _ExecutionPolicy, typename OffsetIteratorT>
inline void
mark_segments(_ExecutionPolicy &&policy, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, int64_t n, int64_t nsegments,
sycl::buffer<::std::size_t, 1> segments) {
::std::size_t work_group_size =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_work_group_size>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
::std::size_t sub_group_size = sg_sizes.empty() ? 0 : sg_sizes.back();
float avg_seg_size = (float)n / (float)nsegments;
if (avg_seg_size > work_group_size) {
    // If the average segment size is larger than a work-group, use the whole
    // work-group to mark each segment cooperatively.
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(work_group_size, ([=](sycl::id<1> id) {
for (::std::size_t seg = 0; seg < nsegments; seg++) {
::std::size_t i = begin_offsets[seg];
::std::size_t end = end_offsets[seg];
while (i + id < end) {
segments_acc[i + id] = seg;
i += work_group_size;
}
}
}));
})
.wait();
} else if (sub_group_size > 0 && avg_seg_size > sub_group_size / 2) {
    // If the average segment size is larger than half a sub-group, let each
    // sub-group mark one segment cooperatively.
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(
sycl::nd_range<1>{work_group_size, work_group_size},
([=](sycl::nd_item<1> item) {
auto sub_group = item.get_sub_group();
::std::size_t num_subgroups =
sub_group.get_group_range().size();
::std::size_t local_size = sub_group.get_local_range().size();
::std::size_t sub_group_id = sub_group.get_group_id();
while (sub_group_id < nsegments) {
::std::size_t subgroup_local_id = sub_group.get_local_id();
::std::size_t i = begin_offsets[sub_group_id];
::std::size_t end = end_offsets[sub_group_id];
while (i + subgroup_local_id < end) {
segments_acc[i + subgroup_local_id] = sub_group_id;
i += local_size;
}
sub_group_id += num_subgroups;
}
}));
})
.wait();
} else {
    // If the average segment size is small compared to a sub-group, use a
    // single work-item to mark each segment.
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(nsegments, ([=](sycl::id<1> seg) {
for (::std::size_t i = begin_offsets[seg];
i < end_offsets[seg]; i++) {
segments_acc[i] = seg;
}
}));
})
.wait();
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_two_pair_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)};
sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)};
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)};
mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
end_offsets, n, nsegments, segments);
  // Part 1: Sort by key, keeping track of which segment each element was in.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(segments),
oneapi::dpl::begin(segments_sorted), n, descending);
  // Part 2: Sort by segment index with a stable sort so each segment's keys
  // come back grouped together and in sorted order.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
oneapi::dpl::begin(segments_sorted),
oneapi::dpl::begin(segments), oneapi::dpl::begin(keys_temp),
keys_out, n, false);
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_two_pair_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
    value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)};
sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)};
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)};
using value_t_value_t = typename ::std::iterator_traits<value_t>::value_type;
sycl::buffer<value_t_value_t, 1> values_temp{sycl::range<1>(n)};
mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
end_offsets, n, nsegments, segments);
auto zip_seg_vals =
oneapi::dpl::make_zip_iterator(oneapi::dpl::begin(segments), values_in);
auto zip_seg_vals_out = oneapi::dpl::make_zip_iterator(
oneapi::dpl::begin(segments_sorted), oneapi::dpl::begin(values_temp));
  // Part 1: Sort by key, keeping track of which segment each element was in.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(keys_temp), zip_seg_vals,
zip_seg_vals_out, n, descending);
auto zip_keys_vals = oneapi::dpl::make_zip_iterator(
oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(values_temp));
auto zip_keys_vals_out = oneapi::dpl::make_zip_iterator(keys_out, values_out);
  // Part 2: Sort by segment index with a stable sort so each segment's keys
  // come back grouped together and in sorted order.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
oneapi::dpl::begin(segments_sorted),
oneapi::dpl::begin(segments), zip_keys_vals,
zip_keys_vals_out, n, false);
}
} // end namespace internal
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, values_in, values_out, n, descending,
begin_bit, end_bit);
}
template <typename _ExecutionPolicy, typename key_t, typename value_t>
inline void sort_pairs(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys,
io_iterator_pair<value_t> &values, int64_t n, bool descending = false,
bool do_swap_iters = false,
int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), values.first(), values.second(), n, descending,
begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
values.swap();
}
}
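// Illustrative usage sketch (not part of the original header): double-buffered
// key/value sort through io_iterator_pair. `d_keys`, `d_keys_alt`, `d_vals`,
// and `d_vals_alt` are hypothetical device-accessible buffers of length n, and
// the two-iterator construction of io_iterator_pair is assumed.
//
//   dpct::io_iterator_pair<int *> keys(d_keys, d_keys_alt);
//   dpct::io_iterator_pair<float *> vals(d_vals, d_vals_alt);
//   // Sorts ascending; with do_swap_iters == true, .first() afterwards points
//   // at the sorted output buffers.
//   dpct::sort_pairs(policy, keys, vals, n, /*descending=*/false,
//                    /*do_swap_iters=*/true);
//   int *sorted_keys = keys.first();
//   float *sorted_vals = vals.first();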
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
int64_t n, bool descending, int begin_bit, int end_bit) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
int clipped_begin_bit = ::std::max(begin_bit, 0);
int clipped_end_bit =
::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8);
int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1;
auto transform_and_sort_f = [&](auto x) {
using T = typename ::std::decay_t<decltype(x)>;
internal::transform_and_sort<decltype(policy), key_t, key_out_t, T>(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
descending, clipped_begin_bit, clipped_end_bit);
};
if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) {
internal::sort_only(::std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, n, descending);
} else if (num_bytes == 1) {
transform_and_sort_f.template operator()<uint8_t>(0);
} else if (num_bytes == 2) {
transform_and_sort_f.template operator()<uint16_t>(0);
} else if (num_bytes <= 4) {
transform_and_sort_f.template operator()<uint32_t>(0);
} else // if (num_bytes <= 8)
{
transform_and_sort_f.template operator()<uint64_t>(0);
}
}
template <typename _ExecutionPolicy, typename key_t>
inline void sort_keys(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n,
bool descending = false,
bool do_swap_iters = false,
int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sort_keys(std::forward<_ExecutionPolicy>(policy), keys.first(), keys.second(),
n, descending, begin_bit, end_bit);
if (do_swap_iters)
keys.swap();
}
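// Illustrative usage sketch (not part of the original header): sorting keys on
// a restricted bit range. `keys_in` and `keys_out` are hypothetical
// device-accessible arrays of uint32_t.
//
//   // Orders the keys by bits [0, 16) only; bits outside that range do not
//   // influence the ordering but the full key values are written out.
//   dpct::sort_keys(policy, keys_in, keys_out, n, /*descending=*/false,
//                   /*begin_bit=*/0, /*end_bit=*/16);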
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
segmented_sort_keys(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
int compute_units =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_compute_units>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back();
  // Use a parallel_for of serial sorts when there are enough segments to load
  // balance across the device's compute capacity.
if (nsegments >
compute_units *
(policy.queue().get_device().is_gpu() ? subgroup_size : 1)) {
dpct::internal::segmented_sort_keys_by_parallel_for_of_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
  } else if (nsegments < 512) // host loop of parallel sorts when the total
                              // number of sorts is small, to limit overhead
{
dpct::internal::segmented_sort_keys_by_parallel_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
} else // decent catch all using 2 full sorts
{
dpct::internal::segmented_sort_keys_by_two_pair_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
}
}
template <typename _ExecutionPolicy, typename key_t, typename OffsetIteratorT>
inline void segmented_sort_keys(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false,
bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
segmented_sort_keys(::std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), n, nsegments, begin_offsets, end_offsets,
descending, begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
segmented_sort_pairs(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
int compute_units =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_compute_units>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back();
  // Use a parallel_for of serial per-segment sorts when there are enough
  // segments to load-balance across the device, i.e. the segment count is
  // large relative to the target compute capability.
if (nsegments >
compute_units *
(policy.queue().get_device().is_gpu() ? subgroup_size : 1)) {
dpct::internal::segmented_sort_pairs_by_parallel_for_of_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
  } else if (nsegments < 512) // host loop of parallel sorts when the total
                              // number of sorts is small, to limit overhead
{
dpct::internal::segmented_sort_pairs_by_parallel_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
} else // decent catch all using 2 full sorts
{
dpct::internal::segmented_sort_pairs_by_two_pair_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
}
}
template <typename _ExecutionPolicy, typename key_t, typename value_t,
typename OffsetIteratorT>
inline void segmented_sort_pairs(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys,
io_iterator_pair<value_t> &values, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
segmented_sort_pairs(std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), values.first(), values.second(), n,
nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
values.swap();
}
}
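// Illustrative usage sketch (assumed names: policy, key/value buffers, and
// offsets follow the segmented_sort_keys example above). Sorts key/value pairs
// within each segment using the iterator overload defined above; each value
// travels with its key:
//
//   dpct::segmented_sort_pairs(policy, keys_in, keys_out, vals_in, vals_out,
//                              /*n=*/6, /*nsegments=*/2, offsets, offsets + 1);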
template <typename _ExecutionPolicy, typename Iter1, typename Iter2>
inline void reduce_argmax(_ExecutionPolicy &&policy, Iter1 input, Iter2 output,
::std::size_t n) {
dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input);
auto ret = ::std::max_element(
::std::forward<_ExecutionPolicy>(policy), input_arg_idx,
input_arg_idx + n,
[](const auto &a, const auto &b) { return (a.value < b.value); });
::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output);
}
template <typename _ExecutionPolicy, typename Iter1, typename Iter2>
inline void reduce_argmin(_ExecutionPolicy &&policy, Iter1 input, Iter2 output,
::std::size_t n) {
dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input);
auto ret = ::std::min_element(
::std::forward<_ExecutionPolicy>(policy), input_arg_idx,
input_arg_idx + n,
[](const auto &a, const auto &b) { return (a.value < b.value); });
::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output);
}
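// Illustrative usage sketch (assumed names: q, policy, data, n; the result
// element type is assumed to mirror the key_value_pair produced by
// dpct::arg_index_input_iterator):
//
//   auto *result = sycl::malloc_shared<dpct::key_value_pair<int, float>>(1, q);
//   dpct::reduce_argmax(policy, data, result, n);
//   q.wait();
//   // result->key holds the index of the maximum, result->value its value.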
template <typename _ExecutionPolicy, typename Iter1,
typename ValueLessComparable, typename StrictWeakOrdering>
inline ::std::pair<Iter1, Iter1>
equal_range(_ExecutionPolicy &&policy, Iter1 start, Iter1 end,
const ValueLessComparable &value, StrictWeakOrdering comp) {
::std::vector<::std::int64_t> res_lower(1);
::std::vector<::std::int64_t> res_upper(1);
::std::vector<ValueLessComparable> value_vec(1, value);
::oneapi::dpl::lower_bound(policy, start, end, value_vec.begin(),
value_vec.end(), res_lower.begin(), comp);
::oneapi::dpl::upper_bound(::std::forward<_ExecutionPolicy>(policy), start,
end, value_vec.begin(), value_vec.end(),
res_upper.begin(), comp);
auto result = ::std::make_pair(start + res_lower[0], start + res_upper[0]);
return result;
}
template <typename _ExecutionPolicy, typename Iter1,
typename ValueLessComparable>
inline ::std::pair<Iter1, Iter1> equal_range(_ExecutionPolicy &&policy,
Iter1 start, Iter1 end,
const ValueLessComparable &value) {
return equal_range(::std::forward<_ExecutionPolicy>(policy), start, end,
value, internal::__less());
}
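// Illustrative usage sketch (assumed names: policy, data, n); `data` must
// already be sorted in ascending order for equal_range to be meaningful:
//
//   auto bounds = dpct::equal_range(policy, data, data + n, 42);
//   auto count  = bounds.second - bounds.first; // number of occurrences of 42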
template <typename Policy, typename Iter1, typename Iter2, typename Iter3>
inline ::std::enable_if_t<
dpct::internal::is_iterator<Iter1>::value &&
dpct::internal::is_iterator<Iter2>::value &&
internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value>
segmented_reduce_argmin(Policy &&policy, Iter1 keys_in, Iter2 keys_out,
::std::int64_t nsegments, Iter3 begin_offsets,
Iter3 end_offsets) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
if (end_offsets[i] <= begin_offsets[i]) {
keys_out[i] = dpct::key_value_pair(
1, ::std::numeric_limits<
typename ::std::iterator_traits<Iter1>::value_type>::max());
} else {
dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in +
begin_offsets[i]);
keys_out[i] = *::std::min_element(
arg_index, arg_index + (end_offsets[i] - begin_offsets[i]),
[](const auto &a, const auto &b) { return a.value < b.value; });
}
});
});
policy.queue().wait();
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3>
inline ::std::enable_if_t<
dpct::internal::is_iterator<Iter1>::value &&
dpct::internal::is_iterator<Iter2>::value &&
internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value>
segmented_reduce_argmax(Policy &&policy, Iter1 keys_in, Iter2 keys_out,
::std::int64_t nsegments, Iter3 begin_offsets,
Iter3 end_offsets) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
if (end_offsets[i] <= begin_offsets[i]) {
keys_out[i] = dpct::key_value_pair(
1,
::std::numeric_limits<
typename ::std::iterator_traits<Iter1>::value_type>::lowest());
} else {
dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in +
begin_offsets[i]);
keys_out[i] = *::std::max_element(
arg_index, arg_index + (end_offsets[i] - begin_offsets[i]),
[](const auto &a, const auto &b) { return a.value < b.value; });
}
});
});
policy.queue().wait();
}
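// Illustrative usage sketch (assumed names: q, policy, keys, offsets; the
// output element type is an assumption). Per-segment argmax over two segments
// described by offsets {0, 3, 6}; out[i].key is the within-segment index of
// segment i's maximum:
//
//   auto *out = sycl::malloc_shared<dpct::key_value_pair<int, float>>(2, q);
//   dpct::segmented_reduce_argmax(policy, keys, out, /*nsegments=*/2,
//                                 offsets, offsets + 1);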
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/dpl_extras/memory.h | //==---- memory.h ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_H__
#define __DPCT_MEMORY_H__
#include <sycl/sycl.hpp>
// Memory management section:
// device_pointer, device_reference, swap, device_iterator, malloc_device,
// device_new, free_device, device_delete
namespace dpct {
namespace detail {
template <typename T>
struct make_allocatable
{
using type = T;
};
template <>
struct make_allocatable<void>
{
using type = dpct::byte_t;
};
#if defined(__LIBSYCL_MAJOR_VERSION) && defined(__LIBSYCL_MINOR_VERSION) && \
defined(__LIBSYCL_PATCH_VERSION)
#define _DPCT_LIBSYCL_VERSION \
(__LIBSYCL_MAJOR_VERSION * 10000 + __LIBSYCL_MINOR_VERSION * 100 + \
__LIBSYCL_PATCH_VERSION)
#else
#define _DPCT_LIBSYCL_VERSION 0
#endif
template <typename _DataT>
using __buffer_allocator =
#if _DPCT_LIBSYCL_VERSION >= 60000
sycl::buffer_allocator<typename make_allocatable<_DataT>::type>;
#else
sycl::buffer_allocator;
#endif
} // namespace detail
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write,
typename Allocator = detail::__buffer_allocator<T>>
class device_pointer;
#else
template <typename T> class device_pointer;
#endif
template <typename T> struct device_reference {
using pointer = device_pointer<T>;
using value_type = T;
template <typename OtherT>
device_reference(const device_reference<OtherT> &input)
: value(input.value) {}
device_reference(const pointer &input) : value((*input).value) {}
device_reference(value_type &input) : value(input) {}
template <typename OtherT>
device_reference &operator=(const device_reference<OtherT> &input) {
value = input;
return *this;
};
device_reference &operator=(const device_reference &input) {
T val = input.value;
value = val;
return *this;
};
device_reference &operator=(const value_type &x) {
value = x;
return *this;
};
pointer operator&() const { return pointer(&value); };
operator value_type() const { return T(value); }
device_reference &operator++() {
++value;
return *this;
};
device_reference &operator--() {
--value;
return *this;
};
device_reference operator++(int) {
device_reference ref(*this);
++(*this);
return ref;
};
device_reference operator--(int) {
device_reference ref(*this);
--(*this);
return ref;
};
device_reference &operator+=(const T &input) {
value += input;
return *this;
};
device_reference &operator-=(const T &input) {
value -= input;
return *this;
};
device_reference &operator*=(const T &input) {
value *= input;
return *this;
};
device_reference &operator/=(const T &input) {
value /= input;
return *this;
};
device_reference &operator%=(const T &input) {
value %= input;
return *this;
};
device_reference &operator&=(const T &input) {
value &= input;
return *this;
};
device_reference &operator|=(const T &input) {
value |= input;
return *this;
};
device_reference &operator^=(const T &input) {
value ^= input;
return *this;
};
device_reference &operator<<=(const T &input) {
value <<= input;
return *this;
};
device_reference &operator>>=(const T &input) {
value >>= input;
return *this;
};
void swap(device_reference &input) {
T tmp = (*this);
*this = (input);
input = (tmp);
}
T &value;
};
template <typename T>
void swap(device_reference<T> &x, device_reference<T> &y) {
x.swap(y);
}
template <typename T> void swap(T &x, T &y) {
T tmp = x;
x = y;
y = tmp;
}
namespace internal {
// struct for checking if iterator is heterogeneous or not
template <typename Iter,
typename Void = void> // for non-heterogeneous iterators
struct is_hetero_iterator : std::false_type {};
template <typename Iter> // for heterogeneous iterators
struct is_hetero_iterator<
Iter, typename std::enable_if<Iter::is_hetero::value, void>::type>
: std::true_type {};
} // namespace internal
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode, typename Allocator>
class device_iterator;
template <typename ValueType, typename Allocator, typename Derived>
class device_pointer_base {
protected:
sycl::buffer<ValueType, 1, Allocator> buffer;
std::size_t idx;
public:
using pointer = ValueType *;
using difference_type = std::make_signed<std::size_t>::type;
device_pointer_base(sycl::buffer<ValueType, 1> in, std::size_t i = 0)
: buffer(in), idx(i) {}
#ifdef __USE_DPCT
template <typename OtherT>
device_pointer_base(OtherT *ptr)
: buffer(
dpct::detail::mem_mgr::instance()
.translate_ptr(ptr)
.buffer.template reinterpret<ValueType, 1>(sycl::range<1>(
dpct::detail::mem_mgr::instance().translate_ptr(ptr).size /
sizeof(ValueType)))),
idx(ptr - (ValueType*)dpct::detail::mem_mgr::instance()
.translate_ptr(ptr).alloc_ptr) {}
#endif
device_pointer_base(const std::size_t count)
: buffer(sycl::range<1>(count / sizeof(ValueType))), idx() {}
  // buffer has no default ctor, so we pass a zero-range to create an empty buffer
device_pointer_base() : buffer(sycl::range<1>(0)) {}
device_pointer_base(const device_pointer_base &in)
: buffer(in.buffer), idx(in.idx) {}
pointer get() const {
auto res =
(const_cast<device_pointer_base *>(this)
->buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
operator ValueType *() {
auto res = (buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
operator ValueType *() const {
auto res =
(const_cast<device_pointer_base *>(this)
->buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
Derived operator+(difference_type forward) const {
return Derived{buffer, idx + forward};
}
Derived operator-(difference_type backward) const {
return Derived{buffer, idx - backward};
}
Derived operator++(int) {
Derived p(buffer, idx);
idx += 1;
return p;
}
Derived operator--(int) {
Derived p(buffer, idx);
idx -= 1;
return p;
}
difference_type operator-(const Derived &it) const { return idx - it.idx; }
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return idx - std::distance(oneapi::dpl::begin(buffer), it);
}
std::size_t get_idx() const { return idx; } // required
sycl::buffer<ValueType, 1, Allocator> get_buffer() {
return buffer;
} // required
};
template <typename T, sycl::access_mode Mode, typename Allocator>
class device_pointer
: public device_pointer_base<T, Allocator,
device_pointer<T, Mode, Allocator>> {
private:
using base_type = device_pointer_base<T, Allocator, device_pointer>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type;
static constexpr sycl::access_mode mode = Mode; // required
device_pointer(sycl::buffer<T, 1> in, std::size_t i = 0) : base_type(in, i) {}
#ifdef __USE_DPCT
template <typename OtherT> device_pointer(OtherT *ptr) : base_type(ptr) {}
#endif
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer(const device_pointer &in) : base_type(in) {}
device_pointer &operator+=(difference_type forward) {
this->idx += forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->idx -= backward;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
this->idx += 1;
return *this;
}
device_pointer &operator--() {
this->idx -= 1;
return *this;
}
};
template <sycl::access_mode Mode, typename Allocator>
class device_pointer<void, Mode, Allocator>
: public device_pointer_base<dpct::byte_t, Allocator,
device_pointer<void, Mode, Allocator>> {
private:
using base_type =
device_pointer_base<dpct::byte_t, Allocator, device_pointer>;
public:
using value_type = dpct::byte_t;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = void *;
using reference = value_type &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type;
static constexpr sycl::access_mode mode = Mode; // required
device_pointer(sycl::buffer<value_type, 1> in, std::size_t i = 0)
: base_type(in, i) {}
#ifdef __USE_DPCT
template <typename OtherT> device_pointer(OtherT *ptr) : base_type(ptr) {}
#endif
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer(const device_pointer &in) : base_type(in) {}
device_pointer &operator+=(difference_type forward) {
this->idx += forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->idx -= backward;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
this->idx += 1;
return *this;
}
device_pointer &operator--() {
this->idx -= 1;
return *this;
}
};
#else
template <typename T> class device_iterator;
template <typename ValueType, typename Derived> class device_pointer_base {
protected:
ValueType *ptr;
public:
using pointer = ValueType *;
using difference_type = std::make_signed<std::size_t>::type;
device_pointer_base(ValueType *p) : ptr(p) {}
device_pointer_base(const std::size_t count) {
sycl::queue default_queue = dpct::get_default_queue();
ptr = static_cast<ValueType *>(sycl::malloc_shared(
count, default_queue.get_device(), default_queue.get_context()));
}
device_pointer_base() {}
pointer get() const { return ptr; }
operator ValueType *() { return ptr; }
operator ValueType *() const { return ptr; }
ValueType &operator[](difference_type idx) { return ptr[idx]; }
ValueType &operator[](difference_type idx) const { return ptr[idx]; }
Derived operator+(difference_type forward) const {
return Derived{ptr + forward};
}
Derived operator-(difference_type backward) const {
return Derived{ptr - backward};
}
Derived operator++(int) {
Derived p(ptr);
++ptr;
return p;
}
Derived operator--(int) {
Derived p(ptr);
--ptr;
return p;
}
difference_type operator-(const Derived &it) const { return ptr - it.ptr; }
};
template <typename T>
class device_pointer : public device_pointer_base<T, device_pointer<T>> {
private:
using base_type = device_pointer_base<T, device_pointer<T>>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using const_reference = const T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
device_pointer(T *p) : base_type(p) {}
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer &operator=(const device_iterator<T> &in) {
this->ptr = static_cast<device_pointer<T>>(in).ptr;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
++(this->ptr);
return *this;
}
device_pointer &operator--() {
--(this->ptr);
return *this;
}
device_pointer &operator+=(difference_type forward) {
this->ptr = this->ptr + forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->ptr = this->ptr - backward;
return *this;
}
};
template <>
class device_pointer<void>
: public device_pointer_base<dpct::byte_t, device_pointer<void>> {
private:
using base_type = device_pointer_base<dpct::byte_t, device_pointer<void>>;
public:
using value_type = dpct::byte_t;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = void *;
using reference = value_type &;
using const_reference = const value_type &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
device_pointer(void *p) : base_type(static_cast<value_type *>(p)) {}
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
pointer get() const { return static_cast<pointer>(this->ptr); }
operator void *() { return this->ptr; }
operator void *() const { return this->ptr; }
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
++(this->ptr);
return *this;
}
device_pointer &operator--() {
--(this->ptr);
return *this;
}
device_pointer &operator+=(difference_type forward) {
this->ptr = this->ptr + forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->ptr = this->ptr - backward;
return *this;
}
};
#endif
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write,
typename Allocator = detail::__buffer_allocator<T>>
class device_iterator : public device_pointer<T, Mode, Allocator> {
using Base = device_pointer<T, Mode, Allocator>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type; // required
static constexpr sycl::access_mode mode = Mode; // required
device_iterator() : Base() {}
device_iterator(sycl::buffer<T, 1, Allocator> vec, std::size_t index)
: Base(vec, index) {}
template <sycl::access_mode inMode>
device_iterator(const device_iterator<T, inMode, Allocator> &in)
: Base(in.buffer, in.idx) {} // required for iter_mode
device_iterator &operator=(const device_iterator &in) {
Base::buffer = in.buffer;
Base::idx = in.idx;
return *this;
}
reference operator*() const {
return const_cast<device_iterator *>(this)
->buffer.template get_access<mode>()[Base::idx];
}
reference operator[](difference_type i) const { return *(*this + i); }
device_iterator &operator++() {
++Base::idx;
return *this;
}
device_iterator &operator--() {
--Base::idx;
return *this;
}
device_iterator operator++(int) {
device_iterator it(*this);
++(*this);
return it;
}
device_iterator operator--(int) {
device_iterator it(*this);
--(*this);
return it;
}
device_iterator operator+(difference_type forward) const {
const auto new_idx = Base::idx + forward;
return {Base::buffer, new_idx};
}
device_iterator &operator+=(difference_type forward) {
Base::idx += forward;
return *this;
}
device_iterator operator-(difference_type backward) const {
return {Base::buffer, Base::idx - backward};
}
device_iterator &operator-=(difference_type backward) {
Base::idx -= backward;
return *this;
}
friend device_iterator operator+(difference_type forward,
const device_iterator &it) {
return it + forward;
}
difference_type operator-(const device_iterator &it) const {
return Base::idx - it.idx;
}
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return Base::idx - std::distance(oneapi::dpl::begin(Base::buffer), it);
}
bool operator==(const device_iterator &it) const { return *this - it == 0; }
bool operator!=(const device_iterator &it) const { return !(*this == it); }
bool operator<(const device_iterator &it) const { return *this - it < 0; }
bool operator>(const device_iterator &it) const { return it < *this; }
bool operator<=(const device_iterator &it) const { return !(*this > it); }
bool operator>=(const device_iterator &it) const { return !(*this < it); }
std::size_t get_idx() const { return Base::idx; } // required
sycl::buffer<T, 1, Allocator> get_buffer() {
return Base::buffer;
} // required
};
#else
template <typename T> class device_iterator : public device_pointer<T> {
using Base = device_pointer<T>;
protected:
std::size_t idx;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = typename Base::pointer;
using reference = typename Base::reference;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
static constexpr sycl::access_mode mode =
sycl::access_mode::read_write; // required
device_iterator() : Base(nullptr), idx(0) {}
device_iterator(T *vec, std::size_t index) : Base(vec), idx(index) {}
template <sycl::access_mode inMode>
device_iterator(const device_iterator<T> &in)
: Base(in.ptr), idx(in.idx) {} // required for iter_mode
device_iterator &operator=(const device_iterator &in) {
Base::operator=(in);
idx = in.idx;
return *this;
}
reference operator*() const { return *(Base::ptr + idx); }
reference operator[](difference_type i) { return Base::ptr[idx + i]; }
reference operator[](difference_type i) const { return Base::ptr[idx + i]; }
device_iterator &operator++() {
++idx;
return *this;
}
device_iterator &operator--() {
--idx;
return *this;
}
device_iterator operator++(int) {
device_iterator it(*this);
++(*this);
return it;
}
device_iterator operator--(int) {
device_iterator it(*this);
--(*this);
return it;
}
device_iterator operator+(difference_type forward) const {
const auto new_idx = idx + forward;
return {Base::ptr, new_idx};
}
device_iterator &operator+=(difference_type forward) {
idx += forward;
return *this;
}
device_iterator operator-(difference_type backward) const {
return {Base::ptr, idx - backward};
}
device_iterator &operator-=(difference_type backward) {
idx -= backward;
return *this;
}
friend device_iterator operator+(difference_type forward,
const device_iterator &it) {
return it + forward;
}
difference_type operator-(const device_iterator &it) const {
return idx - it.idx;
}
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return idx - it.get_idx();
}
bool operator==(const device_iterator &it) const { return *this - it == 0; }
bool operator!=(const device_iterator &it) const { return !(*this == it); }
bool operator<(const device_iterator &it) const { return *this - it < 0; }
bool operator>(const device_iterator &it) const { return it < *this; }
bool operator<=(const device_iterator &it) const { return !(*this > it); }
bool operator>=(const device_iterator &it) const { return !(*this < it); }
std::size_t get_idx() const { return idx; } // required
device_iterator &get_buffer() { return *this; } // required
std::size_t size() const { return idx; }
};
#endif
template <typename T>
device_pointer<T> malloc_device(const std::size_t num_elements) {
return device_pointer<T>(num_elements * sizeof(T));
}
static inline device_pointer<void> malloc_device(const std::size_t num_bytes) {
return device_pointer<void>(num_bytes);
}
template <typename T>
device_pointer<T> device_new(device_pointer<T> p, const T &value,
const std::size_t count = 1) {
std::vector<T> result(count, value);
p.buffer = sycl::buffer<T, 1>(result.begin(), result.end());
return p + count;
}
template <typename T>
device_pointer<T> device_new(device_pointer<T> p, const std::size_t count = 1) {
return device_new(p, T{}, count);
}
template <typename T>
device_pointer<T> device_new(const std::size_t count = 1) {
return device_pointer<T>(count);
}
template <typename T> void free_device(device_pointer<T> ptr) {}
template <typename T>
typename std::enable_if<!std::is_trivially_destructible<T>::value, void>::type
device_delete(device_pointer<T> p, const std::size_t count = 1) {
for (std::size_t i = 0; i < count; ++i) {
p[i].~T();
}
}
template <typename T>
typename std::enable_if<std::is_trivially_destructible<T>::value, void>::type
device_delete(device_pointer<T>, const std::size_t count = 1) {}
template <typename T> device_pointer<T> get_device_pointer(T *ptr) {
return device_pointer<T>(ptr);
}
template <typename T>
device_pointer<T> get_device_pointer(const device_pointer<T> &ptr) {
return device_pointer<T>(ptr);
}
template <typename T> T *get_raw_pointer(const device_pointer<T> &ptr) {
return ptr.get();
}
template <typename Pointer> Pointer get_raw_pointer(const Pointer &ptr) {
return ptr;
}
template <typename T> const T &get_raw_reference(const device_reference<T> &ref) {
return ref.value;
}
template <typename T> T &get_raw_reference(device_reference<T> &ref) {
return ref.value;
}
template <typename T> const T &get_raw_reference(const T &ref) {
return ref;
}
template <typename T> T &get_raw_reference(T &ref) {
return ref;
}
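// Illustrative usage sketch (not part of the original header; assumes
// <oneapi/dpl/execution> and <oneapi/dpl/algorithm> are also included).
// Allocate device storage, fill it through a oneDPL algorithm, and recover a
// raw pointer:
//
//   auto dev = dpct::malloc_device<float>(1024);
//   auto policy = oneapi::dpl::execution::make_device_policy(
//       dpct::get_default_queue());
//   std::fill(policy, dev, dev + 1024, 0.0f);
//   float *raw = dpct::get_raw_pointer(dev);
//   dpct::free_device(dev);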
} // namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/dpl_extras/vector.h | //==---- vector.h ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_VECTOR_H__
#define __DPCT_VECTOR_H__
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <sycl/sycl.hpp>
#include "memory.h"
#include <algorithm>
#include <iterator>
#include <vector>
#include "../device.hpp"
namespace dpct {
namespace internal {
template <typename Iter, typename Void = void> // for non-iterators
struct is_iterator : std::false_type {};
template <typename Iter> // For iterators
struct is_iterator<
Iter,
typename std::enable_if<
!std::is_void<typename Iter::iterator_category>::value, void>::type>
: std::true_type {};
template <typename T> // For pointers
struct is_iterator<T *> : std::true_type {};
} // end namespace internal
#ifndef DPCT_USM_LEVEL_NONE
template <typename T,
typename Allocator = sycl::usm_allocator<T, sycl::usm::alloc::shared>>
class device_vector {
public:
using iterator = device_iterator<T>;
using const_iterator = const iterator;
using reference = device_reference<T>;
using const_reference = const reference;
using value_type = T;
using pointer = T *;
using const_pointer = const T *;
using difference_type =
typename ::std::iterator_traits<iterator>::difference_type;
using size_type = ::std::size_t;
private:
Allocator _alloc;
size_type _size;
size_type _capacity;
pointer _storage;
size_type _min_capacity() const { return size_type(1); }
void _set_capacity_and_alloc() {
_capacity = ::std::max(_size * 2, _min_capacity());
_storage = _alloc.allocate(_capacity);
}
public:
template <typename OtherA> operator ::std::vector<T, OtherA>() const {
auto __tmp = ::std::vector<T, OtherA>(this->size());
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
this->begin(), this->end(), __tmp.begin());
return __tmp;
}
device_vector()
: _alloc(get_default_queue()), _size(0), _capacity(_min_capacity()) {
_set_capacity_and_alloc();
}
~device_vector() /*= default*/ { _alloc.deallocate(_storage, _capacity); };
explicit device_vector(size_type n) : device_vector(n, T()) {}
explicit device_vector(size_type n, const T &value)
: _alloc(get_default_queue()), _size(n) {
_set_capacity_and_alloc();
if (_size > 0) {
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), T(value));
}
}
device_vector(const device_vector &other) : _alloc(get_default_queue()) {
_size = other.size();
_capacity = other.capacity();
_storage = _alloc.allocate(_capacity);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
other.begin(), other.end(), begin());
}
device_vector(device_vector &&other)
: _alloc(get_default_queue()), _size(other.size()),
_capacity(other.capacity()), _storage(other._storage) {
other._size = 0;
other._capacity = 0;
other._storage = nullptr;
}
template <typename InputIterator>
device_vector(InputIterator first,
typename ::std::enable_if<
internal::is_iterator<InputIterator>::value &&
!::std::is_pointer<InputIterator>::value &&
::std::is_same<typename ::std::iterator_traits<
InputIterator>::iterator_category,
::std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _alloc(get_default_queue()) {
_size = ::std::distance(first, last);
_set_capacity_and_alloc();
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
}
}
template <typename InputIterator>
device_vector(InputIterator first,
typename ::std::enable_if<::std::is_pointer<InputIterator>::value,
InputIterator>::type last)
: _alloc(get_default_queue()) {
_size = ::std::distance(first, last);
_set_capacity_and_alloc();
if (_size > 0) {
auto ptr_type = sycl::get_pointer_type(first, get_default_context());
if (ptr_type != sycl::usm::alloc::host &&
ptr_type != sycl::usm::alloc::unknown) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
} else {
sycl::buffer<T, 1> buf(first, last);
auto buf_first = oneapi::dpl::begin(buf);
auto buf_last = oneapi::dpl::end(buf);
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
buf_first, buf_last, begin());
}
}
}
template <typename InputIterator>
device_vector(InputIterator first,
typename ::std::enable_if<
internal::is_iterator<InputIterator>::value &&
!::std::is_pointer<InputIterator>::value &&
!::std::is_same<typename ::std::iterator_traits<
InputIterator>::iterator_category,
::std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _alloc(get_default_queue()), _size(::std::distance(first, last)) {
_set_capacity_and_alloc();
::std::vector<T> _tmp(first, last);
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
_tmp.begin(), _tmp.end(), this->begin());
}
}
template <typename OtherAllocator>
device_vector(const device_vector<T, OtherAllocator> &v)
: _alloc(get_default_queue()), _storage(v.real_begin()), _size(v.size()),
_capacity(v.capacity()) {}
template <typename OtherAllocator>
device_vector(::std::vector<T, OtherAllocator> &v)
: _alloc(get_default_queue()), _size(v.size()) {
_set_capacity_and_alloc();
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.begin(), v.end(), this->begin());
}
}
template <typename OtherAllocator>
device_vector &operator=(const ::std::vector<T, OtherAllocator> &v) {
resize(v.size());
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.begin(), v.end(), begin());
}
return *this;
}
device_vector &operator=(const device_vector &other) {
// Copy assignment operator:
resize(other.size());
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
other.begin(), other.end(), begin());
}
return *this;
}
device_vector &operator=(device_vector &&other) {
// Move assignment operator:
device_vector dummy(::std::move(other));
this->swap(dummy);
return *this;
}
size_type size() const { return _size; }
iterator begin() noexcept { return device_iterator<T>(_storage, 0); }
iterator end() { return device_iterator<T>(_storage, size()); }
const_iterator begin() const noexcept {
return device_iterator<T>(_storage, 0);
}
const_iterator cbegin() const noexcept { return begin(); }
const_iterator end() const { return device_iterator<T>(_storage, size()); }
const_iterator cend() const { return end(); }
T *real_begin() { return _storage; }
const T *real_begin() const { return _storage; }
void swap(device_vector &v) {
::std::swap(_size, v._size);
::std::swap(_capacity, v._capacity);
::std::swap(_storage, v._storage);
::std::swap(_alloc, v._alloc);
}
reference operator[](size_type n) { return _storage[n]; }
const_reference operator[](size_type n) const { return _storage[n]; }
void reserve(size_type n) {
if (n > capacity()) {
// allocate buffer for new size
auto tmp = _alloc.allocate(2 * n);
// copy content (old buffer to new buffer)
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), tmp);
// deallocate old memory
_alloc.deallocate(_storage, _capacity);
_storage = tmp;
_capacity = 2 * n;
}
}
void resize(size_type new_size, const T &x = T()) {
reserve(new_size);
if (_size < new_size) {
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin() + _size, begin() + new_size, x);
}
_size = new_size;
}
size_type max_size(void) const {
return ::std::numeric_limits<size_type>::max() / sizeof(T);
}
size_type capacity() const { return _capacity; }
const_reference front() const { return *begin(); }
reference front() { return *begin(); }
const_reference back(void) const { return *(end() - 1); }
reference back(void) { return *(end() - 1); }
pointer data(void) { return _storage; }
const_pointer data(void) const { return _storage; }
void shrink_to_fit(void) {
if (_size != capacity()) {
size_type tmp_capacity = ::std::max(_size, _min_capacity());
auto tmp = _alloc.allocate(tmp_capacity);
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), tmp);
}
_alloc.deallocate(_storage, _capacity);
_storage = tmp;
_capacity = tmp_capacity;
}
}
void assign(size_type n, const T &x) {
resize(n);
if (_size > 0) {
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), begin() + n, x);
}
}
template <typename InputIterator>
void
assign(InputIterator first,
typename ::std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = ::std::distance(first, last);
resize(n);
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
}
}
void clear(void) { _size = 0; }
bool empty(void) const { return (size() == 0); }
void push_back(const T &x) { insert(end(), size_type(1), x); }
void pop_back(void) {
if (_size > 0)
--_size;
}
iterator erase(iterator first, iterator last) {
auto n = ::std::distance(first, last);
if (last == end()) {
_size = _size - n;
return end();
}
auto m = ::std::distance(last, end());
if (m <= 0) {
return end();
}
auto tmp = _alloc.allocate(m);
// copy remainder to temporary buffer.
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
last, end(), tmp);
    // overwrite (erase) the subsequence in storage.
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, first);
_alloc.deallocate(tmp, m);
_size -= n;
return begin() + first.get_idx() + n;
}
iterator erase(iterator pos) { return erase(pos, pos + 1); }
iterator insert(iterator position, const T &x) {
auto n = ::std::distance(begin(), position);
insert(position, size_type(1), x);
return begin() + n;
}
void insert(iterator position, size_type n, const T &x) {
if (position == end()) {
resize(size() + n);
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
end() - n, end(), x);
} else {
auto i_n = ::std::distance(begin(), position);
// allocate temporary storage
auto m = ::std::distance(position, end());
// will throw if position is not inside active vector
auto tmp = _alloc.allocate(m);
// copy remainder
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, end(), tmp);
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, position + n, x);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, position + n);
_alloc.deallocate(tmp, m);
}
}
template <typename InputIterator>
void
insert(iterator position, InputIterator first,
typename ::std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = ::std::distance(first, last);
if (position == end()) {
resize(size() + n);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, end());
} else {
auto m = ::std::distance(position, end());
// will throw if position is not inside active vector
auto tmp = _alloc.allocate(m);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, end(), tmp);
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, position);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, position + n);
_alloc.deallocate(tmp, m);
}
}
Allocator get_allocator() const { return _alloc; }
};
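// Illustrative usage sketch (not part of the original header; the host data is
// an assumption). Round-trip a host std::vector through device_vector and run
// a oneDPL algorithm on the device copy:
//
//   std::vector<int> host = {3, 1, 2};
//   dpct::device_vector<int> dv(host);           // copies host data to device
//   auto policy = oneapi::dpl::execution::make_device_policy(
//       dpct::get_default_queue());
//   oneapi::dpl::sort(policy, dv.begin(), dv.end());
//   std::vector<int> back = dv;                   // copies device data back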
#else
template <typename T, typename Allocator = detail::__buffer_allocator<T>>
class device_vector {
static_assert(
std::is_same<Allocator, detail::__buffer_allocator<T>>::value,
"device_vector doesn't support custom allocator when USM is not used.");
public:
using iterator = device_iterator<T>;
using const_iterator = const iterator;
using reference = device_reference<T>;
using const_reference = const reference;
using value_type = T;
using pointer = T *;
using const_pointer = const T *;
using difference_type =
typename std::iterator_traits<iterator>::difference_type;
using size_type = std::size_t;
private:
using Buffer = sycl::buffer<T, 1>;
using Range = sycl::range<1>;
// Using mem_mgr to handle memory allocation
void *_storage;
size_type _size;
size_type _min_capacity() const { return size_type(1); }
void *alloc_store(size_type num_bytes) {
return detail::mem_mgr::instance().mem_alloc(num_bytes);
}
public:
template <typename OtherA> operator std::vector<T, OtherA>() const {
auto __tmp = std::vector<T, OtherA>(this->size());
std::copy(oneapi::dpl::execution::dpcpp_default, this->begin(), this->end(),
__tmp.begin());
return __tmp;
}
device_vector()
: _storage(alloc_store(_min_capacity() * sizeof(T))), _size(0) {}
~device_vector() = default;
explicit device_vector(size_type n) : device_vector(n, T()) {}
explicit device_vector(size_type n, const T &value)
: _storage(alloc_store(std::max(n, _min_capacity()) * sizeof(T))),
_size(n) {
auto buf = get_buffer();
std::fill(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(buf),
oneapi::dpl::begin(buf) + n, T(value));
}
device_vector(const device_vector &other)
: _storage(other._storage), _size(other.size()) {}
device_vector(device_vector &&other)
: _storage(std::move(other._storage)), _size(other.size()) {}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<
internal::is_iterator<InputIterator>::value &&
!std::is_pointer<InputIterator>::value &&
std::is_same<typename std::iterator_traits<
InputIterator>::iterator_category,
std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, dst);
}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<std::is_pointer<InputIterator>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
Buffer tmp_buf(first, last);
auto start = oneapi::dpl::begin(tmp_buf);
auto end = oneapi::dpl::end(tmp_buf);
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
start, end, dst);
}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<
internal::is_iterator<InputIterator>::value &&
!std::is_same<typename std::iterator_traits<
InputIterator>::iterator_category,
std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
std::vector<T> tmp(first, last);
Buffer tmp_buf(tmp);
auto start = oneapi::dpl::begin(tmp_buf);
auto end = oneapi::dpl::end(tmp_buf);
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
start, end, dst);
}
template <typename OtherAllocator>
device_vector(const device_vector<T, OtherAllocator> &v)
: _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) {
auto buf = get_buffer();
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.real_begin(), v.real_begin() + v.size(), dst);
}
template <typename OtherAllocator>
device_vector(std::vector<T, OtherAllocator> &v)
: _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) {
std::copy(oneapi::dpl::execution::dpcpp_default, v.begin(), v.end(),
oneapi::dpl::begin(get_buffer()));
}
device_vector &operator=(const device_vector &other) {
// Copy assignment operator:
_size = other.size();
void *tmp = alloc_store(_size * sizeof(T));
auto tmp_buf =
detail::mem_mgr::instance()
.translate_ptr(tmp)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(other.get_buffer()),
oneapi::dpl::end(other.get_buffer()),
oneapi::dpl::begin(tmp_buf));
detail::mem_mgr::instance().mem_free(_storage);
_storage = tmp;
return *this;
}
device_vector &operator=(device_vector &&other) {
// Move assignment operator:
_size = other.size();
this->_storage = std::move(other._storage);
return *this;
}
template <typename OtherAllocator>
device_vector &operator=(const std::vector<T, OtherAllocator> &v) {
Buffer data(v.begin(), v.end());
_size = v.size();
void *tmp = alloc_store(_size * sizeof(T));
auto tmp_buf =
detail::mem_mgr::instance()
.translate_ptr(tmp)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(data),
oneapi::dpl::end(data), oneapi::dpl::begin(tmp_buf));
detail::mem_mgr::instance().mem_free(_storage);
_storage = tmp;
return *this;
}
Buffer get_buffer() const {
return detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template reinterpret<T, 1>(sycl::range<1>(capacity()));
}
size_type size() const { return _size; }
iterator begin() noexcept { return device_iterator<T>(get_buffer(), 0); }
iterator end() { return device_iterator<T>(get_buffer(), _size); }
const_iterator begin() const noexcept {
return device_iterator<T>(get_buffer(), 0);
}
const_iterator cbegin() const noexcept { return begin(); }
const_iterator end() const { return device_iterator<T>(get_buffer(), _size); }
const_iterator cend() const { return end(); }
T *real_begin() {
return (detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
}
const T *real_begin() const {
return const_cast<device_vector *>(this)
->detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template get_access<sycl::access_mode::read_write>()
.get_pointer();
}
void swap(device_vector &v) {
void *temp = v._storage;
v._storage = this->_storage;
this->_storage = temp;
std::swap(_size, v._size);
}
reference operator[](size_type n) { return *(begin() + n); }
const_reference operator[](size_type n) const { return *(begin() + n); }
void reserve(size_type n) {
if (n > capacity()) {
// create new buffer (allocate for new size)
void *a = alloc_store(n * sizeof(T));
// copy content (old buffer to new buffer)
if (_storage != nullptr) {
auto tmp = detail::mem_mgr::instance()
.translate_ptr(a)
.buffer.template reinterpret<T, 1>(sycl::range<1>(n));
auto src_buf = get_buffer();
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(src_buf), oneapi::dpl::end(src_buf),
oneapi::dpl::begin(tmp));
// deallocate old memory
detail::mem_mgr::instance().mem_free(_storage);
}
_storage = a;
}
}
void resize(size_type new_size, const T &x = T()) {
reserve(new_size);
if (_size < new_size) {
auto src_buf = get_buffer();
std::fill(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(src_buf) + _size,
oneapi::dpl::begin(src_buf) + new_size, x);
}
_size = new_size;
}
size_type max_size(void) const {
return std::numeric_limits<size_type>::max() / sizeof(T);
}
size_type capacity() const {
return _storage != nullptr ? detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.size() /
sizeof(T)
: 0;
}
const_reference front() const { return *begin(); }
reference front() { return *begin(); }
const_reference back(void) const { return *(end() - 1); }
reference back(void) { return *(end() - 1); }
pointer data(void) { return reinterpret_cast<pointer>(_storage); }
const_pointer data(void) const {
return reinterpret_cast<const_pointer>(_storage);
}
void shrink_to_fit(void) {
if (_size != capacity()) {
void *a = alloc_store(_size * sizeof(T));
auto tmp = detail::mem_mgr::instance()
.translate_ptr(a)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(get_buffer()),
oneapi::dpl::begin(get_buffer()) + _size,
oneapi::dpl::begin(tmp));
detail::mem_mgr::instance().mem_free(_storage);
_storage = a;
}
}
void assign(size_type n, const T &x) {
resize(n);
std::fill(oneapi::dpl::execution::dpcpp_default, begin(), begin() + n, x);
}
template <typename InputIterator>
void
assign(InputIterator first,
typename std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = std::distance(first, last);
resize(n);
if (internal::is_iterator<InputIterator>::value &&
!std::is_pointer<InputIterator>::value)
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, begin());
else {
Buffer tmp(first, last);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), begin());
}
}
void clear(void) {
_size = 0;
detail::mem_mgr::instance().mem_free(_storage);
_storage = nullptr;
}
bool empty(void) const { return (size() == 0); }
void push_back(const T &x) { insert(end(), size_type(1), x); }
void pop_back(void) {
if (_size > 0)
--_size;
}
iterator erase(iterator first, iterator last) {
auto n = std::distance(first, last);
if (last == end()) {
_size = _size - n;
return end();
}
Buffer tmp{Range(std::distance(last, end()))};
// copy remainder to temporary buffer.
std::copy(oneapi::dpl::execution::dpcpp_default, last, end(),
oneapi::dpl::begin(tmp));
    // overwrite (erase) the subsequence in storage.
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), first);
resize(_size - n);
return begin() + first.get_idx() + n;
}
iterator erase(iterator pos) { return erase(pos, pos + 1); }
iterator insert(iterator position, const T &x) {
auto n = std::distance(begin(), position);
insert(position, size_type(1), x);
return begin() + n;
}
void insert(iterator position, size_type n, const T &x) {
if (position == end()) {
resize(size() + n);
std::fill(oneapi::dpl::execution::dpcpp_default, end() - n, end(), x);
} else {
auto i_n = std::distance(begin(), position);
// allocate temporary storage
Buffer tmp{Range(std::distance(position, end()))};
// copy remainder
std::copy(oneapi::dpl::execution::dpcpp_default, position, end(),
oneapi::dpl::begin(tmp));
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
std::fill(oneapi::dpl::execution::dpcpp_default, position, position + n,
x);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), position + n);
}
}
template <typename InputIterator>
void
insert(iterator position, InputIterator first,
typename std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = std::distance(first, last);
if (position == end()) {
resize(size() + n);
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, end());
} else {
Buffer tmp{Range(std::distance(position, end()))};
std::copy(oneapi::dpl::execution::dpcpp_default, position, end(),
oneapi::dpl::begin(tmp));
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, position);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), position + n);
}
}
};
#endif
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/dpl_extras/dpcpp_extensions.h | //==---- dpcpp_extensions.h ------------------*- C++ -*---------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------===//
#ifndef __DPCT_DPCPP_EXTENSIONS_H__
#define __DPCT_DPCPP_EXTENSIONS_H__
#include <sycl/sycl.hpp>
#include <stdexcept>
#ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
#include <sycl/ext/oneapi/experimental/user_defined_reductions.hpp>
#endif
#include "../dpct.hpp"
namespace dpct {
namespace group {
namespace detail {
template <typename... _Args>
constexpr auto __reduce_over_group(_Args... __args) {
return sycl::reduce_over_group(__args...);
}
template <typename... _Args> constexpr auto __group_broadcast(_Args... __args) {
return sycl::group_broadcast(__args...);
}
template <typename... _Args>
constexpr auto __exclusive_scan_over_group(_Args... __args) {
return sycl::exclusive_scan_over_group(__args...);
}
template <typename... _Args>
constexpr auto __inclusive_scan_over_group(_Args... __args) {
return sycl::inclusive_scan_over_group(__args...);
}
} // end namespace detail
/// Perform an exclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the scan operation.
/// \param outputs Pointer to the location where scan results will be stored.
/// \param init initial value of the scan result.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ void
exclusive_scan(const Item &item, T (&inputs)[VALUES_PER_THREAD],
T (&outputs)[VALUES_PER_THREAD], T init,
BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
result = binary_op(result, inputs[i]);
}
T exclusive_result =
detail::__exclusive_scan_over_group(item.get_group(), result, binary_op);
T input = inputs[0];
if (item.get_local_linear_id() == 0) {
outputs[0] = init;
} else {
outputs[0] = exclusive_result;
}
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
T output = binary_op(input, outputs[i - 1]);
input = inputs[i];
outputs[i] = output;
}
}
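// Illustrative usage sketch (not part of the original header; the queue, range
// sizes, and VPT are assumptions). Each work-item contributes VPT values and
// receives the group-wide exclusive scan of its slice:
//
//   constexpr int VPT = 4;
//   q.parallel_for(
//       sycl::nd_range<1>(sycl::range<1>(256), sycl::range<1>(256)),
//       [=](sycl::nd_item<1> item) {
//         int in[VPT], out[VPT];
//         // ... fill `in` for this work-item ...
//         dpct::group::exclusive_scan(item, in, out, /*init=*/0,
//                                     sycl::plus<int>());
//       });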
/// Perform an exclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param init initial value of the scan result.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param group_aggregate group-wide aggregate of all inputs in the
/// work-items of the group.
/// \returns exclusive scan of the first i work-items where item is the i-th
/// work item.
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__ T
exclusive_scan(const Item &item, T input, T init, BinaryOperation binary_op,
T &group_aggregate) {
T output = detail::__exclusive_scan_over_group(item.get_group(), input, init,
binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = binary_op(output, input);
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
return output;
}
/// Perform an exclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param prefix_callback_op functor invoked by the first work-item in the
/// group that returns the initial value in the resulting scan of the
/// work-items in the group.
/// \returns exclusive scan of the input elements assigned to work-items in the
/// group.
template <typename Item, typename T, class BinaryOperation,
class GroupPrefixCallbackOperation>
__dpct_inline__ T
exclusive_scan(const Item &item, T input, BinaryOperation binary_op,
GroupPrefixCallbackOperation &prefix_callback_op) {
T group_aggregate;
T output =
detail::__exclusive_scan_over_group(item.get_group(), input, binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = binary_op(output, input);
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
T group_prefix = prefix_callback_op(group_aggregate);
if (item.get_local_linear_id() == 0) {
output = group_prefix;
} else {
output = binary_op(group_prefix, output);
}
return output;
}
namespace detail {
typedef uint16_t digit_counter_type;
typedef uint32_t packed_counter_type;
template <int N, int CURRENT_VAL = N, int COUNT = 0> struct log2 {
enum { VALUE = log2<N, (CURRENT_VAL >> 1), COUNT + 1>::VALUE };
};
template <int N, int COUNT> struct log2<N, 0, COUNT> {
enum { VALUE = (1 << (COUNT - 1) < N) ? COUNT : COUNT - 1 };
};
__dpct_inline__ uint32_t bfe(uint32_t source, uint32_t bit_start,
uint32_t num_bits) {
const uint32_t MASK = (1 << num_bits) - 1;
return (source >> bit_start) & MASK;
}
template <int RADIX_BITS, bool DESCENDING = false> class radix_rank {
public:
static size_t get_local_memory_size(size_t group_threads) {
return group_threads * PADDED_COUNTER_LANES * sizeof(packed_counter_type);
}
radix_rank(uint8_t *local_memory) : _local_memory(local_memory) {}
template <typename Item, int VALUES_PER_THREAD>
__dpct_inline__ void
rank_keys(const Item &item, uint32_t (&keys)[VALUES_PER_THREAD],
int (&ranks)[VALUES_PER_THREAD], int current_bit, int num_bits) {
digit_counter_type thread_prefixes[VALUES_PER_THREAD];
digit_counter_type *digit_counters[VALUES_PER_THREAD];
digit_counter_type *buffer =
reinterpret_cast<digit_counter_type *>(_local_memory);
reset_local_memory(item);
item.barrier(sycl::access::fence_space::local_space);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
uint32_t digit = bfe(keys[i], current_bit, num_bits);
uint32_t sub_counter = digit >> LOG_COUNTER_LANES;
uint32_t counter_lane = digit & (COUNTER_LANES - 1);
if (DESCENDING) {
sub_counter = PACKING_RATIO - 1 - sub_counter;
counter_lane = COUNTER_LANES - 1 - counter_lane;
}
digit_counters[i] =
&buffer[counter_lane * item.get_local_range().size() * PACKING_RATIO +
item.get_local_linear_id() * PACKING_RATIO + sub_counter];
thread_prefixes[i] = *digit_counters[i];
*digit_counters[i] = thread_prefixes[i] + 1;
}
item.barrier(sycl::access::fence_space::local_space);
scan_counters(item);
item.barrier(sycl::access::fence_space::local_space);
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
ranks[i] = thread_prefixes[i] + *digit_counters[i];
}
}
private:
template <typename Item>
__dpct_inline__ void reset_local_memory(const Item &item) {
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
ptr[i * item.get_local_range().size() + item.get_local_linear_id()] = 0;
}
}
template <typename Item>
__dpct_inline__ packed_counter_type upsweep(const Item &item) {
packed_counter_type sum = 0;
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; i++) {
cached_segment[i] =
ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i];
}
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
sum += cached_segment[i];
}
return sum;
}
template <typename Item>
__dpct_inline__ void
exclusive_downsweep(const Item &item, packed_counter_type raking_partial) {
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
packed_counter_type sum = raking_partial;
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
packed_counter_type value = cached_segment[i];
cached_segment[i] = sum;
sum += value;
}
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i] =
cached_segment[i];
}
}
struct prefix_callback {
__dpct_inline__ packed_counter_type
operator()(packed_counter_type block_aggregate) {
packed_counter_type block_prefix = 0;
#pragma unroll
for (int packed = 1; packed < PACKING_RATIO; packed++) {
block_prefix += block_aggregate
<< (sizeof(digit_counter_type) * 8 * packed);
}
return block_prefix;
}
};
template <typename Item>
__dpct_inline__ void scan_counters(const Item &item) {
packed_counter_type raking_partial = upsweep(item);
prefix_callback callback;
packed_counter_type exclusive_partial = exclusive_scan(
item, raking_partial, sycl::ext::oneapi::plus<packed_counter_type>(),
callback);
exclusive_downsweep(item, exclusive_partial);
}
private:
static constexpr int PACKING_RATIO =
sizeof(packed_counter_type) / sizeof(digit_counter_type);
static constexpr int LOG_PACKING_RATIO = log2<PACKING_RATIO>::VALUE;
static constexpr int LOG_COUNTER_LANES = RADIX_BITS - LOG_PACKING_RATIO;
static constexpr int COUNTER_LANES = 1 << LOG_COUNTER_LANES;
static constexpr int PADDED_COUNTER_LANES = COUNTER_LANES + 1;
packed_counter_type cached_segment[PADDED_COUNTER_LANES];
uint8_t *_local_memory;
};
template <typename T, typename U> struct base_traits {
static __dpct_inline__ U twiddle_in(U key) {
throw std::runtime_error("Not implemented");
}
static __dpct_inline__ U twiddle_out(U key) {
throw std::runtime_error("Not implemented");
}
};
template <typename U> struct base_traits<uint32_t, U> {
static __dpct_inline__ U twiddle_in(U key) { return key; }
static __dpct_inline__ U twiddle_out(U key) { return key; }
};
template <typename U> struct base_traits<int, U> {
static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1);
static __dpct_inline__ U twiddle_in(U key) { return key ^ HIGH_BIT; }
static __dpct_inline__ U twiddle_out(U key) { return key ^ HIGH_BIT; }
};
template <typename U> struct base_traits<float, U> {
static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1);
static __dpct_inline__ U twiddle_in(U key) {
U mask = (key & HIGH_BIT) ? U(-1) : HIGH_BIT;
return key ^ mask;
}
static __dpct_inline__ U twiddle_out(U key) {
U mask = (key & HIGH_BIT) ? HIGH_BIT : U(-1);
return key ^ mask;
}
};
template <typename T> struct traits : base_traits<T, T> {};
template <> struct traits<uint32_t> : base_traits<uint32_t, uint32_t> {};
template <> struct traits<int> : base_traits<int, uint32_t> {};
template <> struct traits<float> : base_traits<float, uint32_t> {};
} // namespace detail
namespace detail {
template <int N> struct power_of_two {
enum { VALUE = ((N & (N - 1)) == 0) };
};
__dpct_inline__ uint32_t shr_add(uint32_t x, uint32_t shift, uint32_t addend) {
return (x >> shift) + addend;
}
} // namespace detail
/// Implements scatter to blocked exchange pattern used in radix sort algorithm.
///
/// \tparam T type of the data elements exchanged
/// \tparam VALUES_PER_THREAD number of data elements assigned to a thread
template <typename T, int VALUES_PER_THREAD> class exchange {
public:
static size_t get_local_memory_size(size_t group_threads) {
size_t padding_values =
(INSERT_PADDING)
? ((group_threads * VALUES_PER_THREAD) >> LOG_LOCAL_MEMORY_BANKS)
: 0;
return (group_threads * VALUES_PER_THREAD + padding_values) * sizeof(T);
}
exchange(uint8_t *local_memory) : _local_memory(local_memory) {}
/// Rearrange elements from rank order to blocked order
template <typename Item>
__dpct_inline__ void
scatter_to_blocked(Item item, T (&keys)[VALUES_PER_THREAD],
int (&ranks)[VALUES_PER_THREAD]) {
T *buffer = reinterpret_cast<T *>(_local_memory);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; i++) {
int offset = ranks[i];
if (INSERT_PADDING)
offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset);
buffer[offset] = keys[i];
}
item.barrier(sycl::access::fence_space::local_space);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; i++) {
int offset = (item.get_local_id(0) * VALUES_PER_THREAD) + i;
if (INSERT_PADDING)
offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset);
keys[i] = buffer[offset];
}
}
private:
static constexpr int LOG_LOCAL_MEMORY_BANKS = 5;
static constexpr bool INSERT_PADDING =
(VALUES_PER_THREAD > 4) &&
(detail::power_of_two<VALUES_PER_THREAD>::VALUE);
uint8_t *_local_memory;
};
/// Implements radix sort to sort integer data elements assigned to all threads
/// in the group.
///
/// \tparam T type of the data elements exchanged
/// \tparam VALUES_PER_THREAD number of data elements assigned to a thread
/// \tparam DESCENDING boolean value indicating if data elements are sorted in
/// descending order.
template <typename T, int VALUES_PER_THREAD, bool DESCENDING = false>
class radix_sort {
public:
static size_t get_local_memory_size(size_t group_threads) {
size_t ranks_size =
detail::radix_rank<RADIX_BITS>::get_local_memory_size(group_threads);
size_t exchange_size =
exchange<T, VALUES_PER_THREAD>::get_local_memory_size(group_threads);
return sycl::max(ranks_size, exchange_size);
}
radix_sort(uint8_t *local_memory) : _local_memory(local_memory) {}
template <typename Item>
__dpct_inline__ void
sort(const Item &item, T (&keys)[VALUES_PER_THREAD], int begin_bit = 0,
int end_bit = 8 * sizeof(T)) {
uint32_t(&unsigned_keys)[VALUES_PER_THREAD] =
reinterpret_cast<uint32_t(&)[VALUES_PER_THREAD]>(keys);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
unsigned_keys[i] = detail::traits<T>::twiddle_in(unsigned_keys[i]);
}
while (true) {
int pass_bits = sycl::min(RADIX_BITS, end_bit - begin_bit);
int ranks[VALUES_PER_THREAD];
detail::radix_rank<RADIX_BITS, DESCENDING>(_local_memory)
.template rank_keys(item, unsigned_keys, ranks, begin_bit, pass_bits);
begin_bit += RADIX_BITS;
item.barrier(sycl::access::fence_space::local_space);
exchange<T, VALUES_PER_THREAD>(_local_memory)
.scatter_to_blocked(item, keys, ranks);
item.barrier(sycl::access::fence_space::local_space);
if (begin_bit >= end_bit)
break;
}
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
unsigned_keys[i] = detail::traits<T>::twiddle_out(unsigned_keys[i]);
}
}
private:
static constexpr int RADIX_BITS = 4;
uint8_t *_local_memory;
};
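/// Illustrative usage sketch (not part of the library): sorting four keys per
/// work-item inside an nd_range kernel. `cgh`, `item`, and `work_group_size`
/// are assumptions made for this sketch; the scratch space is sized with
/// get_local_memory_size and passed to the constructor as raw local memory.
/// \code
/// // constexpr int VPT = 4;
/// // size_t bytes =
/// //     dpct::group::radix_sort<int, VPT>::get_local_memory_size(
/// //         work_group_size);
/// // sycl::local_accessor<uint8_t, 1> scratch(sycl::range<1>(bytes), cgh);
/// // ... inside the kernel body ...
/// // int keys[VPT] = {...};  // blocked keys owned by this work-item
/// // dpct::group::radix_sort<int, VPT>(&scratch[0]).sort(item, keys);
/// \endcode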
/// Perform a reduction of the data elements assigned to all threads in the
/// group.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the reduce operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \returns value of the reduction using binary_op
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ T
reduce(Item item, T (&inputs)[VALUES_PER_THREAD], BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; i++) {
result = binary_op(result, inputs[i]);
}
return detail::__reduce_over_group(item.get_group(), result, binary_op);
}
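/// Illustrative usage sketch (not part of the library): reducing four values
/// per work-item to a single group-wide sum. `item` and the values are
/// assumptions made for this sketch.
/// \code
/// // float vals[4] = {a, b, c, d};
/// // float group_sum = dpct::group::reduce(item, vals, sycl::plus<float>());
/// \endcode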
/// Perform a reduction on a limited number of the work items in a subgroup
///
/// \param item A work-item in a group.
/// \param value value per work item which is to be reduced
/// \param items_to_reduce num work items at the start of the subgroup to reduce
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \returns value of the reduction using binary_op
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__
typename ::std::enable_if_t<sycl::has_known_identity_v<BinaryOperation, T>, T>
reduce_over_partial_group(const Item &item, const T &value,
const ::std::uint16_t &items_to_reduce,
BinaryOperation binary_op) {
T value_temp = (item.get_local_linear_id() < items_to_reduce)
? value
: sycl::known_identity_v<BinaryOperation, T>;
return detail::__reduce_over_group(item.get_sub_group(), value_temp,
binary_op);
}
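/// Illustrative usage sketch (not part of the library): reducing only the
/// first eight work-items of the sub-group; the remaining work-items
/// contribute the known identity of the operation. `item` and `value` are
/// assumptions made for this sketch.
/// \code
/// // float partial = dpct::group::reduce_over_partial_group(
/// //     item, value, 8, sycl::plus<float>());
/// \endcode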
/// Perform an inclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the scan operation.
/// \param outputs Pointer to the location where scan results will be stored.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan. The inclusive scan of the input elements assigned to
/// work-items in the group is written to \p outputs.
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ void
inclusive_scan(const Item &item, T (&inputs)[VALUES_PER_THREAD],
T (&outputs)[VALUES_PER_THREAD], BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
result = binary_op(result, inputs[i]);
}
T exclusive_result =
detail::__exclusive_scan_over_group(item.get_group(), result, binary_op);
if (item.get_local_linear_id() == 0) {
outputs[0] = inputs[0];
} else {
outputs[0] = binary_op(inputs[0], exclusive_result);
}
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
outputs[i] = binary_op(inputs[i], outputs[i - 1]);
}
}
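/// Illustrative usage sketch (not part of the library): scanning four values
/// per work-item; the per-element inclusive results are written to `out`.
/// `item` is an assumption made for this sketch.
/// \code
/// // int in[4] = {...};
/// // int out[4];
/// // dpct::group::inclusive_scan(item, in, out, sycl::plus<int>());
/// \endcode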
/// Perform an inclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param group_aggregate group-wide aggregate of all inputs in the
/// work-items of the group.
/// \returns inclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__ T inclusive_scan(const Item &item, T input,
BinaryOperation binary_op,
T &group_aggregate) {
T output =
detail::__inclusive_scan_over_group(item.get_group(), input, binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = output;
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
return output;
}
/// Perform an inclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param prefix_callback_op functor invoked by the first work-item in the
/// group that returns the initial value in the resulting scan of the
/// work-items in the group.
/// \returns inclusive scan of the input elements assigned to work-items in the
/// group.
template <typename Item, typename T, class BinaryOperation,
class GroupPrefixCallbackOperation>
__dpct_inline__ T
inclusive_scan(const Item &item, T input, BinaryOperation binary_op,
GroupPrefixCallbackOperation &prefix_callback_op) {
T group_aggregate;
T output = inclusive_scan(item, input, binary_op, group_aggregate);
T group_prefix = prefix_callback_op(group_aggregate);
return binary_op(group_prefix, output);
}
} // namespace group
namespace device {
namespace detail {
template <typename... _Args> constexpr auto __joint_reduce(_Args... __args) {
return sycl::joint_reduce(__args...);
}
} // namespace detail
/// Perform a reduce on each of the segments specified within data stored on
/// the device.
///
/// \param queue Command queue used to access device used for reduction
/// \param inputs Pointer to the data elements on the device to be reduced
/// \param outputs Pointer to the storage where the reduced value for each
/// segment will be stored
/// \param segment_count number of segments to be reduced
/// \param begin_offsets Pointer to the set of indices that are the first
/// element in each segment
/// \param end_offsets Pointer to the set of indices that are one past the
/// last element in each segment
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \param init initial value of the reduction for each segment.
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation>
void segmented_reduce(sycl::queue queue, T *inputs, T *outputs,
size_t segment_count, OffsetT *begin_offsets,
OffsetT *end_offsets, BinaryOperation binary_op, T init) {
sycl::range<1> global_size(segment_count * GROUP_SIZE);
sycl::range<1> local_size(GROUP_SIZE);
queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for(
sycl::nd_range<1>(global_size, local_size), [=](sycl::nd_item<1> item) {
OffsetT segment_begin = begin_offsets[item.get_group_linear_id()];
OffsetT segment_end = end_offsets[item.get_group_linear_id()];
if (segment_begin == segment_end) {
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = init;
}
return;
}
sycl::multi_ptr<T, sycl::access::address_space::global_space>
input_ptr = inputs;
T group_aggregate = detail::__joint_reduce(
item.get_group(), input_ptr + segment_begin,
input_ptr + segment_end, init, binary_op);
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = group_aggregate;
}
});
});
}
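/// Illustrative usage sketch (not part of the library): summing each of
/// `n_segments` segments of a USM device allocation. `q`, `data`, `out`,
/// `seg_begin`, and `seg_end` are assumptions made for this sketch; the group
/// size template argument is chosen arbitrarily.
/// \code
/// // dpct::device::segmented_reduce<128>(q, data, out, n_segments,
/// //                                     seg_begin, seg_end,
/// //                                     sycl::plus<float>(), 0.0f);
/// // q.wait();
/// \endcode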
#ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
namespace experimental {
namespace detail {
template <typename _Tp, typename... _Ts> struct __is_any {
constexpr static bool value = std::disjunction_v<
std::is_same<std::remove_cv_t<_Tp>, std::remove_cv_t<_Ts>>...>;
};
template <typename _Tp, typename _Bp> struct __in_native_op_list {
constexpr static bool value =
__is_any<_Bp, sycl::plus<_Tp>, sycl::bit_or<_Tp>, sycl::bit_xor<_Tp>,
sycl::bit_and<_Tp>, sycl::maximum<_Tp>, sycl::minimum<_Tp>,
sycl::multiplies<_Tp>>::value;
};
template <typename _Tp, typename _Bp> struct __is_native_op {
constexpr static bool value = __in_native_op_list<_Tp, _Bp>::value ||
__in_native_op_list<void, _Bp>::value;
};
} // namespace detail
/// Perform a reduce on each of the segments specified within data stored on
/// the device. Compared with dpct::device::segmented_reduce, this
/// experimental feature supports user-defined reductions.
///
/// \param queue Command queue used to access device used for reduction
/// \param inputs Pointer to the data elements on the device to be reduced
/// \param outputs Pointer to the storage where the reduced value for each
/// segment will be stored
/// \param segment_count number of segments to be reduced
/// \param begin_offsets Pointer to the set of indices that are the first
/// element in each segment
/// \param end_offsets Pointer to the set of indices that are one past the
/// last element in each segment
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \param init initial value of the reduction for each segment.
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation>
void segmented_reduce(sycl::queue queue, T *inputs, T *outputs,
size_t segment_count, OffsetT *begin_offsets,
OffsetT *end_offsets, BinaryOperation binary_op, T init) {
sycl::range<1> global_size(segment_count * GROUP_SIZE);
sycl::range<1> local_size(GROUP_SIZE);
if constexpr (!detail::__is_native_op<T, BinaryOperation>::value) {
queue.submit([&](sycl::handler &cgh) {
size_t temp_memory_size = GROUP_SIZE * sizeof(T);
auto scratch = sycl::local_accessor<std::byte, 1>(temp_memory_size, cgh);
cgh.parallel_for(
sycl::nd_range<1>(global_size, local_size),
[=](sycl::nd_item<1> item) {
OffsetT segment_begin = begin_offsets[item.get_group_linear_id()];
OffsetT segment_end = end_offsets[item.get_group_linear_id()];
if (segment_begin == segment_end) {
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = init;
}
return;
}
// Create a handle that associates the group with an allocation it
// can use
auto handle =
sycl::ext::oneapi::experimental::group_with_scratchpad(
item.get_group(),
sycl::span(&scratch[0], temp_memory_size));
T group_aggregate = sycl::ext::oneapi::experimental::joint_reduce(
handle, inputs + segment_begin, inputs + segment_end, init,
binary_op);
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = group_aggregate;
}
});
});
} else {
dpct::device::segmented_reduce<GROUP_SIZE>(queue, inputs, outputs,
segment_count, begin_offsets,
end_offsets, binary_op, init);
}
}
} // namespace experimental
#endif // SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
} // namespace device
} // namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/03_sycl_migrated_optimized/include/dpct/dpl_extras/functional.h | //==---- functional.h -----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_FUNCTIONAL_H__
#define __DPCT_FUNCTIONAL_H__
#include <functional>
#include <oneapi/dpl/functional>
#include <oneapi/dpl/iterator>
#if ONEDPL_USE_DPCPP_BACKEND
#include <oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_utils.h>
#endif
#include <tuple>
#include <utility>
namespace dpct {
struct null_type {};
namespace internal {
template <class _ExecPolicy, class _T>
using enable_if_execution_policy =
typename std::enable_if<oneapi::dpl::execution::is_execution_policy<
typename std::decay<_ExecPolicy>::type>::value,
_T>::type;
template <typename _T> struct is_hetero_execution_policy : ::std::false_type {};
template <typename... PolicyParams>
struct is_hetero_execution_policy<
oneapi::dpl::execution::device_policy<PolicyParams...>> : ::std::true_type {
};
template <typename _T> struct is_fpga_execution_policy : ::std::false_type {};
#if _ONEDPL_FPGA_DEVICE
template <unsigned int unroll_factor, typename... PolicyParams>
struct is_hetero_execution_policy<
execution::fpga_policy<unroll_factor, PolicyParams...>> : ::std::true_type {
};
#endif
template <class _ExecPolicy, class _T>
using enable_if_hetero_execution_policy = typename std::enable_if<
is_hetero_execution_policy<typename std::decay<_ExecPolicy>::type>::value,
_T>::type;
#if _ONEDPL_CPP14_INTEGER_SEQUENCE_PRESENT
template <std::size_t... _Sp>
using index_sequence = std::index_sequence<_Sp...>;
template <std::size_t _Np>
using make_index_sequence = std::make_index_sequence<_Np>;
#else
template <std::size_t... _Sp> class index_sequence {};
template <std::size_t _Np, std::size_t... _Sp>
struct make_index_sequence_impl
: make_index_sequence_impl<_Np - 1, _Np - 1, _Sp...> {};
template <std::size_t... _Sp> struct make_index_sequence_impl<0, _Sp...> {
using type = index_sequence<_Sp...>;
};
template <std::size_t _Np>
using make_index_sequence = typename make_index_sequence_impl<_Np>::type;
#endif
// Minimal buffer implementations for temporary storage in mapping rules
// Some of our algorithms need to start with raw memory buffer,
// not an initialized array, because initialization/destruction
// would make the span be at least O(N).
#if ONEDPL_USE_DPCPP_BACKEND
template <typename _Tp> class __buffer {
sycl::buffer<_Tp, 1> __buf;
__buffer(const __buffer &) = delete;
void operator=(const __buffer &) = delete;
public:
// Try to obtain buffer of given size to store objects of _Tp type
__buffer(std::size_t __n) : __buf(sycl::range<1>(__n)) {}
// Return pointer to buffer, or NULL if buffer could not be obtained.
auto get() -> decltype(oneapi::dpl::begin(__buf)) const {
return oneapi::dpl::begin(__buf);
}
};
#else
template <typename _Tp> class __buffer {
std::unique_ptr<_Tp> _M_ptr;
__buffer(const __buffer &) = delete;
void operator=(const __buffer &) = delete;
public:
// Try to obtain buffer of given size to store objects of _Tp type
__buffer(const std::size_t __n) : _M_ptr(new _Tp[__n]) {}
// Return pointer to buffer, or NULL if buffer could not be obtained.
_Tp *get() const { return _M_ptr.get(); }
};
#endif
// Implements C++14 std::less<void> specialization to allow parameter type
// deduction.
class __less {
public:
template <typename _Xp, typename _Yp>
bool operator()(_Xp &&__x, _Yp &&__y) const {
return std::forward<_Xp>(__x) < std::forward<_Yp>(__y);
}
};
template <typename Policy, typename NewName> struct rebind_policy {
using type = Policy;
};
template <typename KernelName, typename NewName>
struct rebind_policy<oneapi::dpl::execution::device_policy<KernelName>,
NewName> {
using type = oneapi::dpl::execution::device_policy<NewName>;
};
#if _ONEDPL_FPGA_DEVICE
template <unsigned int factor, typename KernelName, typename NewName>
struct rebind_policy<oneapi::dpl::execution::fpga_policy<factor, KernelName>,
NewName> {
using type = oneapi::dpl::execution::fpga_policy<factor, NewName>;
};
#endif
template <typename T1, typename T2,
typename R1 = typename std::iterator_traits<T1>::reference,
typename R2 = typename std::iterator_traits<T2>::reference>
struct perm_fun {
typedef R2 result_of;
perm_fun(T1 input) : source(input) {}
R2 operator()(R1 x) const { return *(source + x); }
private:
T1 source;
};
// Functor compares first element (key) from tied sequence.
template <typename Compare = class internal::__less> struct compare_key_fun {
typedef bool result_of;
compare_key_fun(Compare _comp = internal::__less()) : comp(_comp) {}
template <typename _T1, typename _T2>
result_of operator()(_T1 &&a, _T2 &&b) const {
using std::get;
return comp(get<0>(a), get<0>(b));
}
private:
mutable Compare comp;
};
// Functor evaluates second element of tied sequence with predicate.
// Used by: copy_if, remove_copy_if, stable_partition_copy
// Lambda:
template <typename Predicate> struct predicate_key_fun {
typedef bool result_of;
predicate_key_fun(Predicate _pred) : pred(_pred) {}
template <typename _T1> result_of operator()(_T1 &&a) const {
using std::get;
return pred(get<1>(a));
}
private:
mutable Predicate pred;
};
// Used by: remove_if
template <typename Predicate> struct negate_predicate_key_fun {
typedef bool result_of;
negate_predicate_key_fun(Predicate _pred) : pred(_pred) {}
template <typename _T1> result_of operator()(_T1 &&a) const {
using std::get;
return !pred(get<1>(a));
}
private:
mutable Predicate pred;
};
template <typename T> struct sequence_fun {
using result_type = T;
sequence_fun(T _init, T _step) : init(_init), step(_step) {}
template <typename _T> result_type operator()(_T &&i) const {
return static_cast<T>(init + step * i);
}
private:
const T init;
const T step;
};
//[binary_pred](Ref a, Ref b){ return(binary_pred(get<0>(a),get<0>(b)));
template <typename Predicate> struct unique_fun {
typedef bool result_of;
unique_fun(Predicate _pred) : pred(_pred) {}
template <typename _T> result_of operator()(_T &&a, _T &&b) const {
using std::get;
return pred(get<0>(a), get<0>(b));
}
private:
mutable Predicate pred;
};
// Lambda: [pred, &new_value](Ref1 a, Ref2 s) {return pred(s) ? new_value : a;
// });
template <typename T, typename Predicate> struct replace_if_fun {
public:
typedef T result_of;
replace_if_fun(Predicate _pred, T _new_value)
: pred(_pred), new_value(_new_value) {}
template <typename _T1, typename _T2> T operator()(_T1 &&a, _T2 &&s) const {
return pred(s) ? new_value : a;
}
private:
mutable Predicate pred;
const T new_value;
};
//[pred,op](Ref a){return pred(a) ? op(a) : a; }
template <typename T, typename Predicate, typename Operator>
struct transform_if_fun {
transform_if_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T&& t) const {
using std::get;
if (pred(get<0>(t)))
get<1>(t) = op(get<0>(t));
}
private:
mutable Predicate pred;
mutable Operator op;
};
//[pred, op](Ref1 a, Ref2 s) { return pred(s) ? op(a) : a; });
template <typename T, typename Predicate, typename Operator>
struct transform_if_unary_zip_mask_fun {
transform_if_unary_zip_mask_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T&& t) const {
using std::get;
if (pred(get<1>(t)))
get<2>(t) = op(get<0>(t));
}
private:
mutable Predicate pred;
mutable Operator op;
};
template <typename T, typename Predicate, typename BinaryOperation>
class transform_if_zip_mask_fun {
public:
transform_if_zip_mask_fun(Predicate _pred = oneapi::dpl::identity(),
BinaryOperation _op = oneapi::dpl::identity())
: pred(_pred), op(_op) {}
template <typename _T> void operator()(_T &&t) const {
using std::get;
if (pred(get<2>(t)))
get<3>(t) = op(get<0>(t), get<1>(t));
}
private:
mutable Predicate pred;
mutable BinaryOperation op;
};
// The following code is similar to a section of code in
// oneDPL/include/oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_radix_sort.h
// It has a similar approach, and could be consolidated.
// Outside of some differences in approach, there are two significant
// differences in function.
//
// 1) This code allows the output type of the bit range translation to be fit
// into the minimal type required to provide that many bits. The code in
// oneDPL to calculate the bucket for the radix is similar but its output is
// always std::uint32_t. The assumption that the bit range desired will fit in
// 32 bits is not true for this code.
//
// 2) This code ensures that for floating point type, -0.0f and 0.0f map to the
// same value. This allows the output of this translation to be used to provide
// a sort which ensures the stability of these values for floating point types.
template <int N> struct uint_byte_map {};
template <> struct uint_byte_map<1> { using type = uint8_t; };
template <> struct uint_byte_map<2> { using type = uint16_t; };
template <> struct uint_byte_map<4> { using type = uint32_t; };
template <> struct uint_byte_map<8> { using type = uint64_t; };
template <typename T> struct uint_map {
using type = typename uint_byte_map<sizeof(T)>::type;
};
template <typename T, typename OutKeyT> class translate_key {
using uint_type_t = typename uint_map<T>::type;
public:
translate_key(int begin_bit, int end_bit) {
shift = begin_bit;
mask = ~OutKeyT(0); // all ones
mask = mask >> (sizeof(OutKeyT) * 8 -
(end_bit - begin_bit)); // setup appropriate mask
flip_sign = uint_type_t(1) << (sizeof(uint_type_t) * 8 - 1); // sign bit
flip_key = ~uint_type_t(0); // 0xF...F
}
inline OutKeyT operator()(const T &key) const {
uint_type_t intermediate;
if constexpr (std::is_floating_point<T>::value) {
// normal case (both -0.0f and 0.0f equal -0.0f)
if (key != T(-0.0f)) {
uint_type_t is_negative = reinterpret_cast<const uint_type_t &>(key) >>
(sizeof(uint_type_t) * 8 - 1);
intermediate = reinterpret_cast<const uint_type_t &>(key) ^
((is_negative * flip_key) | flip_sign);
} else // special case for -0.0f to keep stability with 0.0f
{
T negzero = T(-0.0f);
intermediate = reinterpret_cast<const uint_type_t &>(negzero);
}
} else if constexpr (std::is_signed<T>::value) {
intermediate = reinterpret_cast<const uint_type_t &>(key) ^ flip_sign;
} else {
intermediate = key;
}
return static_cast<OutKeyT>(intermediate >> shift) &
mask; // shift, cast, and mask
}
private:
uint8_t shift;
OutKeyT mask;
uint_type_t flip_sign;
uint_type_t flip_key;
};
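// Illustrative usage sketch (not part of the library): extracting the top
// 8 bits of a float key as a radix bucket so that bucket order matches the
// ordering of the original floats. The begin/end bit values are assumptions
// made for this sketch.
//   dpct::internal::translate_key<float, uint8_t> tk(/*begin_bit=*/24,
//                                                    /*end_bit=*/32);
//   uint8_t bucket = tk(3.5f);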
} // end namespace internal
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Common/helper_timer.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Helper Timing Functions
#ifndef COMMON_HELPER_TIMER_H_
#define COMMON_HELPER_TIMER_H_
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// includes, system
#include <vector>
// includes, project
#include <exception.h>
// Definition of the StopWatch Interface, this is used if we don't want to use
// the CUT functions But rather in a self contained class interface
class StopWatchInterface {
public:
StopWatchInterface() {}
virtual ~StopWatchInterface() {}
public:
//! Start time measurement
virtual void start() = 0;
//! Stop time measurement
virtual void stop() = 0;
//! Reset time counters to zero
virtual void reset() = 0;
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
virtual float getTime() = 0;
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
virtual float getAverageTime() = 0;
};
//////////////////////////////////////////////////////////////////
// Begin Stopwatch timer class definitions for all OS platforms //
//////////////////////////////////////////////////////////////////
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// includes, system
#define WINDOWS_LEAN_AND_MEAN
#include <windows.h>
#undef min
#undef max
//! Windows specific implementation of StopWatch
class StopWatchWin : public StopWatchInterface {
public:
//! Constructor, default
StopWatchWin()
: start_time(),
end_time(),
diff_time(0.0f),
total_time(0.0f),
running(false),
clock_sessions(0),
freq(0),
freq_set(false) {
if (!freq_set) {
// helper variable
LARGE_INTEGER temp;
// get the tick frequency from the OS
QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER *>(&temp));
// convert to type in which it is needed
freq = (static_cast<double>(temp.QuadPart)) / 1000.0;
// remember query
freq_set = true;
}
}
// Destructor
~StopWatchWin() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// member variables
//! Start of measurement
LARGE_INTEGER start_time;
//! End of measurement
LARGE_INTEGER end_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
//! tick frequency
double freq;
//! flag if the frequency has been set
bool freq_set;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::start() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and add the measured interval to the running total
//! time. Also increment the number of times this clock has been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::stop() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&end_time));
diff_time = static_cast<float>(((static_cast<double>(end_time.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
total_time += diff_time;
clock_sessions++;
running = false;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
LARGE_INTEGER temp;
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&temp));
retval += static_cast<float>(((static_cast<double>(temp.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
#else
// Declarations for Stopwatch on Linux and Mac OSX
// includes, system
#include <sys/time.h>
#include <ctime>
//! Linux/Mac OS X specific implementation of StopWatch
class StopWatchLinux : public StopWatchInterface {
public:
//! Constructor, default
StopWatchLinux()
: start_time(),
diff_time(0.0),
total_time(0.0),
running(false),
clock_sessions(0) {}
// Destructor
virtual ~StopWatchLinux() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// helper functions
//! Get difference between start time and current time
inline float getDiffTime();
private:
// member variables
//! Start of measurement
struct timeval start_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::start() {
gettimeofday(&start_time, 0);
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and add the measured interval to the running total
//! time. Also increment the number of times this clock has been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::stop() {
diff_time = getDiffTime();
total_time += diff_time;
running = false;
clock_sessions++;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
gettimeofday(&start_time, 0);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
retval += getDiffTime();
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getDiffTime() {
struct timeval t_time;
gettimeofday(&t_time, 0);
// time difference in milli-seconds
return static_cast<float>(1000.0 * (t_time.tv_sec - start_time.tv_sec) +
(0.001 * (t_time.tv_usec - start_time.tv_usec)));
}
#endif // WIN32
////////////////////////////////////////////////////////////////////////////////
//! Timer functionality exported
////////////////////////////////////////////////////////////////////////////////
//! Create a new timer
//! @return true if a timer has been created, otherwise false
//! @param name of the new timer, 0 if the creation failed
////////////////////////////////////////////////////////////////////////////////
inline bool sdkCreateTimer(StopWatchInterface **timer_interface) {
// printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
*timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchWin());
#else
*timer_interface =
reinterpret_cast<StopWatchInterface *>(new StopWatchLinux());
#endif
return (*timer_interface != NULL) ? true : false;
}
////////////////////////////////////////////////////////////////////////////////
//! Delete a timer
//! @return true if a timer has been deleted, otherwise false
//! @param name of the timer to delete
////////////////////////////////////////////////////////////////////////////////
inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) {
// printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
delete *timer_interface;
*timer_interface = NULL;
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Start the time with name \a name
//! @param name name of the timer to start
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStartTimer(StopWatchInterface **timer_interface) {
// printf("sdkStartTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->start();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop the time with name \a name. Does not reset.
//! @param name name of the timer to stop
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStopTimer(StopWatchInterface **timer_interface) {
// printf("sdkStopTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->stop();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Resets the timer's counter.
//! @param name name of the timer to reset.
////////////////////////////////////////////////////////////////////////////////
inline bool sdkResetTimer(StopWatchInterface **timer_interface) {
// printf("sdkResetTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->reset();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Return the average time for timer execution as the total time
//! for the timer divided by the number of completed (stopped) runs the timer
//! has made.
//! Excludes the current running time if the timer is currently running.
//! @param name name of the timer to return the time of
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetAverageTimerValue called object %08x\n", (void
// *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getAverageTime();
} else {
return 0.0f;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Total execution time for the timer over all runs since the last reset
//! or timer creation.
//! @param name name of the timer to obtain the value of.
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getTime();
} else {
return 0.0f;
}
}
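////////////////////////////////////////////////////////////////////////////////
//! Illustrative usage sketch (not part of the helpers above): a typical
//! create/start/stop cycle. The timed work is a placeholder.
//!   StopWatchInterface *timer = NULL;
//!   sdkCreateTimer(&timer);
//!   sdkStartTimer(&timer);
//!   // ... work to be timed ...
//!   sdkStopTimer(&timer);
//!   printf("elapsed: %f ms\n", sdkGetTimerValue(&timer));
//!   sdkDeleteTimer(&timer);
////////////////////////////////////////////////////////////////////////////////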
#endif // COMMON_HELPER_TIMER_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Common/helper_string.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing, timers, etc)
#ifndef COMMON_HELPER_STRING_H_
#define COMMON_HELPER_STRING_H_
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef STRCASECMP
#define STRCASECMP _stricmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP _strnicmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#ifndef SPRINTF
#define SPRINTF sprintf_s
#endif
#else // Linux Includes
#include <string.h>
#include <strings.h>
#ifndef STRCASECMP
#define STRCASECMP strcasecmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#ifndef SPRINTF
#define SPRINTF sprintf
#endif
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// CUDA Utility Helper Functions
inline int stringRemoveDelimiter(char delimiter, const char *string) {
int string_start = 0;
while (string[string_start] == delimiter) {
string_start++;
}
if (string_start >= static_cast<int>(strlen(string) - 1)) {
return 0;
}
return string_start;
}
inline int getFileExtension(char *filename, char **extension) {
int string_length = static_cast<int>(strlen(filename));
while (filename[string_length--] != '.') {
if (string_length == 0) break;
}
if (string_length > 0) string_length += 2;
if (string_length == 0)
*extension = NULL;
else
*extension = &filename[string_length];
return string_length;
}
inline bool checkCmdLineFlag(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
const char *equal_pos = strchr(string_argv, '=');
int argv_length = static_cast<int>(
equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
int length = static_cast<int>(strlen(string_ref));
if (length == argv_length &&
!STRNCASECMP(string_argv, string_ref, length)) {
bFound = true;
continue;
}
}
}
return bFound;
}
// Template function to parse a numeric command line argument value of type T
template <class T>
inline bool getCmdLineArgumentValue(const int argc, const char **argv,
const char *string_ref, T *value) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
*value = (T)atoi(&string_argv[length + auto_inc]);
}
bFound = true;
i = argc;
}
}
}
return bFound;
}
inline int getCmdLineArgumentInt(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
int value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = atoi(&string_argv[length + auto_inc]);
} else {
value = 0;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline float getCmdLineArgumentFloat(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
float value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = static_cast<float>(atof(&string_argv[length + auto_inc]));
} else {
value = 0.f;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline bool getCmdLineArgumentString(const int argc, const char **argv,
const char *string_ref,
char **string_retval) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
char *string_argv = const_cast<char *>(&argv[i][string_start]);
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
*string_retval = &string_argv[length + 1];
bFound = true;
continue;
}
}
}
if (!bFound) {
*string_retval = NULL;
}
return bFound;
}
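//////////////////////////////////////////////////////////////////////////////
//! Illustrative usage sketch (not part of the helpers above): parsing
//! "--file=input.pgm --radius=5" from main(). The argument names are
//! assumptions made for this sketch.
//!   char *fname = NULL;
//!   if (getCmdLineArgumentString(argc, (const char **)argv, "file", &fname)) {
//!     // fname points into argv storage; do not free it
//!   }
//!   int radius = getCmdLineArgumentInt(argc, (const char **)argv, "radius");
//////////////////////////////////////////////////////////////////////////////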
//////////////////////////////////////////////////////////////////////////////
//! Find the path for a file assuming that
//! files are found in the searchPath.
//!
//! @return the path if succeeded, otherwise 0
//! @param filename name of the file
//! @param executable_path optional absolute path of the executable
//////////////////////////////////////////////////////////////////////////////
inline char *sdkFindFilePath(const char *filename,
const char *executable_path) {
// <executable_name> defines a variable that is replaced with the name of the
// executable
// Typical relative search paths to locate needed companion files (e.g. sample
// input data, or JIT source files) The origin for the relative search may be
// the .exe file, a .bat file launching an .exe, a browser .exe launching the
// .exe or .bat, etc
const char *searchPath[] = {
"./", // same dir
"./data/", // same dir
"../../../../Samples/<executable_name>/", // up 4 in tree
"../../../Samples/<executable_name>/", // up 3 in tree
"../../Samples/<executable_name>/", // up 2 in tree
"../../../../Samples/<executable_name>/data/", // up 4 in tree
"../../../Samples/<executable_name>/data/", // up 3 in tree
"../../Samples/<executable_name>/data/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/data/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/data/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/data/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/data/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/data/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/data/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/data/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/data/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/data/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/data/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/data/", // up 2 in tree
"../../../../Common/data/", // up 4 in tree
"../../../Common/data/", // up 3 in tree
"../../Common/data/" // up 2 in tree
};
// Extract the executable name
std::string executable_name;
if (executable_path != 0) {
executable_name = std::string(executable_path);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// Windows path delimiter
size_t delimiter_pos = executable_name.find_last_of('\\');
executable_name.erase(0, delimiter_pos + 1);
if (executable_name.rfind(".exe") != std::string::npos) {
// we strip .exe, only if the .exe is found
executable_name.resize(executable_name.size() - 4);
}
#else
// Linux & OSX path delimiter
size_t delimiter_pos = executable_name.find_last_of('/');
executable_name.erase(0, delimiter_pos + 1);
#endif
}
// Loop over all search paths and return the first hit
for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) {
std::string path(searchPath[i]);
size_t executable_name_pos = path.find("<executable_name>");
// If there is executable_name variable in the searchPath
// replace it with the value
if (executable_name_pos != std::string::npos) {
if (executable_path != 0) {
path.replace(executable_name_pos, strlen("<executable_name>"),
executable_name);
} else {
// Skip this path entry if no executable argument is given
continue;
}
}
#ifdef _DEBUG
printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str());
#endif
// Test if the file exists
path.append(filename);
FILE *fp;
FOPEN(fp, path.c_str(), "rb");
if (fp != NULL) {
fclose(fp);
// File found
// returning an allocated array here for backwards compatibility reasons
char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1));
STRCPY(file_path, path.length() + 1, path.c_str());
return file_path;
}
}
// File not found
printf("\nerror: sdkFindFilePath: file <%s> not found!\n", filename);
return 0;
}
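// Illustrative usage sketch: locate a data file on the search path above and
// free the malloc'd result. The file name "ref.ppm" and the use of argv[0]
// are placeholders, not part of the helper itself.
//
//   char *ref_path = sdkFindFilePath("ref.ppm", argv[0]);
//   if (ref_path != NULL) {
//     printf("found: %s\n", ref_path);
//     free(ref_path);  // sdkFindFilePath returns a malloc'd string
//   }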
#endif // COMMON_HELPER_STRING_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Common/helper_image.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (image,bitmap)
#ifndef COMMON_HELPER_IMAGE_H_
#define COMMON_HELPER_IMAGE_H_
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdint.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#ifndef MIN
#define MIN(a, b) ((a < b) ? a : b)
#endif
#ifndef MAX
#define MAX(a, b) ((a > b) ? a : b)
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#include <helper_string.h>
// namespace unnamed (internal)
namespace helper_image_internal {
//! size of PGM file header
const unsigned int PGMHeaderSize = 0x40;
// types
//! Data converter from unsigned char / unsigned byte to type T
template <class T>
struct ConverterFromUByte;
//! Data converter from unsigned char / unsigned byte to unsigned char
//! (pass-through)
template <>
struct ConverterFromUByte<unsigned char> {
  //! Conversion operator
  //! @return converted value
  //! @param val value to convert
  unsigned char operator()(const unsigned char &val) { return val; }
};
//! Data converter from unsigned char / unsigned byte to float
template <>
struct ConverterFromUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
float operator()(const unsigned char &val) {
return static_cast<float>(val) / 255.0f;
}
};
//! Data converter from type T to unsigned char / unsigned byte
template <class T>
struct ConverterToUByte;
//! Data converter from unsigned char to unsigned char (pass-through)
template <>
struct ConverterToUByte<unsigned char> {
  //! Conversion operator (essentially a pass-through)
  //! @return converted value
  //! @param val value to convert
  unsigned char operator()(const unsigned char &val) { return val; }
};
//! Data converter from float to unsigned char / unsigned byte
template <>
struct ConverterToUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
unsigned char operator()(const float &val) {
return static_cast<unsigned char>(val * 255.0f);
}
};
} // namespace helper_image_internal
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#else
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#endif
inline bool __loadPPM(const char *file, unsigned char **data, unsigned int *w,
unsigned int *h, unsigned int *channels) {
FILE *fp = NULL;
if (FOPEN_FAIL(FOPEN(fp, file, "rb"))) {
std::cerr << "__LoadPPM() : Failed to open file: " << file << std::endl;
return false;
}
// check header
char header[helper_image_internal::PGMHeaderSize];
  if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
    std::cerr << "__LoadPPM() : reading PGM header returned NULL" << std::endl;
    fclose(fp);
    return false;
  }
  if (strncmp(header, "P5", 2) == 0) {
    *channels = 1;
  } else if (strncmp(header, "P6", 2) == 0) {
    *channels = 3;
  } else {
    std::cerr << "__LoadPPM() : File is not a PPM or PGM image" << std::endl;
    *channels = 0;
    fclose(fp);
    return false;
  }
// parse header, read maxval, width and height
unsigned int width = 0;
unsigned int height = 0;
unsigned int maxval = 0;
unsigned int i = 0;
while (i < 3) {
    if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
      std::cerr << "__LoadPPM() : reading PGM header returned NULL"
                << std::endl;
      fclose(fp);
      return false;
    }
if (header[0] == '#') {
continue;
}
if (i == 0) {
i += SSCANF(header, "%u %u %u", &width, &height, &maxval);
} else if (i == 1) {
i += SSCANF(header, "%u %u", &height, &maxval);
} else if (i == 2) {
i += SSCANF(header, "%u", &maxval);
}
}
// check if given handle for the data is initialized
  if (NULL != *data) {
    if (*w != width || *h != height) {
      std::cerr << "__LoadPPM() : Invalid image dimensions." << std::endl;
      fclose(fp);
      return false;
    }
} else {
*data = (unsigned char *)malloc(sizeof(unsigned char) * width * height *
*channels);
*w = width;
*h = height;
}
// read and close file
if (fread(*data, sizeof(unsigned char), width * height * *channels, fp) ==
0) {
std::cerr << "__LoadPPM() read data returned error." << std::endl;
}
fclose(fp);
return true;
}
template <class T>
inline bool sdkLoadPGM(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = NULL;
unsigned int channels;
if (true != __loadPPM(file, &idata, w, h, &channels)) {
return false;
}
unsigned int size = *w * *h * channels;
  // initialize mem if necessary
  // the correct size is checked / set in __loadPPM()
if (NULL == *data) {
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size));
}
// copy and cast data
std::transform(idata, idata + size, *data,
helper_image_internal::ConverterFromUByte<T>());
free(idata);
return true;
}
template <class T>
inline bool sdkLoadPPM4(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size * 4));
    unsigned char *ptr = reinterpret_cast<unsigned char *>(*data);
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool __savePPM(const char *file, unsigned char *data, unsigned int w,
unsigned int h, unsigned int channels) {
assert(NULL != data);
assert(w > 0);
assert(h > 0);
std::fstream fh(file, std::fstream::out | std::fstream::binary);
if (fh.bad()) {
std::cerr << "__savePPM() : Opening file failed." << std::endl;
return false;
}
if (channels == 1) {
fh << "P5\n";
} else if (channels == 3) {
fh << "P6\n";
} else {
std::cerr << "__savePPM() : Invalid number of channels." << std::endl;
return false;
}
fh << w << "\n" << h << "\n" << 0xff << std::endl;
for (unsigned int i = 0; (i < (w * h * channels)) && fh.good(); ++i) {
fh << data[i];
}
fh.flush();
if (fh.bad()) {
std::cerr << "__savePPM() : Writing data failed." << std::endl;
return false;
}
fh.close();
return true;
}
template <class T>
inline bool sdkSavePGM(const char *file, T *data, unsigned int w,
unsigned int h) {
unsigned int size = w * h;
unsigned char *idata = (unsigned char *)malloc(sizeof(unsigned char) * size);
std::transform(data, data + size, idata,
helper_image_internal::ConverterToUByte<T>());
// write file
bool result = __savePPM(file, idata, w, h, 1);
// cleanup
free(idata);
return result;
}
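// Illustrative round-trip sketch: load a PGM into float data normalized to
// [0,1], then write it back out as an 8-bit PGM. The file names are
// placeholders.
//
//   float *img = NULL;
//   unsigned int w = 0, h = 0;
//   if (sdkLoadPGM<float>("input.pgm", &img, &w, &h)) {
//     sdkSavePGM<float>("copy.pgm", img, w, h);
//     free(img);  // buffer was malloc'd by sdkLoadPGM
//   }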
inline bool sdkSavePPM4ub(const char *file, unsigned char *data, unsigned int w,
unsigned int h) {
// strip 4th component
int size = w * h;
unsigned char *ndata =
(unsigned char *)malloc(sizeof(unsigned char) * size * 3);
unsigned char *ptr = ndata;
for (int i = 0; i < size; i++) {
*ptr++ = *data++;
*ptr++ = *data++;
*ptr++ = *data++;
data++;
}
bool result = __savePPM(file, ndata, w, h, 3);
free(ndata);
return result;
}
//////////////////////////////////////////////////////////////////////////////
//! Read file \filename and return the data
//! @return true if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFile(const char *filename, T **data, unsigned int *len,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// intermediate storage for the data read
std::vector<T> data_read;
// open file for reading
FILE *fh = NULL;
// check if filestream is valid
if (FOPEN_FAIL(FOPEN(fh, filename, "r"))) {
printf("Unable to open input file: %s\n", filename);
return false;
}
// read all data elements
T token;
while (!feof(fh)) {
fscanf(fh, "%f", &token);
data_read.push_back(token);
}
// the last element is read twice
data_read.pop_back();
fclose(fh);
// check if the given handle is already initialized
if (NULL != *data) {
if (*len != data_read.size()) {
std::cerr << "sdkReadFile() : Initialized memory given but "
<< "size mismatch with signal read "
<< "(data read / data init = " << (unsigned int)data_read.size()
<< " / " << *len << ")" << std::endl;
return false;
}
} else {
// allocate storage for the data read
*data = reinterpret_cast<T *>(malloc(sizeof(T) * data_read.size()));
// store signal size
*len = static_cast<unsigned int>(data_read.size());
}
// copy data
memcpy(*data, &data_read.front(), sizeof(T) * data_read.size());
return true;
}
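// Illustrative usage sketch: read a whitespace-separated list of numbers from
// a text file. Because the parser uses "%f", T is assumed to be float here;
// the file name is a placeholder.
//
//   float *values = NULL;
//   unsigned int count = 0;
//   if (sdkReadFile<float>("weights.txt", &values, &count, false)) {
//     // ... use values[0..count-1] ...
//     free(values);
//   }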
//////////////////////////////////////////////////////////////////////////////
//! Read file \filename and return the data
//! @return true if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFileBlocks(const char *filename, T **data, unsigned int *len,
unsigned int block_num, unsigned int block_size,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// open file for reading
  FILE *fh = fopen(filename, "rb");
  if (fh == NULL) {
    if (verbose) {
      std::cerr << "sdkReadFileBlocks() : Opening file failed." << std::endl;
    }
    return false;
  }
// check if the given handle is already initialized
// allocate storage for the data read
data[block_num] = reinterpret_cast<T *>(malloc(block_size));
// read all data elements
fseek(fh, block_num * block_size, SEEK_SET);
*len = fread(data[block_num], sizeof(T), block_size / sizeof(T), fh);
fclose(fh);
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Write a data file \filename
//! @return true if writing the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data data to write
//! @param len number of data elements in data, -1 on error
//! @param epsilon epsilon for comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool sdkWriteFile(const char *filename, const T *data, unsigned int len,
const S epsilon, bool verbose, bool append = false) {
assert(NULL != filename);
assert(NULL != data);
// open file for writing
// if (append) {
std::fstream fh(filename, std::fstream::out | std::fstream::ate);
if (verbose) {
std::cerr << "sdkWriteFile() : Open file " << filename
<< " for write/append." << std::endl;
}
/* } else {
std::fstream fh(filename, std::fstream::out);
if (verbose) {
std::cerr << "sdkWriteFile() : Open file " << filename << " for
write." << std::endl;
}
}
*/
// check if filestream is valid
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Opening file failed." << std::endl;
}
return false;
}
// first write epsilon
fh << "# " << epsilon << "\n";
// write data
for (unsigned int i = 0; (i < len) && (fh.good()); ++i) {
fh << data[i] << ' ';
}
// Check if writing succeeded
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Writing file failed." << std::endl;
}
return false;
}
// file ends with nl
fh << std::endl;
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareData(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
bool result = true;
unsigned int error_count = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = static_cast<float>(reference[i]) - static_cast<float>(data[i]);
bool comp = (diff <= epsilon) && (diff >= -epsilon);
result &= comp;
error_count += !comp;
#if 0
if (!comp) {
std::cerr << "ERROR, i = " << i << ",\t "
<< reference[i] << " / "
<< data[i]
<< " (reference / data)\n";
}
#endif
}
if (threshold == 0.0f) {
return (result) ? true : false;
} else {
if (error_count) {
printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
static_cast<float>(error_count) * 100 / static_cast<float>(len),
error_count);
}
return (len * threshold > error_count) ? true : false;
}
}
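// Illustrative usage sketch: compare a computed buffer against a reference
// with an absolute epsilon of 1e-5 and tolerate up to 5% mismatching
// elements. The variables reference, result and num_elems are assumed to be
// defined by the caller.
//
//   bool ok = compareData<float, float>(reference, result, num_elems,
//                                       1e-5f, 0.05f);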
#ifndef __MIN_EPSILON_ERROR
#define __MIN_EPSILON_ERROR 1e-3f
#endif
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//! @param threshold threshold % of (# of bytes) for pass/fail
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareDataAsFloatThreshold(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
// If we set epsilon to be 0, let's set a minimum threshold
float max_error = MAX((float)epsilon, __MIN_EPSILON_ERROR);
int error_count = 0;
bool result = true;
for (unsigned int i = 0; i < len; ++i) {
float diff =
fabs(static_cast<float>(reference[i]) - static_cast<float>(data[i]));
bool comp = (diff < max_error);
result &= comp;
if (!comp) {
error_count++;
}
}
if (threshold == 0.0f) {
if (error_count) {
printf("total # of errors = %d\n", error_count);
}
return (error_count == 0) ? true : false;
} else {
if (error_count) {
printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
static_cast<float>(error_count) * 100 / static_cast<float>(len),
error_count);
}
return ((len * threshold > error_count) ? true : false);
}
}
inline void sdkDumpBin(void *data, unsigned int bytes, const char *filename) {
printf("sdkDumpBin: <%s>\n", filename);
FILE *fp;
FOPEN(fp, filename, "wb");
fwrite(data, bytes, 1, fp);
fflush(fp);
fclose(fp);
}
inline bool sdkCompareBin2BinUint(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
unsigned int *src_buffer, *ref_buffer;
FILE *src_fp = NULL, *ref_fp = NULL;
uint64_t error_count = 0;
size_t fsize = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <unsigned int> unable to open src_file: %s\n",
src_file);
error_count++;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <unsigned int> unable to find <%s> in <%s>\n",
ref_file, exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
ref_file);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf(
"compareBin2Bin <unsigned int>"
" unable to open ref_file: %s\n",
ref_file_path);
error_count++;
}
if (src_fp && ref_fp) {
src_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
ref_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
      printf(
          "> compareBin2Bin <unsigned int> nelements=%d,"
          " epsilon=%4.2f, threshold=%4.2f\n",
          nelements, epsilon, threshold);
      fsize = fread(src_buffer, sizeof(unsigned int), nelements, src_fp);
      printf("   src_file <%s>, size=%d bytes\n", src_file,
             static_cast<int>(fsize * sizeof(unsigned int)));
      fsize = fread(ref_buffer, sizeof(unsigned int), nelements, ref_fp);
      printf("   ref_file <%s>, size=%d bytes\n", ref_file_path,
             static_cast<int>(fsize * sizeof(unsigned int)));
if (!compareData<unsigned int, float>(ref_buffer, src_buffer, nelements,
epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
inline bool sdkCompareBin2BinFloat(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
float *src_buffer = NULL, *ref_buffer = NULL;
FILE *src_fp = NULL, *ref_fp = NULL;
size_t fsize = 0;
uint64_t error_count = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <float> unable to open src_file: %s\n", src_file);
error_count = 1;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <float> unable to find <%s> in <%s>\n", ref_file,
exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
exec_path);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf("compareBin2Bin <float> unable to open ref_file: %s\n",
ref_file_path);
error_count = 1;
}
if (src_fp && ref_fp) {
src_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
ref_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
printf(
"> compareBin2Bin <float> nelements=%d, epsilon=%4.2f,"
" threshold=%4.2f\n",
nelements, epsilon, threshold);
fsize = fread(src_buffer, sizeof(float), nelements, src_fp);
printf(" src_file <%s>, size=%d bytes\n", src_file,
static_cast<int>(fsize * sizeof(float)));
fsize = fread(ref_buffer, sizeof(float), nelements, ref_fp);
printf(" ref_file <%s>, size=%d bytes\n", ref_file_path,
static_cast<int>(fsize * sizeof(float)));
if (!compareDataAsFloatThreshold<float, float>(
ref_buffer, src_buffer, nelements, epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
inline bool sdkCompareL2fe(const float *reference, const float *data,
const unsigned int len, const float epsilon) {
assert(epsilon >= 0);
float error = 0;
float ref = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = reference[i] - data[i];
error += diff * diff;
ref += reference[i] * reference[i];
}
float normRef = sqrtf(ref);
if (fabs(ref) < 1e-7) {
#ifdef _DEBUG
std::cerr << "ERROR, reference l2-norm is 0\n";
#endif
return false;
}
float normError = sqrtf(error);
error = normError / normRef;
bool result = error < epsilon;
#ifdef _DEBUG
if (!result) {
std::cerr << "ERROR, l2-norm error " << error << " is greater than epsilon "
<< epsilon << "\n";
}
#endif
return result;
}
inline bool sdkLoadPPMub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned int channels;
return __loadPPM(file, data, w, h, &channels);
}
inline bool sdkLoadPPM4ub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = (unsigned char *)malloc(sizeof(unsigned char) * size * 4);
unsigned char *ptr = *data;
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool sdkComparePPM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data, *ref_data;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPM4ub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
if (sdkLoadPPM4ub(src_file, &src_data, &src_width, &src_height) != true) {
std::cerr << "PPMvsPPM: unable to load src image file: " << src_file
<< "\n";
return false;
}
  if (src_height != ref_height || src_width != ref_width) {
    if (verboseErrors) {
      std::cerr << "PPMvsPPM: source and ref size mismatch (" << src_width
                << "," << src_height << ") vs (" << ref_width << ","
                << ref_height << ")\n";
    }
    return false;
  }
if (verboseErrors) {
std::cerr << "PPMvsPPM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
}
if (compareData(ref_data, src_data, src_width * src_height * 4, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
return (error_count == 0) ? true : false;
}
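// Illustrative usage sketch: compare a rendered PPM against a golden
// reference with a small per-channel epsilon and a 1% mismatch threshold,
// printing details on failure. The file names are placeholders.
//
//   bool match = sdkComparePPM("output.ppm", "ref.ppm", 0.1f, 0.01f, true);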
inline bool sdkComparePGM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data = 0, *ref_data = 0;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPMub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
if (sdkLoadPPMub(src_file, &src_data, &src_width, &src_height) != true) {
std::cerr << "PGMvsPGM: unable to load src image file: " << src_file
<< "\n";
return false;
}
  if (src_height != ref_height || src_width != ref_width) {
    if (verboseErrors) {
      std::cerr << "PGMvsPGM: source and ref size mismatch (" << src_width
                << "," << src_height << ") vs (" << ref_width << ","
                << ref_height << ")\n";
    }
    return false;
  }
if (verboseErrors)
std::cerr << "PGMvsPGM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
if (compareData(ref_data, src_data, src_width * src_height, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
return (error_count == 0) ? true : false;
}
#endif // COMMON_HELPER_IMAGE_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Common/exception.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* CUda UTility Library */
#ifndef COMMON_EXCEPTION_H_
#define COMMON_EXCEPTION_H_
// includes, system
#include <stdlib.h>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>
//! Exception wrapper.
//! @param Std_Exception Exception out of namespace std for easy typing.
template <class Std_Exception>
class Exception : public Std_Exception {
public:
//! @brief Static construction interface
  //! @return Always throws (Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const char *detailed = "-");
//! Static construction interface
  //! @return Always throws (Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const std::string &detailed);
//! Destructor
virtual ~Exception() throw();
private:
//! Constructor, default (private)
Exception();
//! Constructor, standard
//! @param str string returned by what()
explicit Exception(const std::string &str);
};
////////////////////////////////////////////////////////////////////////////////
//! Exception handler function for arbitrary exceptions
//! @param ex exception to handle
////////////////////////////////////////////////////////////////////////////////
template <class Exception_Typ>
inline void handleException(const Exception_Typ &ex) {
std::cerr << ex.what() << std::endl;
exit(EXIT_FAILURE);
}
//! Convenience macros
//! Exception caused by dynamic program behavior, e.g. file does not exist
#define RUNTIME_EXCEPTION(msg) \
Exception<std::runtime_error>::throw_it(__FILE__, __LINE__, msg)
//! Logic exception in program, e.g. an assert failed
#define LOGIC_EXCEPTION(msg) \
Exception<std::logic_error>::throw_it(__FILE__, __LINE__, msg)
//! Out of range exception
#define RANGE_EXCEPTION(msg) \
Exception<std::range_error>::throw_it(__FILE__, __LINE__, msg)
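//! Illustrative usage sketch: raise one of the convenience exceptions and
//! handle it with handleException(), which prints what() and exits.
//!
//!   try {
//!     RUNTIME_EXCEPTION("data file not found");
//!   } catch (const std::runtime_error &ex) {
//!     handleException(ex);
//!   }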
////////////////////////////////////////////////////////////////////////////////
//! Implementation
// includes, system
#include <sstream>
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const char *detailed) {
std::stringstream s;
  // Quite heavyweight, but exceptions are not for
  // performance / release versions
s << "Exception in file '" << file << "' in line " << line << "\n"
<< "Detailed description: " << detailed << "\n";
throw Exception(s.str());
}
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const std::string &msg) {
throw_it(file, line, msg.c_str());
}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, default (private).
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception() : Std_Exception("Unknown Exception.\n") {}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, standard (private).
//! String returned by what().
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception(const std::string &s) : Std_Exception(s) {}
////////////////////////////////////////////////////////////////////////////////
//! Destructor
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::~Exception() throw() {}
// functions, exported
#endif // COMMON_EXCEPTION_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Common/helper_cuda.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions for initialization and error checking
#ifndef COMMON_HELPER_CUDA_H_
#define COMMON_HELPER_CUDA_H_
#pragma once
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_string.h>
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// Note: your SDK sample is required to include the proper header files.
// Please refer to the CUDA examples for the CUDA headers needed, which may
// change depending on which CUDA functions are used.
// CUDA Runtime error messages
#ifdef __DPCT_HPP__
static const char *_cudaGetErrorEnum(dpct::err0 error) {
/*
DPCT1009:16: SYCL uses exceptions to report errors and does not use the error
codes. The original code was commented out and a warning string was inserted.
You need to rewrite this code.
*/
return "cudaGetErrorName is not supported" /*cudaGetErrorName(error)*/;
}
#endif
#ifdef CUDA_DRIVER_API
// CUDA Driver API errors
static const char *_cudaGetErrorEnum(CUresult error) {
static char unknown[] = "<unknown>";
const char *ret = NULL;
cuGetErrorName(error, &ret);
return ret ? ret : unknown;
}
#endif
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(cublasStatus_t error) {
switch (error) {
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
#endif
#ifdef _CUFFT_H_
// cuFFT API errors
static const char *_cudaGetErrorEnum(cufftResult error) {
switch (error) {
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
case CUFFT_INCOMPLETE_PARAMETER_LIST:
return "CUFFT_INCOMPLETE_PARAMETER_LIST";
case CUFFT_INVALID_DEVICE:
return "CUFFT_INVALID_DEVICE";
case CUFFT_PARSE_ERROR:
return "CUFFT_PARSE_ERROR";
case CUFFT_NO_WORKSPACE:
return "CUFFT_NO_WORKSPACE";
case CUFFT_NOT_IMPLEMENTED:
return "CUFFT_NOT_IMPLEMENTED";
case CUFFT_LICENSE_ERROR:
return "CUFFT_LICENSE_ERROR";
case CUFFT_NOT_SUPPORTED:
return "CUFFT_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSPARSEAPI
// cuSPARSE API errors
static const char *_cudaGetErrorEnum(cusparseStatus_t error) {
switch (error) {
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSOLVER_COMMON_H_
// cuSOLVER API errors
static const char *_cudaGetErrorEnum(cusolverStatus_t error) {
switch (error) {
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_STATUS_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_MAPPING_ERROR:
return "CUSOLVER_STATUS_MAPPING_ERROR";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSOLVER_STATUS_NOT_SUPPORTED:
return "CUSOLVER_STATUS_NOT_SUPPORTED ";
case CUSOLVER_STATUS_ZERO_PIVOT:
return "CUSOLVER_STATUS_ZERO_PIVOT";
case CUSOLVER_STATUS_INVALID_LICENSE:
return "CUSOLVER_STATUS_INVALID_LICENSE";
}
return "<unknown>";
}
#endif
#ifdef CURAND_H_
// cuRAND API errors
static const char *_cudaGetErrorEnum(int error) {
switch (error) {
case 0:
return "CURAND_STATUS_SUCCESS";
case 100:
return "CURAND_STATUS_VERSION_MISMATCH";
case 101:
return "CURAND_STATUS_NOT_INITIALIZED";
case 102:
return "CURAND_STATUS_ALLOCATION_FAILED";
case 103:
return "CURAND_STATUS_TYPE_ERROR";
case 104:
return "CURAND_STATUS_OUT_OF_RANGE";
case 105:
return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
case 106:
return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case 201:
return "CURAND_STATUS_LAUNCH_FAILURE";
case 202:
return "CURAND_STATUS_PREEXISTING_FAILURE";
case 203:
return "CURAND_STATUS_INITIALIZATION_FAILED";
case 204:
return "CURAND_STATUS_ARCH_MISMATCH";
case 999:
return "CURAND_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NVJPEGAPI
// nvJPEG API errors
static const char *_cudaGetErrorEnum(nvjpegStatus_t error) {
switch (error) {
case NVJPEG_STATUS_SUCCESS:
return "NVJPEG_STATUS_SUCCESS";
case NVJPEG_STATUS_NOT_INITIALIZED:
return "NVJPEG_STATUS_NOT_INITIALIZED";
case NVJPEG_STATUS_INVALID_PARAMETER:
return "NVJPEG_STATUS_INVALID_PARAMETER";
case NVJPEG_STATUS_BAD_JPEG:
return "NVJPEG_STATUS_BAD_JPEG";
case NVJPEG_STATUS_JPEG_NOT_SUPPORTED:
return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED";
case NVJPEG_STATUS_ALLOCATOR_FAILURE:
return "NVJPEG_STATUS_ALLOCATOR_FAILURE";
case NVJPEG_STATUS_EXECUTION_FAILED:
return "NVJPEG_STATUS_EXECUTION_FAILED";
case NVJPEG_STATUS_ARCH_MISMATCH:
return "NVJPEG_STATUS_ARCH_MISMATCH";
case NVJPEG_STATUS_INTERNAL_ERROR:
return "NVJPEG_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NV_NPPIDEFS_H
// NPP API errors
static const char *_cudaGetErrorEnum(NppStatus error) {
switch (error) {
case NPP_NOT_SUPPORTED_MODE_ERROR:
return "NPP_NOT_SUPPORTED_MODE_ERROR";
case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR";
case NPP_RESIZE_NO_OPERATION_ERROR:
return "NPP_RESIZE_NO_OPERATION_ERROR";
case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY:
return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_BAD_ARG_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFF_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECT_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUAD_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEM_ALLOC_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTO_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_INPUT:
return "NPP_INVALID_INPUT";
case NPP_POINTER_ERROR:
return "NPP_POINTER_ERROR";
case NPP_WARNING:
return "NPP_WARNING";
case NPP_ODD_ROI_WARNING:
return "NPP_ODD_ROI_WARNING";
#else
// These are for CUDA 5.5 or higher
case NPP_BAD_ARGUMENT_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFFICIENT_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECTANGLE_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUADRANGLE_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEMORY_ALLOCATION_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_HOST_POINTER_ERROR:
return "NPP_INVALID_HOST_POINTER_ERROR";
case NPP_INVALID_DEVICE_POINTER_ERROR:
return "NPP_INVALID_DEVICE_POINTER_ERROR";
#endif
case NPP_LUT_NUMBER_OF_LEVELS_ERROR:
return "NPP_LUT_NUMBER_OF_LEVELS_ERROR";
case NPP_TEXTURE_BIND_ERROR:
return "NPP_TEXTURE_BIND_ERROR";
case NPP_WRONG_INTERSECTION_ROI_ERROR:
return "NPP_WRONG_INTERSECTION_ROI_ERROR";
case NPP_NOT_EVEN_STEP_ERROR:
return "NPP_NOT_EVEN_STEP_ERROR";
case NPP_INTERPOLATION_ERROR:
return "NPP_INTERPOLATION_ERROR";
case NPP_RESIZE_FACTOR_ERROR:
return "NPP_RESIZE_FACTOR_ERROR";
case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR:
return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_MEMFREE_ERR:
return "NPP_MEMFREE_ERR";
case NPP_MEMSET_ERR:
return "NPP_MEMSET_ERR";
case NPP_MEMCPY_ERR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERR:
return "NPP_MIRROR_FLIP_ERR";
#else
case NPP_MEMFREE_ERROR:
return "NPP_MEMFREE_ERROR";
case NPP_MEMSET_ERROR:
return "NPP_MEMSET_ERROR";
case NPP_MEMCPY_ERROR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERROR:
return "NPP_MIRROR_FLIP_ERROR";
#endif
case NPP_ALIGNMENT_ERROR:
return "NPP_ALIGNMENT_ERROR";
case NPP_STEP_ERROR:
return "NPP_STEP_ERROR";
case NPP_SIZE_ERROR:
return "NPP_SIZE_ERROR";
case NPP_NULL_POINTER_ERROR:
return "NPP_NULL_POINTER_ERROR";
case NPP_CUDA_KERNEL_EXECUTION_ERROR:
return "NPP_CUDA_KERNEL_EXECUTION_ERROR";
case NPP_NOT_IMPLEMENTED_ERROR:
return "NPP_NOT_IMPLEMENTED_ERROR";
case NPP_ERROR:
return "NPP_ERROR";
case NPP_SUCCESS:
return "NPP_SUCCESS";
case NPP_WRONG_INTERSECTION_QUAD_WARNING:
return "NPP_WRONG_INTERSECTION_QUAD_WARNING";
case NPP_MISALIGNED_DST_ROI_WARNING:
return "NPP_MISALIGNED_DST_ROI_WARNING";
case NPP_AFFINE_QUAD_INCORRECT_WARNING:
return "NPP_AFFINE_QUAD_INCORRECT_WARNING";
case NPP_DOUBLE_SIZE_WARNING:
return "NPP_DOUBLE_SIZE_WARNING";
case NPP_WRONG_INTERSECTION_ROI_WARNING:
return "NPP_WRONG_INTERSECTION_ROI_WARNING";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000
/* These are 6.0 or higher */
case NPP_LUT_PALETTE_BITSIZE_ERROR:
return "NPP_LUT_PALETTE_BITSIZE_ERROR";
case NPP_ZC_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR";
case NPP_QUALITY_INDEX_ERROR:
return "NPP_QUALITY_INDEX_ERROR";
case NPP_CHANNEL_ORDER_ERROR:
return "NPP_CHANNEL_ORDER_ERROR";
case NPP_ZERO_MASK_VALUE_ERROR:
return "NPP_ZERO_MASK_VALUE_ERROR";
case NPP_NUMBER_OF_CHANNELS_ERROR:
return "NPP_NUMBER_OF_CHANNELS_ERROR";
case NPP_COI_ERROR:
return "NPP_COI_ERROR";
case NPP_DIVISOR_ERROR:
return "NPP_DIVISOR_ERROR";
case NPP_CHANNEL_ERROR:
return "NPP_CHANNEL_ERROR";
case NPP_STRIDE_ERROR:
return "NPP_STRIDE_ERROR";
case NPP_ANCHOR_ERROR:
return "NPP_ANCHOR_ERROR";
case NPP_MASK_SIZE_ERROR:
return "NPP_MASK_SIZE_ERROR";
case NPP_MOMENT_00_ZERO_ERROR:
return "NPP_MOMENT_00_ZERO_ERROR";
case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR:
return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR";
case NPP_THRESHOLD_ERROR:
return "NPP_THRESHOLD_ERROR";
case NPP_CONTEXT_MATCH_ERROR:
return "NPP_CONTEXT_MATCH_ERROR";
case NPP_FFT_FLAG_ERROR:
return "NPP_FFT_FLAG_ERROR";
case NPP_FFT_ORDER_ERROR:
return "NPP_FFT_ORDER_ERROR";
case NPP_SCALE_RANGE_ERROR:
return "NPP_SCALE_RANGE_ERROR";
case NPP_DATA_TYPE_ERROR:
return "NPP_DATA_TYPE_ERROR";
case NPP_OUT_OFF_RANGE_ERROR:
return "NPP_OUT_OFF_RANGE_ERROR";
case NPP_DIVIDE_BY_ZERO_ERROR:
return "NPP_DIVIDE_BY_ZERO_ERROR";
case NPP_RANGE_ERROR:
return "NPP_RANGE_ERROR";
case NPP_NO_MEMORY_ERROR:
return "NPP_NO_MEMORY_ERROR";
case NPP_ERROR_RESERVED:
return "NPP_ERROR_RESERVED";
case NPP_NO_OPERATION_WARNING:
return "NPP_NO_OPERATION_WARNING";
case NPP_DIVIDE_BY_ZERO_WARNING:
return "NPP_DIVIDE_BY_ZERO_WARNING";
#endif
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000
/* These are 7.0 or higher */
case NPP_OVERFLOW_ERROR:
return "NPP_OVERFLOW_ERROR";
case NPP_CORRUPTED_DATA_ERROR:
return "NPP_CORRUPTED_DATA_ERROR";
#endif
}
return "<unknown>";
}
#endif
template <typename T>
void check(T result, char const *const func, const char *const file,
int const line) {
}
#ifdef __DPCT_HPP__
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:17: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
// This will only print the proper error string when calling cudaGetLastError
// but will not exit the program in case an error is detected.
#define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__)
inline void __printLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:19: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
#endif
#ifndef MAX
#define MAX(a, b) (a > b ? a : b)
#endif
// Float To Int conversion
inline int ftoi(float value) {
return (value >= 0 ? static_cast<int>(value + 0.5)
: static_cast<int>(value - 0.5));
}
// Beginning of GPU Architecture definitions
inline int _ConvertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the # of cores per SM
typedef struct dpct_type_113531 {
    int SM; // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{0x80, 64},
{0x86, 128},
{0x87, 128},
{0x89, 128},
{0x90, 128},
{-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
  // If we don't find the value, we default to using the previous one
  // to run properly
printf(
"MapSMtoCores for SM %d.%d is undefined."
" Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
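// For example, _ConvertSMVer2Cores(8, 6) maps SM 8.6 (Ampere) to 128 CUDA
// cores per SM according to the table above.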
inline const char* _ConvertSMVer2ArchName(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the GPU Arch name)
typedef struct dpct_type_281558 {
    int SM; // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
const char* name;
} sSMtoArchName;
sSMtoArchName nGpuArchNameSM[] = {
{0x30, "Kepler"},
{0x32, "Kepler"},
{0x35, "Kepler"},
{0x37, "Kepler"},
{0x50, "Maxwell"},
{0x52, "Maxwell"},
{0x53, "Maxwell"},
{0x60, "Pascal"},
{0x61, "Pascal"},
{0x62, "Pascal"},
{0x70, "Volta"},
{0x72, "Xavier"},
{0x75, "Turing"},
{0x80, "Ampere"},
{0x86, "Ampere"},
{0x87, "Ampere"},
{0x89, "Ada"},
{0x90, "Hopper"},
{-1, "Graphics Device"}};
int index = 0;
while (nGpuArchNameSM[index].SM != -1) {
if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
return nGpuArchNameSM[index].name;
}
index++;
}
  // If we don't find the value, we default to using the previous one
  // to run properly
printf(
"MapSMtoArchName for SM %d.%d is undefined."
" Default to use %s\n",
major, minor, nGpuArchNameSM[index - 1].name);
return nGpuArchNameSM[index - 1].name;
}
// end of GPU Architecture definitions
#ifdef __DPCT_HPP__
// General GPU Device CUDA Initialization
inline int gpuDeviceInit(int devID) {
int device_count;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuDeviceInit() CUDA error: "
"no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
if (devID < 0) {
devID = 0;
}
if (devID > device_count - 1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n",
device_count);
fprintf(stderr,
">> gpuDeviceInit (-device=%d) is not a valid"
" GPU device. <<\n",
devID);
fprintf(stderr, "\n");
return -devID;
}
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:21: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
/*
DPCT1035:22: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
if (computeMode == 0) {
fprintf(stderr,
"Error: device is running in <Compute Mode "
"Prohibited>, no threads can use cudaSetDevice().\n");
return -1;
}
if (major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(EXIT_FAILURE);
}
/*
DPCT1093:23: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, _ConvertSMVer2ArchName(major, minor));
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
inline int gpuGetMaxGflopsDeviceId() try {
int current_device = 0, sm_per_multiproc = 0;
int max_perf_device = 0;
int device_count = 0;
int devices_prohibited = 0;
uint64_t max_compute_perf = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the best CUDA capable GPU device
current_device = 0;
while (current_device < device_count) {
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:24: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
// If this GPU is not running on Compute Mode prohibited,
// then we can add it to the list
/*
DPCT1035:25: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (computeMode != 0) {
if (major == 9999 && minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc =
_ConvertSMVer2Cores(major, minor);
}
int multiProcessorCount = 0, clockRate = 0;
checkCudaErrors(
DPCT_CHECK_ERROR(multiProcessorCount = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_compute_units()));
dpct::err0 result =
DPCT_CHECK_ERROR(clockRate = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_clock_frequency());
uint64_t compute_perf = (uint64_t)multiProcessorCount * sm_per_multiproc * clockRate;
if (compute_perf > max_compute_perf) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
devices_prohibited++;
}
++current_device;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" all devices have compute mode prohibited.\n");
exit(EXIT_FAILURE);
}
return max_perf_device;
}
catch (sycl::exception const &exc) {
std::cerr << exc.what() << "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
std::exit(1);
}
// Initialization code to find the best CUDA Device
inline int findCudaDevice(int argc, const char **argv) {
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameter\n ");
exit(EXIT_FAILURE);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_FAILURE);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
/*
DPCT1093:26: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
major =
dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor =
dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
devID, _ConvertSMVer2ArchName(major, minor), major, minor);
}
return devID;
}
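// Illustrative usage sketch: typical device selection at the start of a
// sample's main().
//
//   int main(int argc, char **argv) {
//     int devID = findCudaDevice(argc, (const char **)argv);
//     // ... submit work to the selected device ...
//     return 0;
//   }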
inline int findIntegratedGPU() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the integrated GPU which is compute capable
while (current_device < device_count) {
int computeMode = -1, integrated = -1;
/*
DPCT1035:27: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(
DPCT_CHECK_ERROR(integrated = dpct::dev_mgr::instance()
.get_device(current_device)
.get_integrated()));
// If GPU is integrated and is not running on Compute Mode prohibited,
// then cuda can map to GLES resource
/*
DPCT1035:28: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (integrated && (computeMode != 0)) {
/*
DPCT1093:29: The "current_device" device may be not the one intended for
use. Adjust the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(current_device)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, _ConvertSMVer2ArchName(major, minor), major, minor);
return current_device;
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No GLES-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
// General check for CUDA GPU SM Capabilities
inline bool checkCudaCapabilities(int major_version, int minor_version) {
int dev;
int major = 0, minor = 0;
  checkCudaErrors(DPCT_CHECK_ERROR(
      dev = dpct::dev_mgr::instance().current_device_id()));
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(dev).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(dev).get_minor_version()));
if ((major > major_version) ||
(major == major_version &&
minor >= minor_version)) {
printf(" Device %d: <%16s >, Compute SM %d.%d detected\n", dev,
_ConvertSMVer2ArchName(major, minor), major, minor);
return true;
} else {
printf(
" No GPU device was found that can support "
"CUDA compute capability %d.%d.\n",
major_version, minor_version);
return false;
}
}
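// Illustrative sketch (not called by this sample) of how a caller would
// typically gate on a minimum capability; EXIT_WAIVED is the waived-test exit
// code defined in the helper headers:
//   if (!checkCudaCapabilities(1, 1)) {
//     exit(EXIT_WAIVED);
//   }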
#endif
// end of CUDA Helper Functions
#endif // COMMON_HELPER_CUDA_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Common/helper_functions.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing,
// timers, image helpers, etc)
#ifndef COMMON_HELPER_FUNCTIONS_H_
#define COMMON_HELPER_FUNCTIONS_H_
#ifdef WIN32
#pragma warning(disable : 4996)
#endif
// includes, project
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
// includes, timer, string parsing, image helpers
#include <helper_image.h> // helper functions for image compare, dump, data comparisons
#include <helper_string.h> // helper functions for string parsing
#include <helper_timer.h> // helper functions for timers
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#endif // COMMON_HELPER_FUNCTIONS_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Samples/5_Domain_Specific/HSOpticalFlow/addKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief add two vectors of size _count_
///
/// CUDA kernel
/// \param[in] op1 term one
/// \param[in] op2 term two
/// \param[in] count vector size
/// \param[out] sum result
///////////////////////////////////////////////////////////////////////////////
void AddKernel(const float *op1, const float *op2, int count,
float *sum, const sycl::nd_item<3> &item_ct1) {
const int pos = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
if (pos >= count) return;
sum[pos] = op1[pos] + op2[pos];
}
///////////////////////////////////////////////////////////////////////////////
/// \brief add two vectors of size _count_
/// \param[in] op1 term one
/// \param[in] op2 term two
/// \param[in] count vector size
/// \param[out] sum result
///////////////////////////////////////////////////////////////////////////////
static void Add(const float *op1, const float *op2, int count, float *sum) {
sycl::range<3> threads(1, 1, 256);
sycl::range<3> blocks(1, 1, iDivUp(count, threads[2]));
/*
DPCT1049:15: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
dpct::get_default_queue().parallel_for(
sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
AddKernel(op1, op2, count, sum, item_ct1);
});
}
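// Sketch of the work-group-size adjustment suggested by DPCT1049 above,
// assuming the launch stays on dpct::get_default_queue() (not part of the
// migrated code):
//   size_t maxWG = dpct::get_default_queue().get_device()
//                      .get_info<sycl::info::device::max_work_group_size>();
//   sycl::range<3> threads(1, 1, std::min<size_t>(256, maxWG));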
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Samples/5_Domain_Specific/HSOpticalFlow/flowGold.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef FLOW_GOLD_H
#define FLOW_GOLD_H
void ComputeFlowGold(
const float *I0, // source frame
const float *I1, // tracked frame
int width, // frame width
int height, // frame height
int stride, // row access stride
float alpha, // smoothness coefficient
int nLevels, // number of levels in pyramid
int nWarpIters, // number of warping iterations per pyramid level
int nIters, // number of solver iterations (for linear system)
float *u, // output horizontal flow
float *v); // output vertical flow
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Samples/5_Domain_Specific/HSOpticalFlow/common.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
///////////////////////////////////////////////////////////////////////////////
// Header for common includes and utility functions
///////////////////////////////////////////////////////////////////////////////
#ifndef COMMON_H
#define COMMON_H
///////////////////////////////////////////////////////////////////////////////
// Common includes
///////////////////////////////////////////////////////////////////////////////
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <memory.h>
#include <math.h>
#include <helper_cuda.h>
///////////////////////////////////////////////////////////////////////////////
// Common constants
///////////////////////////////////////////////////////////////////////////////
const int StrideAlignment = 32;
///////////////////////////////////////////////////////////////////////////////
// Common functions
///////////////////////////////////////////////////////////////////////////////
// Align up n to the nearest multiple of m
inline int iAlignUp(int n, int m = StrideAlignment) {
int mod = n % m;
if (mod)
return n + m - mod;
else
return n;
}
// round up n/m
inline int iDivUp(int n, int m) { return (n + m - 1) / m; }
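// Example (illustrative): with the default StrideAlignment of 32,
// iAlignUp(30) == 32, iAlignUp(33) == 64, and iDivUp(33, 32) == 2, so a
// 33-pixel-wide row is padded to a 64-float stride and covered by two
// 32-wide blocks.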
// swap two values
template <typename T>
inline void Swap(T &a, T &b) {
T t = a;
a = b;
b = t;
}
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Samples/5_Domain_Specific/HSOpticalFlow/downscaleKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief downscale image
///
/// CUDA kernel, relies heavily on texture unit
/// \param[in] width image width
/// \param[in] height image height
/// \param[in] stride image stride
/// \param[out] out result
///////////////////////////////////////////////////////////////////////////////
void DownscaleKernel(int width, int height, int stride, float *out,
dpct::image_accessor_ext<float, 2> texFine,
const sycl::nd_item<3> &item_ct1) {
const int ix = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int iy = item_ct1.get_local_id(1) +
item_ct1.get_group(1) * item_ct1.get_local_range(1);
if (ix >= width || iy >= height) {
return;
}
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
out[ix + iy * stride] =
0.25f *
(texFine.read(x - dx * 0.25f, y) + texFine.read(x + dx * 0.25f, y) +
texFine.read(x, y - dy * 0.25f) + texFine.read(x, y + dy * 0.25f));
}
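// Note on the sampling pattern above: width/height are the *output* (coarse)
// dimensions, so dx and dy span two texels of the fine texture and the four
// reads sit a quarter step around the corner shared by a 2x2 block of fine
// texels. With linear filtering each read averages one adjacent pair, and the
// 0.25f-weighted sum works out to the plain mean of that 2x2 block, matching
// the CPU Downscale in flowGold.cpp.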
///////////////////////////////////////////////////////////////////////////////
/// \brief downscale image
///
/// \param[in] src image to downscale
/// \param[in] width image width
/// \param[in] height image height
/// \param[in] stride image stride
/// \param[out] out result
///////////////////////////////////////////////////////////////////////////////
static void Downscale(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float *out) {
sycl::range<3> threads(1, 8, 32);
sycl::range<3> blocks(1, iDivUp(newHeight, threads[1]),
iDivUp(newWidth, threads[2]));
dpct::image_wrapper_base_p texFine;
dpct::image_data texRes;
memset(&texRes, 0, sizeof(dpct::image_data));
texRes.set_data_type(dpct::image_data_type::pitch);
texRes.set_data_ptr((void *)src);
/*
DPCT1059:1: SYCL only supports 4-channel image format. Adjust the code.
*/
texRes.set_channel(dpct::image_channel::create<float>());
texRes.set_x(width);
texRes.set_y(height);
texRes.set_pitch(stride * sizeof(float));
dpct::sampling_info texDescr;
memset(&texDescr, 0, sizeof(dpct::sampling_info));
texDescr.set(sycl::addressing_mode::mirrored_repeat,
sycl::filtering_mode::linear,
sycl::coordinate_normalization_mode::normalized);
/*
DPCT1007:2: Migration of cudaTextureDesc::readMode is not supported.
*/
texDescr.readMode = cudaReadModeElementType;
checkCudaErrors(
DPCT_CHECK_ERROR(texFine = dpct::create_image_wrapper(texRes, texDescr)));
/*
DPCT1049:0: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
dpct::get_default_queue().submit([&](sycl::handler &cgh) {
auto texFine_acc =
static_cast<dpct::image_wrapper<float, 2> *>(texFine)->get_access(cgh);
auto texFine_smpl = texFine->get_sampler();
cgh.parallel_for(sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
DownscaleKernel(newWidth, newHeight, newStride, out,
dpct::image_accessor_ext<float, 2>(
texFine_smpl, texFine_acc),
item_ct1);
});
});
}
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Samples/5_Domain_Specific/HSOpticalFlow/flowCUDA.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
// include kernels
#include "downscaleKernel.dp.hpp"
#include "upscaleKernel.dp.hpp"
#include "warpingKernel.dp.hpp"
#include "derivativesKernel.dp.hpp"
#include "solverKernel.dp.hpp"
#include "addKernel.dp.hpp"
///////////////////////////////////////////////////////////////////////////////
/// \brief method logic
///
/// handles memory allocations, control flow
/// \param[in] I0 source image
/// \param[in] I1 tracked image
/// \param[in] width images width
/// \param[in] height images height
/// \param[in] stride images stride
/// \param[in] alpha degree of displacement field smoothness
/// \param[in] nLevels number of levels in a pyramid
/// \param[in] nWarpIters number of warping iterations per pyramid level
/// \param[in] nSolverIters number of solver iterations (Jacobi iterations)
/// \param[out] u horizontal displacement
/// \param[out] v vertical displacement
///////////////////////////////////////////////////////////////////////////////
void ComputeFlowCUDA(const float *I0, const float *I1, int width, int height,
int stride, float alpha, int nLevels, int nWarpIters,
int nSolverIters, float *u, float *v) {
printf("Computing optical flow on GPU...\n");
// pI0 and pI1 will hold device pointers
const float **pI0 = new const float *[nLevels];
const float **pI1 = new const float *[nLevels];
int *pW = new int[nLevels];
int *pH = new int[nLevels];
int *pS = new int[nLevels];
// device memory pointers
float *d_tmp;
float *d_du0;
float *d_dv0;
float *d_du1;
float *d_dv1;
float *d_Ix;
float *d_Iy;
float *d_Iz;
float *d_u;
float *d_v;
float *d_nu;
float *d_nv;
const int dataSize = stride * height * sizeof(float);
checkCudaErrors(DPCT_CHECK_ERROR(d_tmp = (float *)sycl::malloc_device(
dataSize, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(d_du0 = (float *)sycl::malloc_device(
dataSize, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(d_dv0 = (float *)sycl::malloc_device(
dataSize, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(d_du1 = (float *)sycl::malloc_device(
dataSize, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(d_dv1 = (float *)sycl::malloc_device(
dataSize, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(d_Ix = (float *)sycl::malloc_device(
dataSize, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(d_Iy = (float *)sycl::malloc_device(
dataSize, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(d_Iz = (float *)sycl::malloc_device(
dataSize, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(
d_u = (float *)sycl::malloc_device(dataSize, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(
d_v = (float *)sycl::malloc_device(dataSize, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(d_nu = (float *)sycl::malloc_device(
dataSize, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(d_nv = (float *)sycl::malloc_device(
dataSize, dpct::get_default_queue())));
// prepare pyramid
int currentLevel = nLevels - 1;
// allocate GPU memory for input images
checkCudaErrors(DPCT_CHECK_ERROR(
*(pI0 + currentLevel) = (const float *)sycl::malloc_device(
dataSize, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(
*(pI1 + currentLevel) = (const float *)sycl::malloc_device(
dataSize, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(dpct::get_default_queue()
.memcpy((void *)pI0[currentLevel], I0, dataSize)
.wait()));
checkCudaErrors(
DPCT_CHECK_ERROR(dpct::get_default_queue()
.memcpy((void *)pI1[currentLevel], I1, dataSize)
.wait()));
pW[currentLevel] = width;
pH[currentLevel] = height;
pS[currentLevel] = stride;
for (; currentLevel > 0; --currentLevel) {
int nw = pW[currentLevel] / 2;
int nh = pH[currentLevel] / 2;
int ns = iAlignUp(nw);
checkCudaErrors(DPCT_CHECK_ERROR(
*(pI0 + currentLevel - 1) = (const float *)sycl::malloc_device(
ns * nh * sizeof(float), dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(
*(pI1 + currentLevel - 1) = (const float *)sycl::malloc_device(
ns * nh * sizeof(float), dpct::get_default_queue())));
Downscale(pI0[currentLevel], pW[currentLevel], pH[currentLevel],
pS[currentLevel], nw, nh, ns, (float *)pI0[currentLevel - 1]);
Downscale(pI1[currentLevel], pW[currentLevel], pH[currentLevel],
pS[currentLevel], nw, nh, ns, (float *)pI1[currentLevel - 1]);
pW[currentLevel - 1] = nw;
pH[currentLevel - 1] = nh;
pS[currentLevel - 1] = ns;
}
checkCudaErrors(
DPCT_CHECK_ERROR(dpct::get_default_queue()
.memset(d_u, 0, stride * height * sizeof(float))
.wait()));
checkCudaErrors(
DPCT_CHECK_ERROR(dpct::get_default_queue()
.memset(d_v, 0, stride * height * sizeof(float))
.wait()));
// compute flow
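  // (coarse-to-fine: after the pyramid loop above currentLevel is 0, the
  // coarsest level; on each level I1 is warped by the current flow, the flow
  // update is solved with nSolverIters Jacobi sweeps, and u, v are
  // prolongated to the next finer level)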
for (; currentLevel < nLevels; ++currentLevel) {
for (int warpIter = 0; warpIter < nWarpIters; ++warpIter) {
checkCudaErrors(DPCT_CHECK_ERROR(
dpct::get_default_queue().memset(d_du0, 0, dataSize).wait()));
checkCudaErrors(DPCT_CHECK_ERROR(
dpct::get_default_queue().memset(d_dv0, 0, dataSize).wait()));
checkCudaErrors(DPCT_CHECK_ERROR(
dpct::get_default_queue().memset(d_du1, 0, dataSize).wait()));
checkCudaErrors(DPCT_CHECK_ERROR(
dpct::get_default_queue().memset(d_dv1, 0, dataSize).wait()));
// on current level we compute optical flow
// between frame 0 and warped frame 1
WarpImage(pI1[currentLevel], pW[currentLevel], pH[currentLevel],
pS[currentLevel], d_u, d_v, d_tmp);
ComputeDerivatives(pI0[currentLevel], d_tmp, pW[currentLevel],
pH[currentLevel], pS[currentLevel], d_Ix, d_Iy, d_Iz);
for (int iter = 0; iter < nSolverIters; ++iter) {
SolveForUpdate(d_du0, d_dv0, d_Ix, d_Iy, d_Iz, pW[currentLevel],
pH[currentLevel], pS[currentLevel], alpha, d_du1, d_dv1);
Swap(d_du0, d_du1);
Swap(d_dv0, d_dv1);
}
// update u, v
Add(d_u, d_du0, pH[currentLevel] * pS[currentLevel], d_u);
Add(d_v, d_dv0, pH[currentLevel] * pS[currentLevel], d_v);
}
if (currentLevel != nLevels - 1) {
// prolongate solution
float scaleX = (float)pW[currentLevel + 1] / (float)pW[currentLevel];
Upscale(d_u, pW[currentLevel], pH[currentLevel], pS[currentLevel],
pW[currentLevel + 1], pH[currentLevel + 1], pS[currentLevel + 1],
scaleX, d_nu);
float scaleY = (float)pH[currentLevel + 1] / (float)pH[currentLevel];
Upscale(d_v, pW[currentLevel], pH[currentLevel], pS[currentLevel],
pW[currentLevel + 1], pH[currentLevel + 1], pS[currentLevel + 1],
scaleY, d_nv);
Swap(d_u, d_nu);
Swap(d_v, d_nv);
}
}
checkCudaErrors(DPCT_CHECK_ERROR(
dpct::get_default_queue().memcpy(u, d_u, dataSize).wait()));
checkCudaErrors(DPCT_CHECK_ERROR(
dpct::get_default_queue().memcpy(v, d_v, dataSize).wait()));
// cleanup
for (int i = 0; i < nLevels; ++i) {
checkCudaErrors(DPCT_CHECK_ERROR(
sycl::free((void *)pI0[i], dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(
sycl::free((void *)pI1[i], dpct::get_default_queue())));
}
delete[] pI0;
delete[] pI1;
delete[] pW;
delete[] pH;
delete[] pS;
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_tmp, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_du0, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_dv0, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_du1, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_dv1, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_Ix, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_Iy, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_Iz, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_nu, dpct::get_default_queue())));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_nv, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(sycl::free(d_u, dpct::get_default_queue())));
checkCudaErrors(DPCT_CHECK_ERROR(sycl::free(d_v, dpct::get_default_queue())));
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Samples/5_Domain_Specific/HSOpticalFlow/main.cpp.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
const static char *const sSDKsample = "HSOpticalFlow";
// CPU-GPU discrepancy threshold for self-test
const float THRESHOLD = 0.05f;
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
#include "flowGold.h"
#include "flowCUDA.h"
#include <helper_functions.h>
#include <cmath>
///////////////////////////////////////////////////////////////////////////////
/// \brief save optical flow in format described on vision.middlebury.edu/flow
/// \param[in] name output file name
/// \param[in] w optical flow field width
/// \param[in] h optical flow field height
/// \param[in] s optical flow field row stride
/// \param[in] u horizontal displacement
/// \param[in] v vertical displacement
///////////////////////////////////////////////////////////////////////////////
void WriteFloFile(const char *name, int w, int h, int s, const float *u,
const float *v) {
FILE *stream;
stream = fopen(name, "wb");
if (stream == 0) {
printf("Could not save flow to \"%s\"\n", name);
return;
}
float data = 202021.25f;
fwrite(&data, sizeof(float), 1, stream);
fwrite(&w, sizeof(w), 1, stream);
fwrite(&h, sizeof(h), 1, stream);
for (int i = 0; i < h; ++i) {
for (int j = 0; j < w; ++j) {
const int pos = j + i * s;
fwrite(u + pos, sizeof(float), 1, stream);
fwrite(v + pos, sizeof(float), 1, stream);
}
}
fclose(stream);
}
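// Layout written above (see vision.middlebury.edu/flow): the float tag
// 202021.25f, then int32 width and height, then row-major interleaved (u, v)
// float pairs, i.e. w * h * 2 floats in total.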
///////////////////////////////////////////////////////////////////////////////
/// \brief
/// load 4-channel unsigned byte image
/// and convert it to single channel FP32 image
/// \param[out] img_data pointer to raw image data
/// \param[out] img_w image width
/// \param[out] img_h image height
/// \param[out] img_s image row stride
/// \param[in] name image file name
/// \param[in] exePath executable file path
/// \return true if image is successfully loaded or false otherwise
///////////////////////////////////////////////////////////////////////////////
bool LoadImageAsFP32(float *&img_data, int &img_w, int &img_h, int &img_s,
const char *name, const char *exePath) {
printf("Loading \"%s\" ...\n", name);
char *name_ = sdkFindFilePath(name, exePath);
if (!name_) {
printf("File not found\n");
return false;
}
unsigned char *data = 0;
unsigned int w = 0, h = 0;
bool result = sdkLoadPPM4ub(name_, &data, &w, &h);
if (result == false) {
printf("Invalid file format\n");
return false;
}
img_w = w;
img_h = h;
img_s = iAlignUp(img_w);
img_data = new float[img_s * h];
// source is 4 channel image
const int widthStep = 4 * img_w;
for (int i = 0; i < img_h; ++i) {
for (int j = 0; j < img_w; ++j) {
img_data[j + i * img_s] = ((float)data[j * 4 + i * widthStep]) / 255.0f;
}
}
return true;
}
///////////////////////////////////////////////////////////////////////////////
/// \brief compare given flow field with gold (L1 norm)
/// \param[in] width optical flow field width
/// \param[in] height optical flow field height
/// \param[in] stride optical flow field row stride
/// \param[in] h_uGold horizontal displacement, gold
/// \param[in] h_vGold vertical displacement, gold
/// \param[in] h_u horizontal displacement
/// \param[in] h_v vertical displacement
/// \return true if discrepancy is lower than a given threshold
///////////////////////////////////////////////////////////////////////////////
bool CompareWithGold(int width, int height, int stride, const float *h_uGold,
const float *h_vGold, const float *h_u, const float *h_v) {
float error = 0.0f;
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; ++j) {
const int pos = j + i * stride;
error += fabsf(h_u[pos] - h_uGold[pos]) + fabsf(h_v[pos] - h_vGold[pos]);
}
}
error /= (float)(width * height);
printf("L1 error : %.6f\n", error);
return (error < THRESHOLD);
}
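// (the value printed above is sum(|u - uGold| + |v - vGold|) / (width * height),
// i.e. the mean absolute difference per pixel over both flow components; the
// self-test passes while it stays below THRESHOLD)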
///////////////////////////////////////////////////////////////////////////////
/// application entry point
///////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
// welcome message
printf("%s Starting...\n\n", sSDKsample);
// pick GPU
findCudaDevice(argc, (const char **)argv);
// find images
const char *const sourceFrameName = "frame10.ppm";
const char *const targetFrameName = "frame11.ppm";
// image dimensions
int width;
int height;
// row access stride
int stride;
// flow is computed from source image to target image
float *h_source; // source image, host memory
float *h_target; // target image, host memory
// load image from file
if (!LoadImageAsFP32(h_source, width, height, stride, sourceFrameName,
argv[0])) {
exit(EXIT_FAILURE);
}
if (!LoadImageAsFP32(h_target, width, height, stride, targetFrameName,
argv[0])) {
exit(EXIT_FAILURE);
}
// allocate host memory for CPU results
float *h_uGold = new float[stride * height];
float *h_vGold = new float[stride * height];
// allocate host memory for GPU results
float *h_u = new float[stride * height];
float *h_v = new float[stride * height];
// smoothness
// if image brightness is not within [0,1]
  // this parameter should be scaled appropriately
const float alpha = 0.2f;
// number of pyramid levels
const int nLevels = 5;
// number of solver iterations on each level
const int nSolverIters = 500;
// number of warping iterations
const int nWarpIters = 3;
ComputeFlowGold(h_source, h_target, width, height, stride, alpha, nLevels,
nWarpIters, nSolverIters, h_uGold, h_vGold);
ComputeFlowCUDA(h_source, h_target, width, height, stride, alpha, nLevels,
nWarpIters, nSolverIters, h_u, h_v);
// compare results (L1 norm)
bool status =
CompareWithGold(width, height, stride, h_uGold, h_vGold, h_u, h_v);
WriteFloFile("FlowGPU.flo", width, height, stride, h_u, h_v);
WriteFloFile("FlowCPU.flo", width, height, stride, h_uGold, h_vGold);
// free resources
delete[] h_uGold;
delete[] h_vGold;
delete[] h_u;
delete[] h_v;
delete[] h_source;
delete[] h_target;
// report self-test status
exit(status ? EXIT_SUCCESS : EXIT_FAILURE);
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Samples/5_Domain_Specific/HSOpticalFlow/flowGold.cpp.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "common.h"
#include "flowGold.h"
#include <cmath>
///////////////////////////////////////////////////////////////////////////////
/// \brief host texture fetch
///
/// read from arbitrary position within image using bilinear interpolation
/// out of range coords are mirrored
/// \param[in] t texture raw data
/// \param[in] w texture width
/// \param[in] h texture height
/// \param[in] s texture stride
/// \param[in] x x coord of the point to fetch value at
/// \param[in] y y coord of the point to fetch value at
/// \return fetched value
///////////////////////////////////////////////////////////////////////////////
inline float Tex2D(const float *t, int w, int h, int s, float x, float y) {
// integer parts in floating point format
float intPartX, intPartY;
// get fractional parts of coordinates
float dx = fabsf(modff(x, &intPartX));
float dy = fabsf(modff(y, &intPartY));
  // assume pixels are square
// one of the corners
int ix0 = (int)intPartX;
int iy0 = (int)intPartY;
// mirror out-of-range position
if (ix0 < 0) ix0 = abs(ix0 + 1);
if (iy0 < 0) iy0 = abs(iy0 + 1);
if (ix0 >= w) ix0 = w * 2 - ix0 - 1;
if (iy0 >= h) iy0 = h * 2 - iy0 - 1;
// corner which is opposite to (ix0, iy0)
int ix1 = ix0 + 1;
int iy1 = iy0 + 1;
if (ix1 >= w) ix1 = w * 2 - ix1 - 1;
if (iy1 >= h) iy1 = h * 2 - iy1 - 1;
float res = t[ix0 + iy0 * s] * (1.0f - dx) * (1.0f - dy);
res += t[ix1 + iy0 * s] * dx * (1.0f - dy);
res += t[ix0 + iy1 * s] * (1.0f - dx) * dy;
res += t[ix1 + iy1 * s] * dx * dy;
return res;
}
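// This is the host-side counterpart of the texture reads used by the GPU
// kernels: bilinear (linear-filtering) lookups with mirrored handling of
// out-of-range coordinates, so the CPU gold path samples images in
// essentially the same way as the device path.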
///////////////////////////////////////////////////////////////////////////////
/// \brief host texture fetch
///
/// read specific texel value
/// out of range coords are mirrored
/// \param[in] t texture raw data
/// \param[in] w texture width
/// \param[in] h texture height
/// \param[in] s texture stride
/// \param[in] x x coord of the point to fetch value at
/// \param[in] y y coord of the point to fetch value at
/// \return fetched value
///////////////////////////////////////////////////////////////////////////////
inline float Tex2Di(const float *src, int w, int h, int s, int x, int y) {
if (x < 0) x = abs(x + 1);
if (y < 0) y = abs(y + 1);
if (x >= w) x = w * 2 - x - 1;
if (y >= h) y = h * 2 - y - 1;
return src[x + y * s];
}
///////////////////////////////////////////////////////////////////////////////
/// \brief resize image
/// \param[in] src image to downscale
/// \param[in] width image width
/// \param[in] height image height
/// \param[in] stride image stride
/// \param[in] newWidth image new width
/// \param[in] newHeight image new height
/// \param[in] newStride image new stride
/// \param[out] out downscaled image data
///////////////////////////////////////////////////////////////////////////////
static void Downscale(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float *out) {
for (int i = 0; i < newHeight; ++i) {
for (int j = 0; j < newWidth; ++j) {
const int srcX = j * 2;
const int srcY = i * 2;
// average 4 neighbouring pixels
float sum;
sum = Tex2Di(src, width, height, stride, srcX + 0, srcY + 0);
sum += Tex2Di(src, width, height, stride, srcX + 0, srcY + 1);
sum += Tex2Di(src, width, height, stride, srcX + 1, srcY + 0);
sum += Tex2Di(src, width, height, stride, srcX + 1, srcY + 1);
// normalize
sum *= 0.25f;
out[j + i * newStride] = sum;
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief upscale one component of a displacement field
/// \param[in] src field component to upscale
/// \param[in] width field current width
/// \param[in] height field current height
/// \param[in] stride field current stride
/// \param[in] newWidth field new width
/// \param[in] newHeight field new height
/// \param[in] newStride field new stride
/// \param[in] scale value scale factor (multiplier)
/// \param[out] out upscaled field component
///////////////////////////////////////////////////////////////////////////////
static void Upscale(const float *src, int width, int height, int stride,
int newWidth, int newHeight, int newStride, float scale,
float *out) {
for (int i = 0; i < newHeight; ++i) {
for (int j = 0; j < newWidth; ++j) {
// position within smaller image
float x = ((float)j - 0.5f) * 0.5f;
float y = ((float)i - 0.5f) * 0.5f;
out[j + i * newStride] = Tex2D(src, width, height, stride, x, y) * scale;
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief warp image with provided vector field
///
/// For each output pixel there is a vector which tells which pixel
/// from a source image should be mapped to this particular output
/// pixel.
/// It is assumed that images and the vector field have the same stride and
/// resolution.
/// \param[in] src source image
/// \param[in] w width
/// \param[in] h height
/// \param[in] s stride
/// \param[in] u horizontal displacement
/// \param[in] v vertical displacement
/// \param[out] out warped image
///////////////////////////////////////////////////////////////////////////////
static void WarpImage(const float *src, int w, int h, int s, const float *u,
const float *v, float *out) {
for (int i = 0; i < h; ++i) {
for (int j = 0; j < w; ++j) {
const int pos = j + i * s;
// warped coords
float x = (float)j + u[pos];
float y = (float)i + v[pos];
out[pos] = Tex2D(src, w, h, s, x, y);
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief computes image derivatives for a pair of images
/// \param[in] I0 source image
/// \param[in] I1 tracked image
/// \param[in] w images width
/// \param[in] h images height
/// \param[in] s images stride
/// \param[out] Ix x derivative
/// \param[out] Iy y derivative
/// \param[out] Iz temporal derivative
///////////////////////////////////////////////////////////////////////////////
static void ComputeDerivatives(const float *I0, const float *I1, int w, int h,
int s, float *Ix, float *Iy, float *Iz) {
for (int i = 0; i < h; ++i) {
for (int j = 0; j < w; ++j) {
const int pos = j + i * s;
float t0, t1;
// derivative filter is (1, -8, 0, 8, -1)/12
// x derivative
t0 = Tex2Di(I0, w, h, s, j - 2, i);
t0 -= Tex2Di(I0, w, h, s, j - 1, i) * 8.0f;
t0 += Tex2Di(I0, w, h, s, j + 1, i) * 8.0f;
t0 -= Tex2Di(I0, w, h, s, j + 2, i);
t0 /= 12.0f;
t1 = Tex2Di(I1, w, h, s, j - 2, i);
t1 -= Tex2Di(I1, w, h, s, j - 1, i) * 8.0f;
t1 += Tex2Di(I1, w, h, s, j + 1, i) * 8.0f;
t1 -= Tex2Di(I1, w, h, s, j + 2, i);
t1 /= 12.0f;
// spatial derivatives are averaged
Ix[pos] = (t0 + t1) * 0.5f;
// t derivative
Iz[pos] = I1[pos] - I0[pos];
// y derivative
t0 = Tex2Di(I0, w, h, s, j, i - 2);
t0 -= Tex2Di(I0, w, h, s, j, i - 1) * 8.0f;
t0 += Tex2Di(I0, w, h, s, j, i + 1) * 8.0f;
t0 -= Tex2Di(I0, w, h, s, j, i + 2);
t0 /= 12.0f;
t1 = Tex2Di(I1, w, h, s, j, i - 2);
t1 -= Tex2Di(I1, w, h, s, j, i - 1) * 8.0f;
t1 += Tex2Di(I1, w, h, s, j, i + 1) * 8.0f;
t1 -= Tex2Di(I1, w, h, s, j, i + 2);
t1 /= 12.0f;
Iy[pos] = (t0 + t1) * 0.5f;
}
}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief one iteration of classical Horn-Schunck method
///
/// It is one iteration of Jacobi method for a corresponding linear system
/// \param[in] du0 current horizontal displacement approximation
/// \param[in] dv0 current vertical displacement approximation
/// \param[in] Ix image x derivative
/// \param[in] Iy image y derivative
/// \param[in] Iz temporal derivative
/// \param[in] w width
/// \param[in] h height
/// \param[in] s stride
/// \param[in] alpha degree of smoothness
/// \param[out] du1 new horizontal displacement approximation
/// \param[out] dv1 new vertical displacement approximation
///////////////////////////////////////////////////////////////////////////////
static void SolveForUpdate(const float *du0, const float *dv0, const float *Ix,
const float *Iy, const float *Iz, int w, int h,
int s, float alpha, float *du1, float *dv1) {
for (int i = 0; i < h; ++i) {
for (int j = 0; j < w; ++j) {
const int pos = j + i * s;
int left, right, up, down;
// handle borders
if (j != 0)
left = pos - 1;
else
left = pos;
if (j != w - 1)
right = pos + 1;
else
right = pos;
if (i != 0)
down = pos - s;
else
down = pos;
if (i != h - 1)
up = pos + s;
else
up = pos;
float sumU = (du0[left] + du0[right] + du0[up] + du0[down]) * 0.25f;
float sumV = (dv0[left] + dv0[right] + dv0[up] + dv0[down]) * 0.25f;
float frac = (Ix[pos] * sumU + Iy[pos] * sumV + Iz[pos]) /
(Ix[pos] * Ix[pos] + Iy[pos] * Iy[pos] + alpha);
du1[pos] = sumU - Ix[pos] * frac;
dv1[pos] = sumV - Iy[pos] * frac;
}
}
}
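// One sweep above is a Jacobi step of the Horn-Schunck system: with
//   mean_u = average of the four du0 neighbours,
//   mean_v = average of the four dv0 neighbours,
//   frac   = (Ix*mean_u + Iy*mean_v + Iz) / (Ix^2 + Iy^2 + alpha),
// the update is du1 = mean_u - Ix*frac and dv1 = mean_v - Iy*frac; a larger
// alpha therefore weights the smoothness term more heavily.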
///////////////////////////////////////////////////////////////////////////////
/// \brief method logic
///
/// handles memory allocation and control flow
/// \param[in] I0 source image
/// \param[in] I1 tracked image
/// \param[in] width images width
/// \param[in] height images height
/// \param[in] stride images stride
/// \param[in] alpha degree of displacement field smoothness
/// \param[in] nLevels number of levels in a pyramid
/// \param[in] nWarpIters number of warping iterations per pyramid level
/// \param[in] nSolverIters number of solver iterations (Jacobi iterations)
/// \param[out] u horizontal displacement
/// \param[out] v vertical displacement
///////////////////////////////////////////////////////////////////////////////
void ComputeFlowGold(const float *I0, const float *I1, int width, int height,
int stride, float alpha, int nLevels, int nWarpIters,
int nSolverIters, float *u, float *v) {
printf("Computing optical flow on CPU...\n");
float *u0 = u;
float *v0 = v;
const float **pI0 = new const float *[nLevels];
const float **pI1 = new const float *[nLevels];
int *pW = new int[nLevels];
int *pH = new int[nLevels];
int *pS = new int[nLevels];
const int pixelCountAligned = height * stride;
float *tmp = new float[pixelCountAligned];
float *du0 = new float[pixelCountAligned];
float *dv0 = new float[pixelCountAligned];
float *du1 = new float[pixelCountAligned];
float *dv1 = new float[pixelCountAligned];
float *Ix = new float[pixelCountAligned];
float *Iy = new float[pixelCountAligned];
float *Iz = new float[pixelCountAligned];
float *nu = new float[pixelCountAligned];
float *nv = new float[pixelCountAligned];
// prepare pyramid
int currentLevel = nLevels - 1;
pI0[currentLevel] = I0;
pI1[currentLevel] = I1;
pW[currentLevel] = width;
pH[currentLevel] = height;
pS[currentLevel] = stride;
for (; currentLevel > 0; --currentLevel) {
int nw = pW[currentLevel] / 2;
int nh = pH[currentLevel] / 2;
int ns = iAlignUp(nw);
pI0[currentLevel - 1] = new float[ns * nh];
pI1[currentLevel - 1] = new float[ns * nh];
Downscale(pI0[currentLevel], pW[currentLevel], pH[currentLevel],
pS[currentLevel], nw, nh, ns, (float *)pI0[currentLevel - 1]);
Downscale(pI1[currentLevel], pW[currentLevel], pH[currentLevel],
pS[currentLevel], nw, nh, ns, (float *)pI1[currentLevel - 1]);
pW[currentLevel - 1] = nw;
pH[currentLevel - 1] = nh;
pS[currentLevel - 1] = ns;
}
// initial approximation
memset(u, 0, stride * height * sizeof(float));
memset(v, 0, stride * height * sizeof(float));
// compute flow
for (; currentLevel < nLevels; ++currentLevel) {
for (int warpIter = 0; warpIter < nWarpIters; ++warpIter) {
memset(du0, 0, pixelCountAligned * sizeof(float));
memset(dv0, 0, pixelCountAligned * sizeof(float));
memset(du1, 0, pixelCountAligned * sizeof(float));
memset(dv1, 0, pixelCountAligned * sizeof(float));
WarpImage(pI1[currentLevel], pW[currentLevel], pH[currentLevel],
pS[currentLevel], u, v, tmp);
// on current level we compute optical flow
// between frame 0 and warped frame 1
ComputeDerivatives(pI0[currentLevel], tmp, pW[currentLevel],
pH[currentLevel], pS[currentLevel], Ix, Iy, Iz);
for (int iter = 0; iter < nSolverIters; ++iter) {
SolveForUpdate(du0, dv0, Ix, Iy, Iz, pW[currentLevel], pH[currentLevel],
pS[currentLevel], alpha, du1, dv1);
Swap(du0, du1);
Swap(dv0, dv1);
}
// update u, v
for (int i = 0; i < pH[currentLevel] * pS[currentLevel]; ++i) {
u[i] += du0[i];
v[i] += dv0[i];
}
} // end for (int warpIter = 0; warpIter < nWarpIters; ++warpIter)
if (currentLevel != nLevels - 1) {
// prolongate solution
float scaleX = (float)pW[currentLevel + 1] / (float)pW[currentLevel];
Upscale(u, pW[currentLevel], pH[currentLevel], pS[currentLevel],
pW[currentLevel + 1], pH[currentLevel + 1], pS[currentLevel + 1],
scaleX, nu);
float scaleY = (float)pH[currentLevel + 1] / (float)pH[currentLevel];
Upscale(v, pW[currentLevel], pH[currentLevel], pS[currentLevel],
pW[currentLevel + 1], pH[currentLevel + 1], pS[currentLevel + 1],
scaleY, nv);
Swap(u, nu);
Swap(v, nv);
}
} // end for (; currentLevel < nLevels; ++currentLevel)
if (u != u0) {
// solution is not in the specified array
// copy
memcpy(u0, u, pixelCountAligned * sizeof(float));
memcpy(v0, v, pixelCountAligned * sizeof(float));
Swap(u, nu);
Swap(v, nv);
}
// cleanup
// last level is not being freed here
// because it refers to input images
for (int i = 0; i < nLevels - 1; ++i) {
delete[] pI0[i];
delete[] pI1[i];
}
delete[] pI0;
delete[] pI1;
delete[] pW;
delete[] pH;
delete[] pS;
delete[] tmp;
delete[] du0;
delete[] dv0;
delete[] du1;
delete[] dv1;
delete[] Ix;
delete[] Iy;
delete[] Iz;
delete[] nu;
delete[] nv;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Samples/5_Domain_Specific/HSOpticalFlow/flowCUDA.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef FLOW_CUDA_H
#define FLOW_CUDA_H
void ComputeFlowCUDA(
const float *I0, // source frame
const float *I1, // tracked frame
int width, // frame width
int height, // frame height
int stride, // row access stride
float alpha, // smoothness coefficient
int nLevels, // number of levels in pyramid
int nWarpIters, // number of warping iterations per pyramid level
int nSolverIters, // number of solver iterations (for linear system)
float *u, // output horizontal flow
float *v); // output vertical flow
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Samples/5_Domain_Specific/HSOpticalFlow/warpingKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief warp image with a given displacement field, CUDA kernel.
/// \param[in] width image width
/// \param[in] height image height
/// \param[in] stride image stride
/// \param[in] u horizontal displacement
/// \param[in] v vertical displacement
/// \param[out] out result
///////////////////////////////////////////////////////////////////////////////
void WarpingKernel(int width, int height, int stride, const float *u,
const float *v, float *out,
dpct::image_accessor_ext<float, 2> texToWarp,
const sycl::nd_item<3> &item_ct1) {
const int ix = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int iy = item_ct1.get_local_id(1) +
item_ct1.get_group(1) * item_ct1.get_local_range(1);
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float x = ((float)ix + u[pos] + 0.5f) / (float)width;
float y = ((float)iy + v[pos] + 0.5f) / (float)height;
out[pos] = texToWarp.read(x, y);
}
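// The +0.5f above shifts the lookup to the texel centre before normalising,
// so a zero displacement field reproduces the source image exactly under
// linear filtering.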
///////////////////////////////////////////////////////////////////////////////
/// \brief warp image with provided vector field, CUDA kernel wrapper.
///
/// For each output pixel there is a vector which tells which pixel
/// from a source image should be mapped to this particular output
/// pixel.
/// It is assumed that images and the vector field have the same stride and
/// resolution.
/// \param[in] src source image
/// \param[in] w width
/// \param[in] h height
/// \param[in] s stride
/// \param[in] u horizontal displacement
/// \param[in] v vertical displacement
/// \param[out] out warped image
///////////////////////////////////////////////////////////////////////////////
static void WarpImage(const float *src, int w, int h, int s, const float *u,
const float *v, float *out) {
sycl::range<3> threads(1, 6, 32);
sycl::range<3> blocks(1, iDivUp(h, threads[1]), iDivUp(w, threads[2]));
dpct::image_wrapper_base_p texToWarp;
dpct::image_data texRes;
memset(&texRes, 0, sizeof(dpct::image_data));
texRes.set_data_type(dpct::image_data_type::pitch);
texRes.set_data_ptr((void *)src);
/*
DPCT1059:7: SYCL only supports 4-channel image format. Adjust the code.
*/
texRes.set_channel(dpct::image_channel::create<float>());
texRes.set_x(w);
texRes.set_y(h);
texRes.set_pitch(s * sizeof(float));
dpct::sampling_info texDescr;
memset(&texDescr, 0, sizeof(dpct::sampling_info));
texDescr.set(sycl::addressing_mode::mirrored_repeat,
sycl::filtering_mode::linear,
sycl::coordinate_normalization_mode::normalized);
/*
DPCT1007:8: Migration of cudaTextureDesc::readMode is not supported.
*/
texDescr.readMode = cudaReadModeElementType;
checkCudaErrors(DPCT_CHECK_ERROR(
texToWarp = dpct::create_image_wrapper(texRes, texDescr)));
/*
DPCT1049:6: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
dpct::get_default_queue().submit([&](sycl::handler &cgh) {
auto texToWarp_acc =
static_cast<dpct::image_wrapper<float, 2> *>(texToWarp)->get_access(
cgh);
auto texToWarp_smpl = texToWarp->get_sampler();
cgh.parallel_for(sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
WarpingKernel(w, h, s, u, v, out,
dpct::image_accessor_ext<float, 2>(
texToWarp_smpl, texToWarp_acc),
item_ct1);
});
});
}
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/StructuredGrids/guided_HSOpticalFlow_SYCLMigration/01_dpct_output/Samples/5_Domain_Specific/HSOpticalFlow/derivativesKernel.dp.hpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include "common.h"
///////////////////////////////////////////////////////////////////////////////
/// \brief compute image derivatives
///
/// CUDA kernel, relies heavily on texture unit
/// \param[in] width image width
/// \param[in] height image height
/// \param[in] stride image stride
/// \param[out] Ix x derivative
/// \param[out] Iy y derivative
/// \param[out] Iz temporal derivative
///////////////////////////////////////////////////////////////////////////////
void ComputeDerivativesKernel(int width, int height, int stride, float *Ix,
float *Iy, float *Iz,
dpct::image_accessor_ext<float, 2> texSource,
dpct::image_accessor_ext<float, 2> texTarget,
const sycl::nd_item<3> &item_ct1) {
const int ix = item_ct1.get_local_id(2) +
item_ct1.get_group(2) * item_ct1.get_local_range(2);
const int iy = item_ct1.get_local_id(1) +
item_ct1.get_group(1) * item_ct1.get_local_range(1);
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
float t0, t1;
// x derivative
t0 = texSource.read(x - 2.0f * dx, y);
t0 -= texSource.read(x - 1.0f * dx, y) * 8.0f;
t0 += texSource.read(x + 1.0f * dx, y) * 8.0f;
t0 -= texSource.read(x + 2.0f * dx, y);
t0 /= 12.0f;
t1 = texTarget.read(x - 2.0f * dx, y);
t1 -= texTarget.read(x - 1.0f * dx, y) * 8.0f;
t1 += texTarget.read(x + 1.0f * dx, y) * 8.0f;
t1 -= texTarget.read(x + 2.0f * dx, y);
t1 /= 12.0f;
Ix[pos] = (t0 + t1) * 0.5f;
// t derivative
Iz[pos] = texTarget.read(x, y) - texSource.read(x, y);
// y derivative
t0 = texSource.read(x, y - 2.0f * dy);
t0 -= texSource.read(x, y - 1.0f * dy) * 8.0f;
t0 += texSource.read(x, y + 1.0f * dy) * 8.0f;
t0 -= texSource.read(x, y + 2.0f * dy);
t0 /= 12.0f;
t1 = texTarget.read(x, y - 2.0f * dy);
t1 -= texTarget.read(x, y - 1.0f * dy) * 8.0f;
t1 += texTarget.read(x, y + 1.0f * dy) * 8.0f;
t1 -= texTarget.read(x, y + 2.0f * dy);
t1 /= 12.0f;
Iy[pos] = (t0 + t1) * 0.5f;
}
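// The spatial derivatives above use the fourth-order central difference
//   f'(i) ~= (f(i-2) - 8 f(i-1) + 8 f(i+1) - f(i+2)) / 12
// evaluated on both frames and averaged, while Iz is the plain frame
// difference. The helper below is a scalar reference of that stencil for a
// single row; it is a sketch for illustration (its name and clamped border
// handling are assumptions; the kernel relies on the sampler's mirrored
// addressing instead).
inline float CentralDiff4(const float *row, int i, int n) {
  auto at = [&](int k) { return row[k < 0 ? 0 : (k > n - 1 ? n - 1 : k)]; };
  return (at(i - 2) - 8.0f * at(i - 1) + 8.0f * at(i + 1) - at(i + 2)) / 12.0f;
}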
///////////////////////////////////////////////////////////////////////////////
/// \brief compute image derivatives
///
/// \param[in] I0 source image
/// \param[in] I1 tracked image
/// \param[in] w image width
/// \param[in] h image height
/// \param[in] s image stride
/// \param[out] Ix x derivative
/// \param[out] Iy y derivative
/// \param[out] Iz temporal derivative
///////////////////////////////////////////////////////////////////////////////
static void ComputeDerivatives(const float *I0, const float *I1, int w, int h,
int s, float *Ix, float *Iy, float *Iz) {
sycl::range<3> threads(1, 6, 32);
sycl::range<3> blocks(1, iDivUp(h, threads[1]), iDivUp(w, threads[2]));
dpct::image_wrapper_base_p texSource, texTarget;
dpct::image_data texRes;
memset(&texRes, 0, sizeof(dpct::image_data));
texRes.set_data_type(dpct::image_data_type::pitch);
texRes.set_data_ptr((void *)I0);
/*
DPCT1059:10: SYCL only supports 4-channel image format. Adjust the code.
*/
texRes.set_channel(dpct::image_channel::create<float>());
texRes.set_x(w);
texRes.set_y(h);
texRes.set_pitch(s * sizeof(float));
dpct::sampling_info texDescr;
memset(&texDescr, 0, sizeof(dpct::sampling_info));
texDescr.set(sycl::addressing_mode::mirrored_repeat,
sycl::filtering_mode::linear,
sycl::coordinate_normalization_mode::normalized);
  /*
  DPCT1007:11: Migration of cudaTextureDesc::readMode is not supported.
  The original assignment `texDescr.readMode = cudaReadModeElementType;` is
  omitted here: dpct::sampling_info has no readMode member, and element-type
  reads are already the behavior for float image data.
  */
checkCudaErrors(DPCT_CHECK_ERROR(
texSource = dpct::create_image_wrapper(texRes, texDescr)));
memset(&texRes, 0, sizeof(dpct::image_data));
texRes.set_data_type(dpct::image_data_type::pitch);
texRes.set_data_ptr((void *)I1);
/*
DPCT1059:12: SYCL only supports 4-channel image format. Adjust the code.
*/
texRes.set_channel(dpct::image_channel::create<float>());
texRes.set_x(w);
texRes.set_y(h);
texRes.set_pitch(s * sizeof(float));
checkCudaErrors(DPCT_CHECK_ERROR(
texTarget = dpct::create_image_wrapper(texRes, texDescr)));
/*
DPCT1049:9: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
dpct::get_default_queue().submit([&](sycl::handler &cgh) {
auto texSource_acc =
static_cast<dpct::image_wrapper<float, 2> *>(texSource)->get_access(
cgh);
auto texTarget_acc =
static_cast<dpct::image_wrapper<float, 2> *>(texTarget)->get_access(
cgh);
auto texSource_smpl = texSource->get_sampler();
auto texTarget_smpl = texTarget->get_sampler();
cgh.parallel_for(
sycl::nd_range<3>(blocks * threads, threads),
[=](sycl::nd_item<3> item_ct1) {
ComputeDerivativesKernel(
w, h, s, Ix, Iy, Iz,
dpct::image_accessor_ext<float, 2>(texSource_smpl, texSource_acc),
dpct::image_accessor_ext<float, 2>(texTarget_smpl, texTarget_acc),
item_ct1);
});
});
}
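// Minimal usage sketch (pointer names are assumptions, not part of this
// sample): compute Ix, Iy and Iz between the source frame and the frame that
// has already been warped towards it, then block on the default dpct queue
// before the host consumes the derivative buffers.
static inline void ComputeDerivativesAndSync(const float *d_source,
                                             const float *d_warped, int width,
                                             int height, int stride,
                                             float *d_Ix, float *d_Iy,
                                             float *d_Iz) {
  ComputeDerivatives(d_source, d_warped, width, height, stride, d_Ix, d_Iy,
                     d_Iz);
  dpct::get_default_queue().wait();
}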
| hpp |