repo_name
stringclasses 10
values | file_path
stringlengths 29
222
| content
stringlengths 24
926k
extension
stringclasses 5
values |
---|---|---|---|
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/03_SYCL_Migration_Jacobi_Iterative/dpct_output/Common/helper_timer.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Helper Timing Functions
#ifndef COMMON_HELPER_TIMER_H_
#define COMMON_HELPER_TIMER_H_
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// includes, system
#include <vector>
// includes, project
#include <exception.h>
// Definition of the StopWatch Interface, this is used if we don't want to use
// the CUT functions But rather in a self contained class interface
// Abstract stopwatch interface: all times are reported in milliseconds.
// Concrete platform implementations (StopWatchWin / StopWatchLinux) are
// selected below by preprocessor platform checks.
class StopWatchInterface {
 public:
  StopWatchInterface() {}
  virtual ~StopWatchInterface() {}

 public:
  //! Start time measurement
  virtual void start() = 0;

  //! Stop time measurement
  virtual void stop() = 0;

  //! Reset time counters to zero
  virtual void reset() = 0;

  //! Time in msec. after start. If the stop watch is still running (i.e. there
  //! was no call to stop()) then the elapsed time is returned, otherwise the
  //! time between the last start() and stop call is returned
  virtual float getTime() = 0;

  //! Mean time to date based on the number of times the stopwatch has been
  //! _stopped_ (ie finished sessions) and the current total time
  virtual float getAverageTime() = 0;
};
//////////////////////////////////////////////////////////////////
// Begin Stopwatch timer class definitions for all OS platforms //
//////////////////////////////////////////////////////////////////
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// includes, system
#define WINDOWS_LEAN_AND_MEAN
#include <windows.h>
#undef min
#undef max
//! Windows specific implementation of StopWatch
class StopWatchWin : public StopWatchInterface {
 public:
  //! Constructor, default.  Queries the QPC tick frequency once so that
  //! tick differences can be converted to milliseconds.
  StopWatchWin()
      : start_time(),
        end_time(),
        diff_time(0.0f),
        total_time(0.0f),
        running(false),
        clock_sessions(0),
        freq(0),
        freq_set(false) {
    // NOTE(review): freq_set was just initialized to false, so this branch
    // is always taken; the flag only matters if this code is copied into a
    // context with a shared/static frequency.
    if (!freq_set) {
      // helper variable
      LARGE_INTEGER temp;

      // get the tick frequency from the OS
      QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER *>(&temp));

      // convert to ticks per *millisecond* so diffs come out in msec
      freq = (static_cast<double>(temp.QuadPart)) / 1000.0;

      // remember that the query has been made
      freq_set = true;
    }
  }

  // Destructor
  ~StopWatchWin() {}

 public:
  //! Start time measurement
  inline void start();

  //! Stop time measurement
  inline void stop();

  //! Reset time counters to zero
  inline void reset();

  //! Time in msec. after start. If the stop watch is still running (i.e. there
  //! was no call to stop()) then the elapsed time is returned, otherwise the
  //! time between the last start() and stop call is returned
  inline float getTime();

  //! Mean time to date based on the number of times the stopwatch has been
  //! _stopped_ (ie finished sessions) and the current total time
  inline float getAverageTime();

 private:
  // member variables

  //! Start of measurement (QPC ticks)
  LARGE_INTEGER start_time;
  //! End of measurement (QPC ticks)
  LARGE_INTEGER end_time;

  //! Time difference between the last start and stop (msec)
  float diff_time;

  //! TOTAL time difference between starts and stops (msec)
  float total_time;

  //! flag if the stop watch is running
  bool running;

  //! Number of times clock has been started
  //! and stopped to allow averaging
  int clock_sessions;

  //! tick frequency (ticks per millisecond)
  double freq;

  //! flag if the frequency has been set
  bool freq_set;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::start() {
  // Capture the current tick count as the session start, then mark running.
  QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
  running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop the watch: capture the end tick, fold the elapsed session time into
//! the running totals, and count one more completed session.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::stop() {
  QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&end_time));
  const double elapsed_ticks = static_cast<double>(end_time.QuadPart) -
                               static_cast<double>(start_time.QuadPart);
  // freq holds ticks-per-millisecond, so the division yields msec.
  diff_time = static_cast<float>(elapsed_ticks / freq);
  total_time += diff_time;
  ++clock_sessions;
  running = false;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::reset() {
  // Zero all accumulated state.  The running flag is left untouched: a
  // running watch keeps running, restarting its current session from "now".
  diff_time = 0;
  total_time = 0;
  clock_sessions = 0;

  if (running) {
    QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
  }
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
LARGE_INTEGER temp;
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&temp));
retval += static_cast<float>(((static_cast<double>(temp.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Average msec per COMPLETED (stopped) session; 0 if none has finished yet.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getAverageTime() {
  if (clock_sessions <= 0) {
    return 0.0f;
  }
  return total_time / clock_sessions;
}
#else
// Declarations for Stopwatch on Linux and Mac OSX
// includes, system
#include <sys/time.h>
#include <ctime>
//! Linux / OS X specific implementation of StopWatch, based on gettimeofday
//! (the original comment wrongly said "Windows specific").
class StopWatchLinux : public StopWatchInterface {
 public:
  //! Constructor, default
  StopWatchLinux()
      : start_time(),
        diff_time(0.0),
        total_time(0.0),
        running(false),
        clock_sessions(0) {}

  // Destructor
  virtual ~StopWatchLinux() {}

 public:
  //! Start time measurement
  inline void start();

  //! Stop time measurement
  inline void stop();

  //! Reset time counters to zero
  inline void reset();

  //! Time in msec. after start. If the stop watch is still running (i.e. there
  //! was no call to stop()) then the elapsed time is returned, otherwise the
  //! time between the last start() and stop call is returned
  inline float getTime();

  //! Mean time to date based on the number of times the stopwatch has been
  //! _stopped_ (ie finished sessions) and the current total time
  inline float getAverageTime();

 private:
  // helper functions

  //! Get difference between start time and current time (msec)
  inline float getDiffTime();

 private:
  // member variables

  //! Start of measurement
  struct timeval start_time;

  //! Time difference between the last start and stop (msec)
  float diff_time;

  //! TOTAL time difference between starts and stops (msec)
  float total_time;

  //! flag if the stop watch is running
  bool running;

  //! Number of times clock has been started
  //! and stopped to allow averaging
  int clock_sessions;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::start() {
  // Record the wall-clock start of a new session and mark the watch running.
  gettimeofday(&start_time, 0);
  running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop the watch: fold the just-finished session into the running totals
//! and count one more completed session.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::stop() {
  const float session = getDiffTime();
  diff_time = session;
  total_time += session;
  running = false;
  ++clock_sessions;
}
////////////////////////////////////////////////////////////////////////////////
//! Zero all accumulated counters.  The running flag is left untouched: a
//! running watch keeps running, restarting its current session from "now".
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::reset() {
  diff_time = 0;
  total_time = 0;
  clock_sessions = 0;

  if (!running) {
    return;
  }
  gettimeofday(&start_time, 0);
}
////////////////////////////////////////////////////////////////////////////////
//! Total elapsed time in msec: the sum over all finished sessions, plus the
//! session currently in flight (if any) measured up to "now".
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getTime() {
  if (!running) {
    return total_time;
  }
  return total_time + getDiffTime();
}
////////////////////////////////////////////////////////////////////////////////
//! Average msec per COMPLETED (stopped) session; 0 if none has finished yet.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getAverageTime() {
  if (clock_sessions <= 0) {
    return 0.0f;
  }
  return total_time / clock_sessions;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getDiffTime() {
struct timeval t_time;
gettimeofday(&t_time, 0);
// time difference in milli-seconds
return static_cast<float>(1000.0 * (t_time.tv_sec - start_time.tv_sec) +
(0.001 * (t_time.tv_usec - start_time.tv_usec)));
}
#endif // WIN32
////////////////////////////////////////////////////////////////////////////////
//! Timer functionality exported
////////////////////////////////////////////////////////////////////////////////
//! Create a new timer
//! @return true if a time has been created, otherwise false
//! @param name of the new timer, 0 if the creation failed
////////////////////////////////////////////////////////////////////////////////
inline bool sdkCreateTimer(StopWatchInterface **timer_interface) {
  // Allocate the platform-specific stopwatch and hand it back through
  // timer_interface.  Returns true when a timer was created.
  // printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
  // Fix: derived-to-base pointer conversion is implicit; the previous
  // reinterpret_cast was unnecessary and would silently produce a wrong
  // pointer if the class hierarchy ever used multiple inheritance.
  *timer_interface = new StopWatchWin();
#else
  *timer_interface = new StopWatchLinux();
#endif
  // new throws on failure, so this is effectively always true; kept for
  // interface compatibility with existing callers.
  return (*timer_interface != NULL);
}
////////////////////////////////////////////////////////////////////////////////
//! Destroy a timer created by sdkCreateTimer and null the caller's pointer.
//! @return always true (a NULL timer is a no-op)
//! @param timer_interface address of the timer pointer to destroy
////////////////////////////////////////////////////////////////////////////////
inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) {
  // printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface);
  StopWatchInterface *timer = *timer_interface;
  if (timer != NULL) {
    delete timer;
    *timer_interface = NULL;
  }
  return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Start the given timer.  A NULL timer is silently ignored.
//! @param timer_interface address of the timer pointer to start
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStartTimer(StopWatchInterface **timer_interface) {
  StopWatchInterface *timer = *timer_interface;
  if (timer != NULL) {
    timer->start();
  }
  return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop the given timer; accumulated totals are kept (no reset).
//! A NULL timer is silently ignored.
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStopTimer(StopWatchInterface **timer_interface) {
  StopWatchInterface *timer = *timer_interface;
  if (timer != NULL) {
    timer->stop();
  }
  return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the given timer's counters to zero.  A NULL timer is ignored.
////////////////////////////////////////////////////////////////////////////////
inline bool sdkResetTimer(StopWatchInterface **timer_interface) {
  StopWatchInterface *timer = *timer_interface;
  if (timer != NULL) {
    timer->reset();
  }
  return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Average msec per completed (stopped) run of the timer: total time divided
//! by the number of finished sessions.  Excludes any session still running.
//! Returns 0 for a NULL timer.
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) {
  StopWatchInterface *timer = *timer_interface;
  return (timer != NULL) ? timer->getAverageTime() : 0.0f;
}
////////////////////////////////////////////////////////////////////////////////
//! Total execution time in msec for the timer over all runs since the last
//! reset or timer creation.  Returns 0 for a NULL timer.
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetTimerValue(StopWatchInterface **timer_interface) {
  StopWatchInterface *timer = *timer_interface;
  return (timer != NULL) ? timer->getTime() : 0.0f;
}
#endif // COMMON_HELPER_TIMER_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/03_SYCL_Migration_Jacobi_Iterative/dpct_output/Common/helper_string.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing, timers, etc)
#ifndef COMMON_HELPER_STRING_H_
#define COMMON_HELPER_STRING_H_
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef STRCASECMP
#define STRCASECMP _stricmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP _strnicmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#ifndef SPRINTF
#define SPRINTF sprintf_s
#endif
#else // Linux Includes
#include <string.h>
#include <strings.h>
#ifndef STRCASECMP
#define STRCASECMP strcasecmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#ifndef SPRINTF
#define SPRINTF sprintf
#endif
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// CUDA Utility Helper Functions
// Returns the index of the first character in `string` that is not
// `delimiter` (i.e. the offset past a leading run such as "--").  When the
// string consists (almost) entirely of delimiters -- the first non-delimiter
// index reaches the last character or beyond -- 0 is returned so the caller
// falls back to the whole token.
inline int stringRemoveDelimiter(char delimiter, const char *string) {
  int first_non_delim = 0;
  for (; string[first_non_delim] == delimiter; ++first_non_delim) {
  }

  const int last_index = static_cast<int>(strlen(string) - 1);
  return (first_non_delim >= last_index) ? 0 : first_non_delim;
}
// Locate the extension (text after the last '.') in `filename`.
//
// On success, *extension points into `filename` just past the '.' and the
// offset of that position is returned.  When no '.' is found, *extension is
// set to NULL and 0 is returned.  Legacy quirks preserved for compatibility:
// the scan never inspects index 0, so a '.' at index 0 or 1 (e.g. ".txt",
// "a.b") is reported as "no extension".
inline int getFileExtension(char *filename, char **extension) {
  int string_length = static_cast<int>(strlen(filename));

  // Fix: the loop below starts at filename[string_length] and decrements
  // before its termination test, so an empty string would read filename[-1]
  // (out-of-bounds).  Guard it explicitly.
  if (string_length == 0) {
    *extension = NULL;
    return 0;
  }

  while (filename[string_length--] != '.') {
    if (string_length == 0) break;
  }

  // Post-decrement left string_length one short of the '.'; +2 lands on the
  // first character after the '.'.
  if (string_length > 0) string_length += 2;

  if (string_length == 0)
    *extension = NULL;
  else
    *extension = &filename[string_length];

  return string_length;
}
// True when some argv entry (minus its leading '-' run) matches string_ref
// exactly, up to an optional "=value" suffix.  Comparison is
// case-insensitive and requires equal lengths (no prefix matches).
inline bool checkCmdLineFlag(const int argc, const char **argv,
                             const char *string_ref) {
  if (argc < 1) {
    return false;
  }

  const int ref_length = static_cast<int>(strlen(string_ref));

  for (int i = 1; i < argc; i++) {
    const char *arg = &argv[i][stringRemoveDelimiter('-', argv[i])];

    // Length of the flag name alone: stop at '=' when a value is attached.
    const char *equal_pos = strchr(arg, '=');
    const int arg_length = static_cast<int>(
        equal_pos == 0 ? strlen(arg) : equal_pos - arg);

    if (arg_length == ref_length && !STRNCASECMP(arg, string_ref, ref_length)) {
      return true;
    }
  }

  return false;
}
// This function wraps the CUDA Driver API into a template function
// Scan argv for "-<string_ref>=<int>" (case-insensitive prefix match on the
// reference).  On the FIRST match the trailing text is parsed with atoi and
// stored into *value, then scanning stops.  *value is left untouched when
// the matched token has nothing after the reference string.  Returns whether
// a match was found.
template <class T>
inline bool getCmdLineArgumentValue(const int argc, const char **argv,
                                    const char *string_ref, T *value) {
  if (argc < 1) {
    return false;
  }

  const int ref_length = static_cast<int>(strlen(string_ref));

  for (int i = 1; i < argc; i++) {
    const char *arg = &argv[i][stringRemoveDelimiter('-', argv[i])];

    if (STRNCASECMP(arg, string_ref, ref_length) != 0) {
      continue;
    }

    if (ref_length + 1 <= static_cast<int>(strlen(arg))) {
      // Skip a single '=' separator when present.
      const int skip = (arg[ref_length] == '=') ? 1 : 0;
      *value = (T)atoi(&arg[ref_length + skip]);
    }
    return true;
  }

  return false;
}
// Returns the integer from "-<string_ref>=<n>"; 0 when the flag is present
// with no value, and 0 when the flag is absent.  NOTE: scanning does not
// stop at the first match, so when the flag appears several times the LAST
// occurrence wins (preserved from the original behavior).
inline int getCmdLineArgumentInt(const int argc, const char **argv,
                                 const char *string_ref) {
  bool found = false;
  int result = -1;

  if (argc >= 1) {
    const int ref_length = static_cast<int>(strlen(string_ref));

    for (int i = 1; i < argc; i++) {
      const char *arg = &argv[i][stringRemoveDelimiter('-', argv[i])];

      if (STRNCASECMP(arg, string_ref, ref_length) != 0) {
        continue;
      }

      if (ref_length + 1 <= static_cast<int>(strlen(arg))) {
        const int skip = (arg[ref_length] == '=') ? 1 : 0;
        result = atoi(&arg[ref_length + skip]);
      } else {
        result = 0;
      }
      found = true;
    }
  }

  return found ? result : 0;
}
// Returns the float from "-<string_ref>=<x>"; 0 when the flag is present
// with no value, and 0 when the flag is absent.  As with the int variant,
// the LAST occurrence of a repeated flag wins.
inline float getCmdLineArgumentFloat(const int argc, const char **argv,
                                     const char *string_ref) {
  bool found = false;
  float result = -1;

  if (argc >= 1) {
    const int ref_length = static_cast<int>(strlen(string_ref));

    for (int i = 1; i < argc; i++) {
      const char *arg = &argv[i][stringRemoveDelimiter('-', argv[i])];

      if (STRNCASECMP(arg, string_ref, ref_length) != 0) {
        continue;
      }

      if (ref_length + 1 <= static_cast<int>(strlen(arg))) {
        const int skip = (arg[ref_length] == '=') ? 1 : 0;
        result = static_cast<float>(atof(&arg[ref_length + skip]));
      } else {
        result = 0.f;
      }
      found = true;
    }
  }

  return found ? result : 0;
}
// Points *string_retval at the text following "-<string_ref>=" inside argv
// itself (no copy is made -- the pointer aliases argv storage).  Set to NULL
// when the flag is absent.  The LAST occurrence of a repeated flag wins.
inline bool getCmdLineArgumentString(const int argc, const char **argv,
                                     const char *string_ref,
                                     char **string_retval) {
  bool found = false;

  if (argc >= 1) {
    const int ref_length = static_cast<int>(strlen(string_ref));

    for (int i = 1; i < argc; i++) {
      char *arg =
          const_cast<char *>(&argv[i][stringRemoveDelimiter('-', argv[i])]);

      if (STRNCASECMP(arg, string_ref, ref_length) != 0) {
        continue;
      }

      // +1 skips the '=' separator (assumed present, as in the original).
      *string_retval = &arg[ref_length + 1];
      found = true;
    }
  }

  if (!found) {
    *string_retval = NULL;
  }

  return found;
}
//////////////////////////////////////////////////////////////////////////////
//! Find the path for a file assuming that
//! files are found in the searchPath.
//!
//! @return the path if succeeded, otherwise 0
//! @param filename name of the file
//! @param executable_path optional absolute path of the executable
//////////////////////////////////////////////////////////////////////////////
inline char *sdkFindFilePath(const char *filename,
                             const char *executable_path) {
  // Searches a fixed list of relative directories (rooted at the current
  // working directory) for `filename` and returns the first match as a
  // malloc()'d string -- the CALLER must free() it -- or 0 when the file is
  // not found anywhere.
  //
  // <executable_name> defines a variable that is replaced with the name of the
  // executable
  // Typical relative search paths to locate needed companion files (e.g. sample
  // input data, or JIT source files) The origin for the relative search may be
  // the .exe file, a .bat file launching an .exe, a browser .exe launching the
  // .exe or .bat, etc
  const char *searchPath[] = {
      "./",                                          // same dir
      "./data/",                                     // same dir
      "../../../../Samples/<executable_name>/",      // up 4 in tree
      "../../../Samples/<executable_name>/",         // up 3 in tree
      "../../Samples/<executable_name>/",            // up 2 in tree
      "../../../../Samples/<executable_name>/data/",  // up 4 in tree
      "../../../Samples/<executable_name>/data/",     // up 3 in tree
      "../../Samples/<executable_name>/data/",        // up 2 in tree
      "../../../../Samples/0_Introduction/<executable_name>/",  // up 4 in tree
      "../../../Samples/0_Introduction/<executable_name>/",     // up 3 in tree
      "../../Samples/0_Introduction/<executable_name>/",        // up 2 in tree
      "../../../../Samples/1_Utilities/<executable_name>/",     // up 4 in tree
      "../../../Samples/1_Utilities/<executable_name>/",        // up 3 in tree
      "../../Samples/1_Utilities/<executable_name>/",           // up 2 in tree
      "../../../../Samples/2_Concepts_and_Techniques/<executable_name>/",  // up 4 in tree
      "../../../Samples/2_Concepts_and_Techniques/<executable_name>/",  // up 3 in tree
      "../../Samples/2_Concepts_and_Techniques/<executable_name>/",  // up 2 in tree
      "../../../../Samples/3_CUDA_Features/<executable_name>/",  // up 4 in tree
      "../../../Samples/3_CUDA_Features/<executable_name>/",  // up 3 in tree
      "../../Samples/3_CUDA_Features/<executable_name>/",     // up 2 in tree
      "../../../../Samples/4_CUDA_Libraries/<executable_name>/",  // up 4 in tree
      "../../../Samples/4_CUDA_Libraries/<executable_name>/",  // up 3 in tree
      "../../Samples/4_CUDA_Libraries/<executable_name>/",     // up 2 in tree
      "../../../../Samples/5_Domain_Specific/<executable_name>/",  // up 4 in tree
      "../../../Samples/5_Domain_Specific/<executable_name>/",  // up 3 in tree
      "../../Samples/5_Domain_Specific/<executable_name>/",     // up 2 in tree
      "../../../../Samples/6_Performance/<executable_name>/",   // up 4 in tree
      "../../../Samples/6_Performance/<executable_name>/",      // up 3 in tree
      "../../Samples/6_Performance/<executable_name>/",         // up 2 in tree
      "../../../../Samples/0_Introduction/<executable_name>/data/",  // up 4 in tree
      "../../../Samples/0_Introduction/<executable_name>/data/",  // up 3 in tree
      "../../Samples/0_Introduction/<executable_name>/data/",  // up 2 in tree
      "../../../../Samples/1_Utilities/<executable_name>/data/",  // up 4 in tree
      "../../../Samples/1_Utilities/<executable_name>/data/",  // up 3 in tree
      "../../Samples/1_Utilities/<executable_name>/data/",     // up 2 in tree
      "../../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/",  // up 4 in tree
      "../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/",  // up 3 in tree
      "../../Samples/2_Concepts_and_Techniques/<executable_name>/data/",  // up 2 in tree
      "../../../../Samples/3_CUDA_Features/<executable_name>/data/",  // up 4 in tree
      "../../../Samples/3_CUDA_Features/<executable_name>/data/",  // up 3 in tree
      "../../Samples/3_CUDA_Features/<executable_name>/data/",  // up 2 in tree
      "../../../../Samples/4_CUDA_Libraries/<executable_name>/data/",  // up 4 in tree
      "../../../Samples/4_CUDA_Libraries/<executable_name>/data/",  // up 3 in tree
      "../../Samples/4_CUDA_Libraries/<executable_name>/data/",  // up 2 in tree
      "../../../../Samples/5_Domain_Specific/<executable_name>/data/",  // up 4 in tree
      "../../../Samples/5_Domain_Specific/<executable_name>/data/",  // up 3 in tree
      "../../Samples/5_Domain_Specific/<executable_name>/data/",  // up 2 in tree
      "../../../../Samples/6_Performance/<executable_name>/data/",  // up 4 in tree
      "../../../Samples/6_Performance/<executable_name>/data/",  // up 3 in tree
      "../../Samples/6_Performance/<executable_name>/data/",     // up 2 in tree
      "../../../../Common/data/",                                // up 4 in tree
      "../../../Common/data/",                                   // up 3 in tree
      "../../Common/data/"                                       // up 2 in tree
  };

  // Extract the executable name (basename of executable_path, minus a
  // trailing ".exe" on Windows) for the <executable_name> substitution.
  std::string executable_name;

  if (executable_path != 0) {
    executable_name = std::string(executable_path);

#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
    // Windows path delimiter
    size_t delimiter_pos = executable_name.find_last_of('\\');
    executable_name.erase(0, delimiter_pos + 1);

    if (executable_name.rfind(".exe") != std::string::npos) {
      // we strip .exe, only if the .exe is found
      executable_name.resize(executable_name.size() - 4);
    }
#else
    // Linux & OSX path delimiter
    size_t delimiter_pos = executable_name.find_last_of('/');
    executable_name.erase(0, delimiter_pos + 1);
#endif
  }

  // Loop over all search paths and return the first hit
  for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) {
    std::string path(searchPath[i]);
    size_t executable_name_pos = path.find("<executable_name>");

    // If there is executable_name variable in the searchPath
    // replace it with the value
    if (executable_name_pos != std::string::npos) {
      if (executable_path != 0) {
        path.replace(executable_name_pos, strlen("<executable_name>"),
                     executable_name);
      } else {
        // Skip this path entry if no executable argument is given
        continue;
      }
    }

#ifdef _DEBUG
    printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str());
#endif

    // Test if the file exists
    path.append(filename);
    FILE *fp;
    FOPEN(fp, path.c_str(), "rb");

    if (fp != NULL) {
      fclose(fp);
      // File found
      // returning an allocated array here for backwards compatibility reasons
      char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1));
      STRCPY(file_path, path.length() + 1, path.c_str());
      return file_path;
    }

    // NOTE(review): fp is always NULL here (the non-NULL case returned
    // above), so this close is dead code; kept unchanged.
    if (fp) {
      fclose(fp);
    }
  }

  // File not found
  printf("\nerror: sdkFindFilePath: file <%s> not found!\n", filename);
  return 0;
}
#endif // COMMON_HELPER_STRING_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/03_SYCL_Migration_Jacobi_Iterative/dpct_output/Common/exception.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* CUda UTility Library */
#ifndef COMMON_EXCEPTION_H_
#define COMMON_EXCEPTION_H_
// includes, system
#include <stdlib.h>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>
//! Exception wrapper.
//! @param Std_Exception Exception out of namespace std for easy typing.
template <class Std_Exception>
class Exception : public Std_Exception {
 public:
  //! @brief Static construction interface
  //! @return Always throws ( Located_Exception<Exception> )
  //! @param file file in which the Exception occurs
  //! @param line line in which the Exception occurs
  //! @param detailed details on the code fragment causing the Exception
  static void throw_it(const char *file, const int line,
                       const char *detailed = "-");

  //! Static construction interface
  //! @return Always throws ( Located_Exception<Exception> )
  //! @param file file in which the Exception occurs
  //! @param line line in which the Exception occurs
  //! @param detailed details on the code fragment causing the Exception
  static void throw_it(const char *file, const int line,
                       const std::string &detailed);

  //! Destructor
  virtual ~Exception() throw();

 private:
  //! Constructor, default (private) -- instances are only created and thrown
  //! via throw_it().
  Exception();

  //! Constructor, standard
  //! @param str string returned by what()
  explicit Exception(const std::string &str);
};
////////////////////////////////////////////////////////////////////////////////
//! Exception handler function for arbitrary exceptions
//! @param ex exception to handle
////////////////////////////////////////////////////////////////////////////////
//! Generic last-resort handler: report the exception text and terminate.
//! @param ex any exception-like object exposing what()
template <class Exception_Typ>
inline void handleException(const Exception_Typ &ex) {
  // Emit the diagnostic on stderr, then end the process with a failure code.
  const char *message = ex.what();
  std::cerr << message << std::endl;
  exit(EXIT_FAILURE);
}
//! Convenience macros
//! Exception caused by dynamic program behavior, e.g. file does not exist
#define RUNTIME_EXCEPTION(msg) \
Exception<std::runtime_error>::throw_it(__FILE__, __LINE__, msg)
//! Logic exception in program, e.g. an assert failed
#define LOGIC_EXCEPTION(msg) \
Exception<std::logic_error>::throw_it(__FILE__, __LINE__, msg)
//! Out of range exception
#define RANGE_EXCEPTION(msg) \
Exception<std::range_error>::throw_it(__FILE__, __LINE__, msg)
////////////////////////////////////////////////////////////////////////////////
//! Implementation
// includes, system
#include <sstream>
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed infos.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
                                        const char *detailed) {
  // Building the message with a stream is heavy-weight, but exceptions are
  // not meant for performance-critical / release code paths anyway.
  std::ostringstream message;
  message << "Exception in file '" << file << "' in line " << line << "\n"
          << "Detailed description: " << detailed << "\n";
  throw Exception(message.str());
}
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed infos.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
                                        const std::string &msg) {
  // Delegate to the C-string overload, which formats the location and throws.
  const char *detail_text = msg.c_str();
  throw_it(file, line, detail_text);
}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, default (private).
////////////////////////////////////////////////////////////////////////////////
// Default constructor (private): used when no detail message is available;
// what() will report "Unknown Exception.".
template <class Std_Exception>
Exception<Std_Exception>::Exception() : Std_Exception("Unknown Exception.\n") {}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, standard (private).
//! String returned by what().
////////////////////////////////////////////////////////////////////////////////
// Standard constructor (private): forwards the message string to the wrapped
// std exception base so it is returned verbatim by what().
template <class Std_Exception>
Exception<Std_Exception>::Exception(const std::string &s) : Std_Exception(s) {}
////////////////////////////////////////////////////////////////////////////////
//! Destructor
////////////////////////////////////////////////////////////////////////////////
// Destructor: no-throw, as required for exception types.
template <class Std_Exception>
Exception<Std_Exception>::~Exception() throw() {}
// functions, exported
#endif // COMMON_EXCEPTION_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/03_SYCL_Migration_Jacobi_Iterative/dpct_output/Common/helper_cuda.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions for initialization and error checking
#ifndef COMMON_HELPER_CUDA_H_
#define COMMON_HELPER_CUDA_H_
#pragma once
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_string.h>
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// Note, it is required that your SDK sample to include the proper header
// files, please refer the CUDA examples for examples of the needed CUDA
// headers, which may change depending on which CUDA functions are used.
// CUDA Runtime error messages
#ifdef __DPCT_HPP__
// DPCT migration stub: SYCL reports failures via exceptions rather than
// returned status codes, so there is no code-to-name mapping here; a fixed
// placeholder string is returned regardless of the argument.
static const char *_cudaGetErrorEnum(dpct::err0 error) {
  /*
  DPCT1009:16: SYCL uses exceptions to report errors and does not use the error
  codes. The original code was commented out and a warning string was inserted.
  You need to rewrite this code.
  */
  return "cudaGetErrorName is not supported" /*cudaGetErrorName(error)*/;
}
#endif
#ifdef CUDA_DRIVER_API
// CUDA Driver API errors
// Resolves a CUresult into its symbolic name via the driver itself.
static const char *_cudaGetErrorEnum(CUresult error) {
  // Fallback buffer when the driver does not recognize the error code.
  static char unknown[] = "<unknown>";
  const char *ret = NULL;
  cuGetErrorName(error, &ret);
  // cuGetErrorName leaves ret untouched on failure, hence the null check.
  return ret ? ret : unknown;
}
#endif
#ifdef CUBLAS_API_H_
// cuBLAS API errors
// Translates a cublasStatus_t code into its symbolic enum name for logging.
static const char *_cudaGetErrorEnum(cublasStatus_t error) {
  switch (error) {
    case CUBLAS_STATUS_SUCCESS:
      return "CUBLAS_STATUS_SUCCESS";
    case CUBLAS_STATUS_NOT_INITIALIZED:
      return "CUBLAS_STATUS_NOT_INITIALIZED";
    case CUBLAS_STATUS_ALLOC_FAILED:
      return "CUBLAS_STATUS_ALLOC_FAILED";
    case CUBLAS_STATUS_INVALID_VALUE:
      return "CUBLAS_STATUS_INVALID_VALUE";
    case CUBLAS_STATUS_ARCH_MISMATCH:
      return "CUBLAS_STATUS_ARCH_MISMATCH";
    case CUBLAS_STATUS_MAPPING_ERROR:
      return "CUBLAS_STATUS_MAPPING_ERROR";
    case CUBLAS_STATUS_EXECUTION_FAILED:
      return "CUBLAS_STATUS_EXECUTION_FAILED";
    case CUBLAS_STATUS_INTERNAL_ERROR:
      return "CUBLAS_STATUS_INTERNAL_ERROR";
    case CUBLAS_STATUS_NOT_SUPPORTED:
      return "CUBLAS_STATUS_NOT_SUPPORTED";
    case CUBLAS_STATUS_LICENSE_ERROR:
      return "CUBLAS_STATUS_LICENSE_ERROR";
  }
  // Unrecognized status code (e.g. from a newer library version).
  return "<unknown>";
}
#endif
#ifdef _CUFFT_H_
// cuFFT API errors
// Translates a cufftResult code into its symbolic enum name for logging.
static const char *_cudaGetErrorEnum(cufftResult error) {
  switch (error) {
    case CUFFT_SUCCESS:
      return "CUFFT_SUCCESS";
    case CUFFT_INVALID_PLAN:
      return "CUFFT_INVALID_PLAN";
    case CUFFT_ALLOC_FAILED:
      return "CUFFT_ALLOC_FAILED";
    case CUFFT_INVALID_TYPE:
      return "CUFFT_INVALID_TYPE";
    case CUFFT_INVALID_VALUE:
      return "CUFFT_INVALID_VALUE";
    case CUFFT_INTERNAL_ERROR:
      return "CUFFT_INTERNAL_ERROR";
    case CUFFT_EXEC_FAILED:
      return "CUFFT_EXEC_FAILED";
    case CUFFT_SETUP_FAILED:
      return "CUFFT_SETUP_FAILED";
    case CUFFT_INVALID_SIZE:
      return "CUFFT_INVALID_SIZE";
    case CUFFT_UNALIGNED_DATA:
      return "CUFFT_UNALIGNED_DATA";
    case CUFFT_INCOMPLETE_PARAMETER_LIST:
      return "CUFFT_INCOMPLETE_PARAMETER_LIST";
    case CUFFT_INVALID_DEVICE:
      return "CUFFT_INVALID_DEVICE";
    case CUFFT_PARSE_ERROR:
      return "CUFFT_PARSE_ERROR";
    case CUFFT_NO_WORKSPACE:
      return "CUFFT_NO_WORKSPACE";
    case CUFFT_NOT_IMPLEMENTED:
      return "CUFFT_NOT_IMPLEMENTED";
    case CUFFT_LICENSE_ERROR:
      return "CUFFT_LICENSE_ERROR";
    case CUFFT_NOT_SUPPORTED:
      return "CUFFT_NOT_SUPPORTED";
  }
  // Unrecognized status code (e.g. from a newer library version).
  return "<unknown>";
}
#endif
#ifdef CUSPARSEAPI
// cuSPARSE API errors
// Translates a cusparseStatus_t code into its symbolic enum name for logging.
static const char *_cudaGetErrorEnum(cusparseStatus_t error) {
  switch (error) {
    case CUSPARSE_STATUS_SUCCESS:
      return "CUSPARSE_STATUS_SUCCESS";
    case CUSPARSE_STATUS_NOT_INITIALIZED:
      return "CUSPARSE_STATUS_NOT_INITIALIZED";
    case CUSPARSE_STATUS_ALLOC_FAILED:
      return "CUSPARSE_STATUS_ALLOC_FAILED";
    case CUSPARSE_STATUS_INVALID_VALUE:
      return "CUSPARSE_STATUS_INVALID_VALUE";
    case CUSPARSE_STATUS_ARCH_MISMATCH:
      return "CUSPARSE_STATUS_ARCH_MISMATCH";
    case CUSPARSE_STATUS_MAPPING_ERROR:
      return "CUSPARSE_STATUS_MAPPING_ERROR";
    case CUSPARSE_STATUS_EXECUTION_FAILED:
      return "CUSPARSE_STATUS_EXECUTION_FAILED";
    case CUSPARSE_STATUS_INTERNAL_ERROR:
      return "CUSPARSE_STATUS_INTERNAL_ERROR";
    case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
      return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
  }
  // Unrecognized status code (e.g. from a newer library version).
  return "<unknown>";
}
#endif
#ifdef CUSOLVER_COMMON_H_
// cuSOLVER API errors
// Translates a cusolverStatus_t code into its symbolic enum name for logging.
static const char *_cudaGetErrorEnum(cusolverStatus_t error) {
  switch (error) {
    case CUSOLVER_STATUS_SUCCESS:
      return "CUSOLVER_STATUS_SUCCESS";
    case CUSOLVER_STATUS_NOT_INITIALIZED:
      return "CUSOLVER_STATUS_NOT_INITIALIZED";
    case CUSOLVER_STATUS_ALLOC_FAILED:
      return "CUSOLVER_STATUS_ALLOC_FAILED";
    case CUSOLVER_STATUS_INVALID_VALUE:
      return "CUSOLVER_STATUS_INVALID_VALUE";
    case CUSOLVER_STATUS_ARCH_MISMATCH:
      return "CUSOLVER_STATUS_ARCH_MISMATCH";
    case CUSOLVER_STATUS_MAPPING_ERROR:
      return "CUSOLVER_STATUS_MAPPING_ERROR";
    case CUSOLVER_STATUS_EXECUTION_FAILED:
      return "CUSOLVER_STATUS_EXECUTION_FAILED";
    case CUSOLVER_STATUS_INTERNAL_ERROR:
      return "CUSOLVER_STATUS_INTERNAL_ERROR";
    case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
      return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
    case CUSOLVER_STATUS_NOT_SUPPORTED:
      // NOTE(review): trailing space inside this literal is preserved from
      // the upstream NVIDIA sample.
      return "CUSOLVER_STATUS_NOT_SUPPORTED ";
    case CUSOLVER_STATUS_ZERO_PIVOT:
      return "CUSOLVER_STATUS_ZERO_PIVOT";
    case CUSOLVER_STATUS_INVALID_LICENSE:
      return "CUSOLVER_STATUS_INVALID_LICENSE";
  }
  // Unrecognized status code (e.g. from a newer library version).
  return "<unknown>";
}
#endif
#ifdef CURAND_H_
// cuRAND API errors
// Maps a raw cuRAND status value to its symbolic enum name; returns
// "<unknown>" for any code not in the table.
static const char *_cudaGetErrorEnum(int error) {
  // Numeric cuRAND status codes paired with their symbolic names.
  struct StatusName {
    int code;
    const char *name;
  };
  static const StatusName kStatusNames[] = {
      {0, "CURAND_STATUS_SUCCESS"},
      {100, "CURAND_STATUS_VERSION_MISMATCH"},
      {101, "CURAND_STATUS_NOT_INITIALIZED"},
      {102, "CURAND_STATUS_ALLOCATION_FAILED"},
      {103, "CURAND_STATUS_TYPE_ERROR"},
      {104, "CURAND_STATUS_OUT_OF_RANGE"},
      {105, "CURAND_STATUS_LENGTH_NOT_MULTIPLE"},
      {106, "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED"},
      {201, "CURAND_STATUS_LAUNCH_FAILURE"},
      {202, "CURAND_STATUS_PREEXISTING_FAILURE"},
      {203, "CURAND_STATUS_INITIALIZATION_FAILED"},
      {204, "CURAND_STATUS_ARCH_MISMATCH"},
      {999, "CURAND_STATUS_INTERNAL_ERROR"},
  };
  for (const StatusName &entry : kStatusNames) {
    if (entry.code == error) {
      return entry.name;
    }
  }
  return "<unknown>";
}
#endif
#ifdef NVJPEGAPI
// nvJPEG API errors
// Translates an nvjpegStatus_t code into its symbolic enum name for logging.
static const char *_cudaGetErrorEnum(nvjpegStatus_t error) {
  switch (error) {
    case NVJPEG_STATUS_SUCCESS:
      return "NVJPEG_STATUS_SUCCESS";
    case NVJPEG_STATUS_NOT_INITIALIZED:
      return "NVJPEG_STATUS_NOT_INITIALIZED";
    case NVJPEG_STATUS_INVALID_PARAMETER:
      return "NVJPEG_STATUS_INVALID_PARAMETER";
    case NVJPEG_STATUS_BAD_JPEG:
      return "NVJPEG_STATUS_BAD_JPEG";
    case NVJPEG_STATUS_JPEG_NOT_SUPPORTED:
      return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED";
    case NVJPEG_STATUS_ALLOCATOR_FAILURE:
      return "NVJPEG_STATUS_ALLOCATOR_FAILURE";
    case NVJPEG_STATUS_EXECUTION_FAILED:
      return "NVJPEG_STATUS_EXECUTION_FAILED";
    case NVJPEG_STATUS_ARCH_MISMATCH:
      return "NVJPEG_STATUS_ARCH_MISMATCH";
    case NVJPEG_STATUS_INTERNAL_ERROR:
      return "NVJPEG_STATUS_INTERNAL_ERROR";
  }
  // Unrecognized status code (e.g. from a newer library version).
  return "<unknown>";
}
#endif
#ifdef NV_NPPIDEFS_H
// NPP API errors
// Translates an NppStatus code into its symbolic enum name. Several enum
// identifiers were renamed across NPP releases, so the blocks below are
// gated on the NPP version macros (0xMmm-encoded major/minor).
static const char *_cudaGetErrorEnum(NppStatus error) {
  switch (error) {
    case NPP_NOT_SUPPORTED_MODE_ERROR:
      return "NPP_NOT_SUPPORTED_MODE_ERROR";
    case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR:
      return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR";
    case NPP_RESIZE_NO_OPERATION_ERROR:
      return "NPP_RESIZE_NO_OPERATION_ERROR";
    case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY:
      return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
    // Pre-CUDA-5.5 enum spellings.
    case NPP_BAD_ARG_ERROR:
      return "NPP_BAD_ARGUMENT_ERROR";
    case NPP_COEFF_ERROR:
      return "NPP_COEFFICIENT_ERROR";
    case NPP_RECT_ERROR:
      return "NPP_RECTANGLE_ERROR";
    case NPP_QUAD_ERROR:
      return "NPP_QUADRANGLE_ERROR";
    case NPP_MEM_ALLOC_ERR:
      return "NPP_MEMORY_ALLOCATION_ERROR";
    case NPP_HISTO_NUMBER_OF_LEVELS_ERROR:
      return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
    case NPP_INVALID_INPUT:
      return "NPP_INVALID_INPUT";
    case NPP_POINTER_ERROR:
      return "NPP_POINTER_ERROR";
    case NPP_WARNING:
      return "NPP_WARNING";
    case NPP_ODD_ROI_WARNING:
      return "NPP_ODD_ROI_WARNING";
#else
    // These are for CUDA 5.5 or higher
    case NPP_BAD_ARGUMENT_ERROR:
      return "NPP_BAD_ARGUMENT_ERROR";
    case NPP_COEFFICIENT_ERROR:
      return "NPP_COEFFICIENT_ERROR";
    case NPP_RECTANGLE_ERROR:
      return "NPP_RECTANGLE_ERROR";
    case NPP_QUADRANGLE_ERROR:
      return "NPP_QUADRANGLE_ERROR";
    case NPP_MEMORY_ALLOCATION_ERR:
      return "NPP_MEMORY_ALLOCATION_ERROR";
    case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR:
      return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
    case NPP_INVALID_HOST_POINTER_ERROR:
      return "NPP_INVALID_HOST_POINTER_ERROR";
    case NPP_INVALID_DEVICE_POINTER_ERROR:
      return "NPP_INVALID_DEVICE_POINTER_ERROR";
#endif
    case NPP_LUT_NUMBER_OF_LEVELS_ERROR:
      return "NPP_LUT_NUMBER_OF_LEVELS_ERROR";
    case NPP_TEXTURE_BIND_ERROR:
      return "NPP_TEXTURE_BIND_ERROR";
    case NPP_WRONG_INTERSECTION_ROI_ERROR:
      return "NPP_WRONG_INTERSECTION_ROI_ERROR";
    case NPP_NOT_EVEN_STEP_ERROR:
      return "NPP_NOT_EVEN_STEP_ERROR";
    case NPP_INTERPOLATION_ERROR:
      return "NPP_INTERPOLATION_ERROR";
    case NPP_RESIZE_FACTOR_ERROR:
      return "NPP_RESIZE_FACTOR_ERROR";
    case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR:
      return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
    // Pre-CUDA-5.5 enum spellings.
    case NPP_MEMFREE_ERR:
      return "NPP_MEMFREE_ERR";
    case NPP_MEMSET_ERR:
      return "NPP_MEMSET_ERR";
    case NPP_MEMCPY_ERR:
      return "NPP_MEMCPY_ERROR";
    case NPP_MIRROR_FLIP_ERR:
      return "NPP_MIRROR_FLIP_ERR";
#else
    case NPP_MEMFREE_ERROR:
      return "NPP_MEMFREE_ERROR";
    case NPP_MEMSET_ERROR:
      return "NPP_MEMSET_ERROR";
    case NPP_MEMCPY_ERROR:
      return "NPP_MEMCPY_ERROR";
    case NPP_MIRROR_FLIP_ERROR:
      return "NPP_MIRROR_FLIP_ERROR";
#endif
    case NPP_ALIGNMENT_ERROR:
      return "NPP_ALIGNMENT_ERROR";
    case NPP_STEP_ERROR:
      return "NPP_STEP_ERROR";
    case NPP_SIZE_ERROR:
      return "NPP_SIZE_ERROR";
    case NPP_NULL_POINTER_ERROR:
      return "NPP_NULL_POINTER_ERROR";
    case NPP_CUDA_KERNEL_EXECUTION_ERROR:
      return "NPP_CUDA_KERNEL_EXECUTION_ERROR";
    case NPP_NOT_IMPLEMENTED_ERROR:
      return "NPP_NOT_IMPLEMENTED_ERROR";
    case NPP_ERROR:
      return "NPP_ERROR";
    case NPP_SUCCESS:
      return "NPP_SUCCESS";
    case NPP_WRONG_INTERSECTION_QUAD_WARNING:
      return "NPP_WRONG_INTERSECTION_QUAD_WARNING";
    case NPP_MISALIGNED_DST_ROI_WARNING:
      return "NPP_MISALIGNED_DST_ROI_WARNING";
    case NPP_AFFINE_QUAD_INCORRECT_WARNING:
      return "NPP_AFFINE_QUAD_INCORRECT_WARNING";
    case NPP_DOUBLE_SIZE_WARNING:
      return "NPP_DOUBLE_SIZE_WARNING";
    case NPP_WRONG_INTERSECTION_ROI_WARNING:
      return "NPP_WRONG_INTERSECTION_ROI_WARNING";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000
    /* These are 6.0 or higher */
    case NPP_LUT_PALETTE_BITSIZE_ERROR:
      return "NPP_LUT_PALETTE_BITSIZE_ERROR";
    case NPP_ZC_MODE_NOT_SUPPORTED_ERROR:
      return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR";
    case NPP_QUALITY_INDEX_ERROR:
      return "NPP_QUALITY_INDEX_ERROR";
    case NPP_CHANNEL_ORDER_ERROR:
      return "NPP_CHANNEL_ORDER_ERROR";
    case NPP_ZERO_MASK_VALUE_ERROR:
      return "NPP_ZERO_MASK_VALUE_ERROR";
    case NPP_NUMBER_OF_CHANNELS_ERROR:
      return "NPP_NUMBER_OF_CHANNELS_ERROR";
    case NPP_COI_ERROR:
      return "NPP_COI_ERROR";
    case NPP_DIVISOR_ERROR:
      return "NPP_DIVISOR_ERROR";
    case NPP_CHANNEL_ERROR:
      return "NPP_CHANNEL_ERROR";
    case NPP_STRIDE_ERROR:
      return "NPP_STRIDE_ERROR";
    case NPP_ANCHOR_ERROR:
      return "NPP_ANCHOR_ERROR";
    case NPP_MASK_SIZE_ERROR:
      return "NPP_MASK_SIZE_ERROR";
    case NPP_MOMENT_00_ZERO_ERROR:
      return "NPP_MOMENT_00_ZERO_ERROR";
    case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR:
      return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR";
    case NPP_THRESHOLD_ERROR:
      return "NPP_THRESHOLD_ERROR";
    case NPP_CONTEXT_MATCH_ERROR:
      return "NPP_CONTEXT_MATCH_ERROR";
    case NPP_FFT_FLAG_ERROR:
      return "NPP_FFT_FLAG_ERROR";
    case NPP_FFT_ORDER_ERROR:
      return "NPP_FFT_ORDER_ERROR";
    case NPP_SCALE_RANGE_ERROR:
      return "NPP_SCALE_RANGE_ERROR";
    case NPP_DATA_TYPE_ERROR:
      return "NPP_DATA_TYPE_ERROR";
    case NPP_OUT_OFF_RANGE_ERROR:
      return "NPP_OUT_OFF_RANGE_ERROR";
    case NPP_DIVIDE_BY_ZERO_ERROR:
      return "NPP_DIVIDE_BY_ZERO_ERROR";
    case NPP_RANGE_ERROR:
      return "NPP_RANGE_ERROR";
    case NPP_NO_MEMORY_ERROR:
      return "NPP_NO_MEMORY_ERROR";
    case NPP_ERROR_RESERVED:
      return "NPP_ERROR_RESERVED";
    case NPP_NO_OPERATION_WARNING:
      return "NPP_NO_OPERATION_WARNING";
    case NPP_DIVIDE_BY_ZERO_WARNING:
      return "NPP_DIVIDE_BY_ZERO_WARNING";
#endif
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000
    /* These are 7.0 or higher */
    case NPP_OVERFLOW_ERROR:
      return "NPP_OVERFLOW_ERROR";
    case NPP_CORRUPTED_DATA_ERROR:
      return "NPP_CORRUPTED_DATA_ERROR";
#endif
  }
  // Unrecognized status code (e.g. from a newer library version).
  return "<unknown>";
}
#endif
// Error-check helper invoked by the checkCudaErrors() macro below.
// After DPCT migration this is intentionally a no-op: SYCL reports errors via
// exceptions rather than returned status codes, so there is nothing to
// inspect here. The parameters are kept so existing call sites still compile.
template <typename T>
void check(T result, char const *const func, const char *const file,
           int const line) {
}
#ifdef __DPCT_HPP__
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__)
// Migration stub for the getLastCudaError() macro: the original
// cudaGetLastError() check was removed by DPCT; err is a placeholder that is
// never inspected, and the parameters are unused.
inline void __getLastCudaError(const char *errorMessage, const char *file,
                               const int line) {
  /*
  DPCT1010:17: SYCL uses exceptions to report errors and does not use the error
  codes. The call was replaced with 0. You need to rewrite this code.
  */
  dpct::err0 err = 0;
}
// This will only print the proper error string when calling cudaGetLastError
// but will not exit the program in case an error is detected.
#define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__)
// Migration stub for the printLastCudaError() macro: like __getLastCudaError
// above, the original error query was removed by DPCT; err is a never-read
// placeholder and the parameters are unused.
inline void __printLastCudaError(const char *errorMessage, const char *file,
                                 const int line) {
  /*
  DPCT1010:19: SYCL uses exceptions to report errors and does not use the error
  codes. The call was replaced with 0. You need to rewrite this code.
  */
  dpct::err0 err = 0;
}
#endif
#ifndef MAX
#define MAX(a, b) (a > b ? a : b)
#endif
// Float To Int conversion
// Rounds half away from zero: 0.5 is added for non-negative inputs and
// subtracted for negative ones before truncating toward zero.
inline int ftoi(float value) {
  const double shifted = (value >= 0) ? value + 0.5 : value - 0.5;
  return static_cast<int>(shifted);
}
// Beginning of GPU Architecture definitions
// Maps an SM version, encoded as 0xMm (M = major, m = minor, hexadecimal),
// to the number of CUDA cores per multiprocessor for that architecture.
inline int _ConvertSMVer2Cores(int major, int minor) {
  struct SMToCores {
    int sm;     // 0xMm-encoded SM version
    int cores;  // CUDA cores per multiprocessor
  };

  static const SMToCores sm_to_cores[] = {
      {0x30, 192}, {0x32, 192}, {0x35, 192}, {0x37, 192}, {0x50, 128},
      {0x52, 128}, {0x53, 128}, {0x60, 64},  {0x61, 128}, {0x62, 128},
      {0x70, 64},  {0x72, 64},  {0x75, 64},  {0x80, 64},  {0x86, 128},
      {0x87, 128}, {0x89, 128}, {0x90, 128}, {-1, -1}};

  const int sm_version = (major << 4) + minor;
  int idx = 0;
  while (sm_to_cores[idx].sm != -1) {
    if (sm_to_cores[idx].sm == sm_version) {
      return sm_to_cores[idx].cores;
    }
    ++idx;
  }

  // Unknown SM version: fall back to the newest known architecture so the
  // caller can still proceed.
  printf(
      "MapSMtoCores for SM %d.%d is undefined."
      " Default to use %d Cores/SM\n",
      major, minor, sm_to_cores[idx - 1].cores);
  return sm_to_cores[idx - 1].cores;
}
// Maps an SM version, encoded as 0xMm (M = major, m = minor, hexadecimal),
// to the marketing name of that GPU architecture.
inline const char* _ConvertSMVer2ArchName(int major, int minor) {
  struct SMToArchName {
    int sm;            // 0xMm-encoded SM version
    const char* name;  // architecture name
  };

  static const SMToArchName sm_to_name[] = {
      {0x30, "Kepler"},  {0x32, "Kepler"},  {0x35, "Kepler"},
      {0x37, "Kepler"},  {0x50, "Maxwell"}, {0x52, "Maxwell"},
      {0x53, "Maxwell"}, {0x60, "Pascal"},  {0x61, "Pascal"},
      {0x62, "Pascal"},  {0x70, "Volta"},   {0x72, "Xavier"},
      {0x75, "Turing"},  {0x80, "Ampere"},  {0x86, "Ampere"},
      {0x87, "Ampere"},  {0x89, "Ada"},     {0x90, "Hopper"},
      {-1, "Graphics Device"}};

  const int sm_version = (major << 4) + minor;
  int idx = 0;
  while (sm_to_name[idx].sm != -1) {
    if (sm_to_name[idx].sm == sm_version) {
      return sm_to_name[idx].name;
    }
    ++idx;
  }

  // Unknown SM version: fall back to the newest known architecture name.
  printf(
      "MapSMtoArchName for SM %d.%d is undefined."
      " Default to use %s\n",
      major, minor, sm_to_name[idx - 1].name);
  return sm_to_name[idx - 1].name;
}
// end of GPU Architecture definitions
#ifdef __DPCT_HPP__
// General GPU Device CUDA Initialization
// Selects and activates device devID (clamped to 0 if negative). Returns the
// device id actually selected, a negative value if devID is out of range, or
// exits the process when no device exists at all.
inline int gpuDeviceInit(int devID) {
  int device_count;
  /*
  DPCT1003:21: Migrated API does not return error code. (*, 0) is inserted. You
  may need to rewrite this code.
  */
  checkCudaErrors((device_count = dpct::dev_mgr::instance().device_count(), 0));

  if (device_count == 0) {
    fprintf(stderr,
            "gpuDeviceInit() CUDA error: "
            "no devices supporting CUDA.\n");
    exit(EXIT_FAILURE);
  }

  // Negative ids are treated as "use the first device".
  if (devID < 0) {
    devID = 0;
  }

  if (devID > device_count - 1) {
    fprintf(stderr, "\n");
    fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n",
            device_count);
    fprintf(stderr,
            ">> gpuDeviceInit (-device=%d) is not a valid"
            " GPU device. <<\n",
            devID);
    fprintf(stderr, "\n");
    return -devID;
  }

  int computeMode = -1, major = 0, minor = 0;
  /*
  DPCT1035:22: All SYCL devices can be used by the host to submit tasks. You may
  need to adjust this code.
  */
  // computeMode is hard-wired to 1 by the migration, so the prohibited-mode
  // branch below is currently unreachable.
  checkCudaErrors((computeMode = 1, 0));
  checkCudaErrors(
      (major = dpct::dev_mgr::instance().get_device(devID).get_major_version(),
       0));
  checkCudaErrors(
      (minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version(),
       0));
  /*
  DPCT1035:23: All SYCL devices can be used by the host to submit tasks. You may
  need to adjust this code.
  */
  if (computeMode == 0) {
    fprintf(stderr,
            "Error: device is running in <Compute Mode "
            "Prohibited>, no threads can use cudaSetDevice().\n");
    return -1;
  }

  if (major < 1) {
    fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
    exit(EXIT_FAILURE);
  }

  /*
  DPCT1093:24: The "devID" device may be not the one intended for use. Adjust
  the selected device if needed.
  */
  /*
  DPCT1003:25: Migrated API does not return error code. (*, 0) is inserted. You
  may need to rewrite this code.
  */
  checkCudaErrors((dpct::select_device(devID), 0));
  // NOTE(review): the format string lacks a closing quote after %s; this is
  // preserved verbatim from the upstream NVIDIA sample.
  printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, _ConvertSMVer2ArchName(major, minor));

  return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
// Each device is scored as multiProcessorCount * coresPerSM * clockRate and
// the index of the highest-scoring device is returned. Exits the process if
// no device exists or every device is compute-prohibited.
inline int gpuGetMaxGflopsDeviceId() try {
  int current_device = 0, sm_per_multiproc = 0;
  int max_perf_device = 0;
  int device_count = 0;
  int devices_prohibited = 0;

  uint64_t max_compute_perf = 0;
  /*
  DPCT1003:26: Migrated API does not return error code. (*, 0) is inserted. You
  may need to rewrite this code.
  */
  checkCudaErrors((device_count = dpct::dev_mgr::instance().device_count(), 0));

  if (device_count == 0) {
    fprintf(stderr,
            "gpuGetMaxGflopsDeviceId() CUDA error:"
            " no devices supporting CUDA.\n");
    exit(EXIT_FAILURE);
  }

  // Find the best CUDA capable GPU device
  current_device = 0;

  while (current_device < device_count) {
    int computeMode = -1, major = 0, minor = 0;
    /*
    DPCT1035:27: All SYCL devices can be used by the host to submit tasks. You
    may need to adjust this code.
    */
    // computeMode is hard-wired to 1 by the migration, so no device is ever
    // counted as prohibited in this loop.
    checkCudaErrors((computeMode = 1, 0));
    checkCudaErrors((major = dpct::dev_mgr::instance()
                                 .get_device(current_device)
                                 .get_major_version(),
                     0));
    checkCudaErrors((minor = dpct::dev_mgr::instance()
                                 .get_device(current_device)
                                 .get_minor_version(),
                     0));

    // If this GPU is not running on Compute Mode prohibited,
    // then we can add it to the list
    /*
    DPCT1035:28: All SYCL devices can be used by the host to submit tasks. You
    may need to adjust this code.
    */
    if (computeMode != 0) {
      // 9999.9999 is the legacy "emulation device" marker; score it minimally.
      if (major == 9999 && minor == 9999) {
        sm_per_multiproc = 1;
      } else {
        sm_per_multiproc =
            _ConvertSMVer2Cores(major, minor);
      }
      int multiProcessorCount = 0, clockRate = 0;
      checkCudaErrors((multiProcessorCount = dpct::dev_mgr::instance()
                                                 .get_device(current_device)
                                                 .get_max_compute_units(),
                       0));
      // NOTE(review): result is assigned but never inspected -- a leftover of
      // the DPCT (*, 0) error-code rewrite.
      dpct::err0 result = (clockRate = dpct::dev_mgr::instance()
                                           .get_device(current_device)
                                           .get_max_clock_frequency(),
                           0);

      uint64_t compute_perf = (uint64_t)multiProcessorCount * sm_per_multiproc * clockRate;

      if (compute_perf > max_compute_perf) {
        max_compute_perf = compute_perf;
        max_perf_device = current_device;
      }
    } else {
      devices_prohibited++;
    }

    ++current_device;
  }

  if (devices_prohibited == device_count) {
    fprintf(stderr,
            "gpuGetMaxGflopsDeviceId() CUDA error:"
            " all devices have compute mode prohibited.\n");
    exit(EXIT_FAILURE);
  }

  return max_perf_device;
}
catch (sycl::exception const &exc) {
  // Function-level try block: any SYCL failure during device enumeration is
  // reported and treated as fatal.
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  std::exit(1);
}
// Initialization code to find the best CUDA Device
// Honors a "-device=N" command-line override; otherwise picks the device with
// the highest estimated GFLOPS. Exits the process on invalid input or when
// initialization fails.
inline int findCudaDevice(int argc, const char **argv) {
  int devID = 0;

  // If the command-line has a device number specified, use it
  if (checkCmdLineFlag(argc, argv, "device")) {
    devID = getCmdLineArgumentInt(argc, argv, "device=");

    if (devID < 0) {
      printf("Invalid command line parameter\n ");
      exit(EXIT_FAILURE);
    } else {
      devID = gpuDeviceInit(devID);

      if (devID < 0) {
        printf("exiting...\n");
        exit(EXIT_FAILURE);
      }
    }
  } else {
    // Otherwise pick the device with highest Gflops/s
    devID = gpuGetMaxGflopsDeviceId();
    /*
    DPCT1093:29: The "devID" device may be not the one intended for use. Adjust
    the selected device if needed.
    */
    /*
    DPCT1003:30: Migrated API does not return error code. (*, 0) is inserted.
    You may need to rewrite this code.
    */
    checkCudaErrors((dpct::select_device(devID), 0));
    int major = 0, minor = 0;
    checkCudaErrors((
        major = dpct::dev_mgr::instance().get_device(devID).get_major_version(),
        0));
    checkCudaErrors((
        minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version(),
        0));
    printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
           devID, _ConvertSMVer2ArchName(major, minor), major, minor);
  }

  return devID;
}
// Looks for an integrated (memory-sharing) GPU that is compute capable,
// selects it, and returns its index. Exits the process when every device is
// prohibited or none supports the interop; returns -1 only if the loop
// completes without a match and not all devices were prohibited.
inline int findIntegratedGPU() {
  int current_device = 0;
  int device_count = 0;
  int devices_prohibited = 0;
  /*
  DPCT1003:31: Migrated API does not return error code. (*, 0) is inserted. You
  may need to rewrite this code.
  */
  checkCudaErrors((device_count = dpct::dev_mgr::instance().device_count(), 0));

  if (device_count == 0) {
    fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
    exit(EXIT_FAILURE);
  }

  // Find the integrated GPU which is compute capable
  while (current_device < device_count) {
    int computeMode = -1, integrated = -1;
    /*
    DPCT1035:32: All SYCL devices can be used by the host to submit tasks. You
    may need to adjust this code.
    */
    // computeMode is hard-wired to 1 by the migration; only the integrated
    // flag actually filters devices here.
    checkCudaErrors((computeMode = 1, 0));
    checkCudaErrors((integrated = dpct::dev_mgr::instance()
                                      .get_device(current_device)
                                      .get_integrated(),
                     0));
    // If GPU is integrated and is not running on Compute Mode prohibited,
    // then cuda can map to GLES resource
    /*
    DPCT1035:33: All SYCL devices can be used by the host to submit tasks. You
    may need to adjust this code.
    */
    if (integrated && (computeMode != 0)) {
      /*
      DPCT1093:34: The "current_device" device may be not the one intended for
      use. Adjust the selected device if needed.
      */
      /*
      DPCT1003:35: Migrated API does not return error code. (*, 0) is inserted.
      You may need to rewrite this code.
      */
      checkCudaErrors((dpct::select_device(current_device), 0));
      int major = 0, minor = 0;
      checkCudaErrors((major = dpct::dev_mgr::instance()
                                   .get_device(current_device)
                                   .get_major_version(),
                       0));
      checkCudaErrors((minor = dpct::dev_mgr::instance()
                                   .get_device(current_device)
                                   .get_minor_version(),
                       0));
      printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
             current_device, _ConvertSMVer2ArchName(major, minor), major, minor);

      return current_device;
    } else {
      devices_prohibited++;
    }

    current_device++;
  }

  if (devices_prohibited == device_count) {
    fprintf(stderr,
            "CUDA error:"
            " No GLES-CUDA Interop capable GPU found.\n");
    exit(EXIT_FAILURE);
  }

  return -1;
}
// General check for CUDA GPU SM Capabilities
// Returns true iff the currently selected device's compute capability is at
// least major_version.minor_version; prints the verdict either way.
inline bool checkCudaCapabilities(int major_version, int minor_version) {
  int dev;
  int major = 0, minor = 0;

  checkCudaErrors(dev = dpct::dev_mgr::instance().current_device_id());
  checkCudaErrors(
      (major = dpct::dev_mgr::instance().get_device(dev).get_major_version(),
       0));
  checkCudaErrors(
      (minor = dpct::dev_mgr::instance().get_device(dev).get_minor_version(),
       0));

  // Lexicographic comparison of (major, minor) against the requirement.
  if ((major > major_version) ||
      (major == major_version &&
       minor >= minor_version)) {
    printf(" Device %d: <%16s >, Compute SM %d.%d detected\n", dev,
           _ConvertSMVer2ArchName(major, minor), major, minor);
    return true;
  } else {
    printf(
        " No GPU device was found that can support "
        "CUDA compute capability %d.%d.\n",
        major_version, minor_version);
    return false;
  }
}
#endif
// end of CUDA Helper Functions
#endif // COMMON_HELPER_CUDA_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/03_SYCL_Migration_Jacobi_Iterative/dpct_output/Samples/3_CUDA_Features/jacobiCudaGraphs/jacobi.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef JACOBI_H
#define JACOBI_H
#define N_ROWS 512
#endif | h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/03_SYCL_Migration_Jacobi_Iterative/dpct_output/Samples/3_CUDA_Features/jacobiCudaGraphs/jacobi.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <helper_cuda.h>
#include <vector>
#include "jacobi.h"
// 8 Rows of square-matrix A processed by each CTA.
// This can be max 32 and only power of 2 (i.e., 2/4/8/16/32).
#define ROWS_PER_CTA 8
#if !defined(DPCT_COMPATIBILITY_TEMP) || DPCT_COMPATIBILITY_TEMP >= 600
#else
__device__ double atomicAdd(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN !=
// NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
static void JacobiMethod(const float *A, const double *b,
const float conv_threshold, double *x,
double *x_new, double *sum,
const sycl::nd_item<3> &item_ct1,
double *x_shared, double *b_shared) {
// Handle to thread block group
auto cta = item_ct1.get_group();
// N_ROWS == n
for (int i = item_ct1.get_local_id(2); i < N_ROWS;
i += item_ct1.get_local_range(2)) {
x_shared[i] = x[i];
}
if (item_ct1.get_local_id(2) < ROWS_PER_CTA) {
int k = item_ct1.get_local_id(2);
for (int i = k + (item_ct1.get_group(2) * ROWS_PER_CTA);
(k < ROWS_PER_CTA) && (i < N_ROWS);
k += ROWS_PER_CTA, i += ROWS_PER_CTA) {
b_shared[i % (ROWS_PER_CTA + 1)] = b[i];
}
}
/*
DPCT1065:0: Consider replacing sycl::nd_item::barrier() with
sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better
performance if there is no access to global memory.
*/
item_ct1.barrier();
sycl::sub_group tile32 = item_ct1.get_sub_group();
for (int k = 0, i = item_ct1.get_group(2) * ROWS_PER_CTA;
(k < ROWS_PER_CTA) && (i < N_ROWS); k++, i++) {
double rowThreadSum = 0.0;
for (int j = item_ct1.get_local_id(2); j < N_ROWS;
j += item_ct1.get_local_range(2)) {
rowThreadSum += (A[i * N_ROWS + j] * x_shared[j]);
}
for (int offset = item_ct1.get_sub_group().get_local_linear_range() / 2;
offset > 0; offset /= 2) {
rowThreadSum += tile32.shuffle_down(rowThreadSum, offset);
}
if (item_ct1.get_sub_group().get_local_linear_id() == 0) {
dpct::atomic_fetch_add<sycl::access::address_space::generic_space>(
&b_shared[i % (ROWS_PER_CTA + 1)], -rowThreadSum);
}
}
/*
DPCT1065:1: Consider replacing sycl::nd_item::barrier() with
sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better
performance if there is no access to global memory.
*/
item_ct1.barrier();
if (item_ct1.get_local_id(2) < ROWS_PER_CTA) {
dpct::experimental::logical_group tile8 = dpct::experimental::logical_group(
item_ct1, item_ct1.get_group(), ROWS_PER_CTA);
double temp_sum = 0.0;
int k = item_ct1.get_local_id(2);
for (int i = k + (item_ct1.get_group(2) * ROWS_PER_CTA);
(k < ROWS_PER_CTA) && (i < N_ROWS);
k += ROWS_PER_CTA, i += ROWS_PER_CTA) {
double dx = b_shared[i % (ROWS_PER_CTA + 1)];
dx /= A[i * N_ROWS + i];
x_new[i] = (x_shared[i] + dx);
temp_sum += sycl::fabs(dx);
}
for (int offset = tile8.get_local_linear_range() / 2; offset > 0;
offset /= 2) {
temp_sum += dpct::shift_sub_group_left(item_ct1.get_sub_group(), temp_sum,
offset, 8);
}
if (tile8.get_local_linear_id() == 0) {
dpct::atomic_fetch_add<sycl::access::address_space::generic_space>(
sum, temp_sum);
}
}
}
// Thread block size for finalError kernel should be multiple of 32
static void finalError(double *x, double *g_sum,
const sycl::nd_item<3> &item_ct1, uint8_t *dpct_local) {
// Handle to thread block group
auto cta = item_ct1.get_group();
auto warpSum = (double *)dpct_local;
double sum = 0.0;
int globalThreadId = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
item_ct1.get_local_id(2);
for (int i = globalThreadId; i < N_ROWS;
i += item_ct1.get_local_range(2) * item_ct1.get_group_range(2)) {
double d = x[i] - 1.0;
sum += sycl::fabs(d);
}
sycl::sub_group tile32 = item_ct1.get_sub_group();
for (int offset = item_ct1.get_sub_group().get_local_linear_range() / 2;
offset > 0; offset /= 2) {
sum += tile32.shuffle_down(sum, offset);
}
if (item_ct1.get_sub_group().get_local_linear_id() == 0) {
warpSum[item_ct1.get_local_id(2) /
item_ct1.get_sub_group().get_local_range().get(0)] = sum;
}
/*
DPCT1065:2: Consider replacing sycl::nd_item::barrier() with
sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better
performance if there is no access to global memory.
*/
item_ct1.barrier();
double blockSum = 0.0;
if (item_ct1.get_local_id(2) <
(item_ct1.get_local_range(2) /
item_ct1.get_sub_group().get_local_range().get(0))) {
blockSum = warpSum[item_ct1.get_local_id(2)];
}
if (item_ct1.get_local_id(2) < 32) {
for (int offset = item_ct1.get_sub_group().get_local_linear_range() / 2;
offset > 0; offset /= 2) {
blockSum += tile32.shuffle_down(blockSum, offset);
}
if (item_ct1.get_sub_group().get_local_linear_id() == 0) {
dpct::atomic_fetch_add<sycl::access::address_space::generic_space>(
g_sum, blockSum);
}
}
}
double JacobiMethodGpuCudaGraphExecKernelSetParams(
const float *A, const double *b, const float conv_threshold,
const int max_iter, double *x, double *x_new, dpct::queue_ptr stream) {
// CTA size
sycl::range<3> nthreads(1, 1, 256);
// grid size
sycl::range<3> nblocks(1, 1, (N_ROWS / ROWS_PER_CTA) + 2);
cudaGraph_t graph;
cudaGraphExec_t graphExec = NULL;
double sum = 0.0;
double *d_sum = NULL;
/*
DPCT1003:36: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors(
(d_sum = sycl::malloc_device<double>(1, dpct::get_default_queue()), 0));
std::vector<cudaGraphNode_t> nodeDependencies;
cudaGraphNode_t memcpyNode, jacobiKernelNode, memsetNode;
dpct::pitched_data memcpyParams_from_data_ct1, memcpyParams_to_data_ct1;
sycl::id<3> memcpyParams_from_pos_ct1(0, 0, 0),
memcpyParams_to_pos_ct1(0, 0, 0);
sycl::range<3> memcpyParams_size_ct1(1, 1, 1);
dpct::memcpy_direction memcpyParams_direction_ct1;
cudaMemsetParams memsetParams = {0};
memsetParams.dst = (void *)d_sum;
memsetParams.value = 0;
memsetParams.pitch = 0;
// elementSize can be max 4 bytes, so we take sizeof(float) and width=2
memsetParams.elementSize = sizeof(float);
memsetParams.width = 2;
memsetParams.height = 1;
/*
DPCT1007:37: Migration of cudaGraphCreate is not supported.
*/
checkCudaErrors(cudaGraphCreate(&graph, 0));
/*
DPCT1007:38: Migration of cudaGraphAddMemsetNode is not supported.
*/
checkCudaErrors(
cudaGraphAddMemsetNode(&memsetNode, graph, NULL, 0, &memsetParams));
nodeDependencies.push_back(memsetNode);
/*
DPCT1082:39: Migration of cudaKernelNodeParams type is not supported.
*/
cudaKernelNodeParams NodeParams0, NodeParams1;
NodeParams0.func = (void *)JacobiMethod;
NodeParams0.gridDim = nblocks;
NodeParams0.blockDim = nthreads;
NodeParams0.sharedMemBytes = 0;
void *kernelArgs0[6] = {(void *)&A, (void *)&b, (void *)&conv_threshold,
(void *)&x, (void *)&x_new, (void *)&d_sum};
NodeParams0.kernelParams = kernelArgs0;
NodeParams0.extra = NULL;
/*
DPCT1007:40: Migration of cudaGraphAddKernelNode is not supported.
*/
checkCudaErrors(
cudaGraphAddKernelNode(&jacobiKernelNode, graph, nodeDependencies.data(),
nodeDependencies.size(), &NodeParams0));
nodeDependencies.clear();
nodeDependencies.push_back(jacobiKernelNode);
memcpyParams_from_data_ct1 = NULL->to_pitched_data();
memcpyParams_from_pos_ct1 = sycl::id<3>(0, 0, 0);
memcpyParams_from_data_ct1 = dpct::pitched_data(d_sum, sizeof(double), 1, 1);
memcpyParams_to_data_ct1 = NULL->to_pitched_data();
memcpyParams_to_pos_ct1 = sycl::id<3>(0, 0, 0);
memcpyParams_to_data_ct1 = dpct::pitched_data(&sum, sizeof(double), 1, 1);
memcpyParams_size_ct1 = sycl::range<3>(sizeof(double), 1, 1);
memcpyParams_direction_ct1 = dpct::device_to_host;
/*
DPCT1007:41: Migration of cudaGraphAddMemcpyNode is not supported.
*/
checkCudaErrors(
cudaGraphAddMemcpyNode(&memcpyNode, graph, nodeDependencies.data(),
nodeDependencies.size(), &memcpyParams));
/*
DPCT1007:42: Migration of cudaGraphInstantiate is not supported.
*/
checkCudaErrors(cudaGraphInstantiate(&graphExec, graph, NULL, NULL, 0));
NodeParams1.func = (void *)JacobiMethod;
NodeParams1.gridDim = nblocks;
NodeParams1.blockDim = nthreads;
NodeParams1.sharedMemBytes = 0;
void *kernelArgs1[6] = {(void *)&A, (void *)&b, (void *)&conv_threshold,
(void *)&x_new, (void *)&x, (void *)&d_sum};
NodeParams1.kernelParams = kernelArgs1;
NodeParams1.extra = NULL;
int k = 0;
for (k = 0; k < max_iter; k++) {
/*
DPCT1007:43: Migration of cudaGraphExecKernelNodeSetParams is not supported.
*/
checkCudaErrors(cudaGraphExecKernelNodeSetParams(
graphExec, jacobiKernelNode,
((k & 1) == 0) ? &NodeParams0 : &NodeParams1));
/*
DPCT1007:44: Migration of cudaGraphLaunch is not supported.
*/
checkCudaErrors(cudaGraphLaunch(graphExec, stream));
/*
DPCT1003:45: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->wait(), 0));
if (sum <= conv_threshold) {
/*
DPCT1003:46: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->memset(d_sum, 0, sizeof(double)), 0));
nblocks[2] = (N_ROWS / nthreads[2]) + 1;
/*
DPCT1083:4: The size of local memory in the migrated code may be different
from the original code. Check that the allocated memory size in the
migrated code is correct.
*/
size_t sharedMemSize = ((nthreads[2] / 32) + 1) * sizeof(double);
if ((k & 1) == 0) {
/*
DPCT1049:3: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
stream->submit([&](sycl::handler &cgh) {
sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
sycl::range<1>(sharedMemSize), cgh);
cgh.parallel_for(sycl::nd_range<3>(nblocks * nthreads, nthreads),
[=](sycl::nd_item<3> item_ct1)
[[intel::reqd_sub_group_size(32)]] {
finalError(x_new, d_sum, item_ct1,
dpct_local_acc_ct1.get_pointer());
});
});
} else {
/*
DPCT1049:5: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
stream->submit([&](sycl::handler &cgh) {
sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
sycl::range<1>(sharedMemSize), cgh);
cgh.parallel_for(sycl::nd_range<3>(nblocks * nthreads, nthreads),
[=](sycl::nd_item<3> item_ct1)
[[intel::reqd_sub_group_size(32)]] {
finalError(x, d_sum, item_ct1,
dpct_local_acc_ct1.get_pointer());
});
});
}
/*
DPCT1003:47: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->memcpy(&sum, d_sum, sizeof(double)), 0));
/*
DPCT1003:48: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->wait(), 0));
printf("GPU iterations : %d\n", k + 1);
printf("GPU error : %.3e\n", sum);
break;
}
}
/*
DPCT1003:49: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((sycl::free(d_sum, dpct::get_default_queue()), 0));
return sum;
}
double JacobiMethodGpuCudaGraphExecUpdate(const float *A, const double *b,
const float conv_threshold,
const int max_iter, double *x,
double *x_new,
dpct::queue_ptr stream) {
// CTA size
sycl::range<3> nthreads(1, 1, 256);
// grid size
sycl::range<3> nblocks(1, 1, (N_ROWS / ROWS_PER_CTA) + 2);
cudaGraph_t graph;
cudaGraphExec_t graphExec = NULL;
double sum = 0.0;
double *d_sum;
/*
DPCT1003:50: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors(
(d_sum = sycl::malloc_device<double>(1, dpct::get_default_queue()), 0));
int k = 0;
for (k = 0; k < max_iter; k++) {
/*
DPCT1027:51: The call to cudaStreamBeginCapture was replaced with 0 because
SYCL currently does not support capture operations on queues.
*/
checkCudaErrors(0);
/*
DPCT1003:52: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->memset(d_sum, 0, sizeof(double)), 0));
if ((k & 1) == 0) {
/*
DPCT1049:6: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
stream->submit([&](sycl::handler &cgh) {
/*
DPCT1101:91: 'N_ROWS' expression was replaced with a value. Modify the
code to use the original expression, provided in comments, if it is
correct.
*/
sycl::local_accessor<double, 1> x_shared_acc_ct1(
sycl::range<1>(512 /*N_ROWS*/), cgh);
/*
DPCT1101:92: 'ROWS_PER_CTA + 1' expression was replaced with a value.
Modify the code to use the original expression, provided in comments,
if it is correct.
*/
sycl::local_accessor<double, 1> b_shared_acc_ct1(
sycl::range<1>(9 /*ROWS_PER_CTA + 1*/), cgh);
cgh.parallel_for(
sycl::nd_range<3>(nblocks * nthreads, nthreads),
[=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
JacobiMethod(A, b, conv_threshold, x, x_new, d_sum, item_ct1,
x_shared_acc_ct1.get_pointer(),
b_shared_acc_ct1.get_pointer());
});
});
} else {
/*
DPCT1049:7: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
stream->submit([&](sycl::handler &cgh) {
/*
DPCT1101:93: 'N_ROWS' expression was replaced with a value. Modify the
code to use the original expression, provided in comments, if it is
correct.
*/
sycl::local_accessor<double, 1> x_shared_acc_ct1(
sycl::range<1>(512 /*N_ROWS*/), cgh);
/*
DPCT1101:94: 'ROWS_PER_CTA + 1' expression was replaced with a value.
Modify the code to use the original expression, provided in comments,
if it is correct.
*/
sycl::local_accessor<double, 1> b_shared_acc_ct1(
sycl::range<1>(9 /*ROWS_PER_CTA + 1*/), cgh);
cgh.parallel_for(
sycl::nd_range<3>(nblocks * nthreads, nthreads),
[=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
JacobiMethod(A, b, conv_threshold, x_new, x, d_sum, item_ct1,
x_shared_acc_ct1.get_pointer(),
b_shared_acc_ct1.get_pointer());
});
});
}
/*
DPCT1003:53: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->memcpy(&sum, d_sum, sizeof(double)), 0));
/*
DPCT1027:54: The call to cudaStreamEndCapture was replaced with 0 because
SYCL currently does not support capture operations on queues.
*/
checkCudaErrors(0);
if (graphExec == NULL) {
/*
DPCT1007:55: Migration of cudaGraphInstantiate is not supported.
*/
checkCudaErrors(cudaGraphInstantiate(&graphExec, graph, NULL, NULL, 0));
} else {
cudaGraphExecUpdateResult updateResult_out;
/*
DPCT1007:56: Migration of cudaGraphExecUpdate is not supported.
*/
checkCudaErrors(
cudaGraphExecUpdate(graphExec, graph, NULL, &updateResult_out));
if (updateResult_out != cudaGraphExecUpdateSuccess) {
if (graphExec != NULL) {
/*
DPCT1007:57: Migration of cudaGraphExecDestroy is not supported.
*/
checkCudaErrors(cudaGraphExecDestroy(graphExec));
}
printf("k = %d graph update failed with error - %d\n", k,
updateResult_out);
/*
DPCT1007:58: Migration of cudaGraphInstantiate is not supported.
*/
checkCudaErrors(cudaGraphInstantiate(&graphExec, graph, NULL, NULL, 0));
}
}
/*
DPCT1007:59: Migration of cudaGraphLaunch is not supported.
*/
checkCudaErrors(cudaGraphLaunch(graphExec, stream));
/*
DPCT1003:60: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->wait(), 0));
if (sum <= conv_threshold) {
/*
DPCT1003:61: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->memset(d_sum, 0, sizeof(double)), 0));
nblocks[2] = (N_ROWS / nthreads[2]) + 1;
/*
DPCT1083:9: The size of local memory in the migrated code may be different
from the original code. Check that the allocated memory size in the
migrated code is correct.
*/
size_t sharedMemSize = ((nthreads[2] / 32) + 1) * sizeof(double);
if ((k & 1) == 0) {
/*
DPCT1049:8: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
stream->submit([&](sycl::handler &cgh) {
sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
sycl::range<1>(sharedMemSize), cgh);
cgh.parallel_for(sycl::nd_range<3>(nblocks * nthreads, nthreads),
[=](sycl::nd_item<3> item_ct1)
[[intel::reqd_sub_group_size(32)]] {
finalError(x_new, d_sum, item_ct1,
dpct_local_acc_ct1.get_pointer());
});
});
} else {
/*
DPCT1049:10: The work-group size passed to the SYCL kernel may exceed
the limit. To get the device limit, query
info::device::max_work_group_size. Adjust the work-group size if needed.
*/
stream->submit([&](sycl::handler &cgh) {
sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
sycl::range<1>(sharedMemSize), cgh);
cgh.parallel_for(sycl::nd_range<3>(nblocks * nthreads, nthreads),
[=](sycl::nd_item<3> item_ct1)
[[intel::reqd_sub_group_size(32)]] {
finalError(x, d_sum, item_ct1,
dpct_local_acc_ct1.get_pointer());
});
});
}
/*
DPCT1003:62: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->memcpy(&sum, d_sum, sizeof(double)), 0));
/*
DPCT1003:63: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->wait(), 0));
printf("GPU iterations : %d\n", k + 1);
printf("GPU error : %.3e\n", sum);
break;
}
}
/*
DPCT1003:64: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((sycl::free(d_sum, dpct::get_default_queue()), 0));
return sum;
}
double JacobiMethodGpu(const float *A, const double *b,
const float conv_threshold, const int max_iter,
double *x, double *x_new, dpct::queue_ptr stream) {
// CTA size
sycl::range<3> nthreads(1, 1, 256);
// grid size
sycl::range<3> nblocks(1, 1, (N_ROWS / ROWS_PER_CTA) + 2);
double sum = 0.0;
double *d_sum;
/*
DPCT1003:65: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors(
(d_sum = sycl::malloc_device<double>(1, dpct::get_default_queue()), 0));
int k = 0;
for (k = 0; k < max_iter; k++) {
/*
DPCT1003:66: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->memset(d_sum, 0, sizeof(double)), 0));
if ((k & 1) == 0) {
/*
DPCT1049:11: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
stream->submit([&](sycl::handler &cgh) {
/*
DPCT1101:95: 'N_ROWS' expression was replaced with a value. Modify the
code to use the original expression, provided in comments, if it is
correct.
*/
sycl::local_accessor<double, 1> x_shared_acc_ct1(
sycl::range<1>(512 /*N_ROWS*/), cgh);
/*
DPCT1101:96: 'ROWS_PER_CTA + 1' expression was replaced with a value.
Modify the code to use the original expression, provided in comments,
if it is correct.
*/
sycl::local_accessor<double, 1> b_shared_acc_ct1(
sycl::range<1>(9 /*ROWS_PER_CTA + 1*/), cgh);
cgh.parallel_for(
sycl::nd_range<3>(nblocks * nthreads, nthreads),
[=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
JacobiMethod(A, b, conv_threshold, x, x_new, d_sum, item_ct1,
x_shared_acc_ct1.get_pointer(),
b_shared_acc_ct1.get_pointer());
});
});
} else {
/*
DPCT1049:12: The work-group size passed to the SYCL kernel may exceed the
limit. To get the device limit, query info::device::max_work_group_size.
Adjust the work-group size if needed.
*/
stream->submit([&](sycl::handler &cgh) {
/*
DPCT1101:97: 'N_ROWS' expression was replaced with a value. Modify the
code to use the original expression, provided in comments, if it is
correct.
*/
sycl::local_accessor<double, 1> x_shared_acc_ct1(
sycl::range<1>(512 /*N_ROWS*/), cgh);
/*
DPCT1101:98: 'ROWS_PER_CTA + 1' expression was replaced with a value.
Modify the code to use the original expression, provided in comments,
if it is correct.
*/
sycl::local_accessor<double, 1> b_shared_acc_ct1(
sycl::range<1>(9 /*ROWS_PER_CTA + 1*/), cgh);
cgh.parallel_for(
sycl::nd_range<3>(nblocks * nthreads, nthreads),
[=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
JacobiMethod(A, b, conv_threshold, x_new, x, d_sum, item_ct1,
x_shared_acc_ct1.get_pointer(),
b_shared_acc_ct1.get_pointer());
});
});
}
/*
DPCT1003:67: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->memcpy(&sum, d_sum, sizeof(double)), 0));
/*
DPCT1003:68: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->wait(), 0));
if (sum <= conv_threshold) {
/*
DPCT1003:69: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->memset(d_sum, 0, sizeof(double)), 0));
nblocks[2] = (N_ROWS / nthreads[2]) + 1;
/*
DPCT1083:14: The size of local memory in the migrated code may be
different from the original code. Check that the allocated memory size in
the migrated code is correct.
*/
size_t sharedMemSize = ((nthreads[2] / 32) + 1) * sizeof(double);
if ((k & 1) == 0) {
/*
DPCT1049:13: The work-group size passed to the SYCL kernel may exceed
the limit. To get the device limit, query
info::device::max_work_group_size. Adjust the work-group size if needed.
*/
stream->submit([&](sycl::handler &cgh) {
sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
sycl::range<1>(sharedMemSize), cgh);
cgh.parallel_for(sycl::nd_range<3>(nblocks * nthreads, nthreads),
[=](sycl::nd_item<3> item_ct1)
[[intel::reqd_sub_group_size(32)]] {
finalError(x_new, d_sum, item_ct1,
dpct_local_acc_ct1.get_pointer());
});
});
} else {
/*
DPCT1049:15: The work-group size passed to the SYCL kernel may exceed
the limit. To get the device limit, query
info::device::max_work_group_size. Adjust the work-group size if needed.
*/
stream->submit([&](sycl::handler &cgh) {
sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
sycl::range<1>(sharedMemSize), cgh);
cgh.parallel_for(sycl::nd_range<3>(nblocks * nthreads, nthreads),
[=](sycl::nd_item<3> item_ct1)
[[intel::reqd_sub_group_size(32)]] {
finalError(x, d_sum, item_ct1,
dpct_local_acc_ct1.get_pointer());
});
});
}
/*
DPCT1003:70: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->memcpy(&sum, d_sum, sizeof(double)), 0));
/*
DPCT1003:71: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((stream->wait(), 0));
printf("GPU iterations : %d\n", k + 1);
printf("GPU error : %.3e\n", sum);
break;
}
}
/*
DPCT1003:72: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((sycl::free(d_sum, dpct::get_default_queue()), 0));
return sum;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/03_SYCL_Migration_Jacobi_Iterative/dpct_output/Samples/3_CUDA_Features/jacobiCudaGraphs/main.cpp.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// This sample demonstrates Instantiated CUDA Graph Update
// with Jacobi Iterative Method in 3 different methods:
// 1 - JacobiMethodGpuCudaGraphExecKernelSetParams() - CUDA Graph with
// cudaGraphExecKernelNodeSetParams() 2 - JacobiMethodGpuCudaGraphExecUpdate() -
// CUDA Graph with cudaGraphExecUpdate() 3 - JacobiMethodGpu() - Non CUDA Graph
// method
// Jacobi method on a linear system A*x = b,
// where A is diagonally dominant and the exact solution consists
// of all ones.
// The dimension N_ROWS is included in jacobi.h
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "jacobi.h"
// Run the Jacobi method for A*x = b on GPU with CUDA Graph -
// cudaGraphExecKernelNodeSetParams().
extern double JacobiMethodGpuCudaGraphExecKernelSetParams(
const float *A, const double *b, const float conv_threshold,
const int max_iter, double *x, double *x_new, dpct::queue_ptr stream);
// Run the Jacobi method for A*x = b on GPU with Instantiated CUDA Graph Update
// API - cudaGraphExecUpdate().
extern double JacobiMethodGpuCudaGraphExecUpdate(
const float *A, const double *b, const float conv_threshold,
const int max_iter, double *x, double *x_new, dpct::queue_ptr stream);
// Run the Jacobi method for A*x = b on GPU without CUDA Graph.
extern double JacobiMethodGpu(const float *A, const double *b,
const float conv_threshold, const int max_iter,
double *x, double *x_new, dpct::queue_ptr stream);
// creates N_ROWS x N_ROWS matrix A with N_ROWS+1 on the diagonal and 1
// elsewhere. The elements of the right hand side b all equal 2*n, hence the
// exact solution x to A*x = b is a vector of ones.
void createLinearSystem(float *A, double *b);
// Run the Jacobi method for A*x = b on CPU.
void JacobiMethodCPU(float *A, double *b, float conv_threshold, int max_iter,
int *numit, double *x);
int main(int argc, char **argv) {
if (checkCmdLineFlag(argc, (const char **)argv, "help")) {
printf("Command line: jacobiCudaGraphs [-option]\n");
printf("Valid options:\n");
printf(
"-gpumethod=<0,1 or 2> : 0 - [Default] "
"JacobiMethodGpuCudaGraphExecKernelSetParams\n");
printf(" : 1 - JacobiMethodGpuCudaGraphExecUpdate\n");
printf(" : 2 - JacobiMethodGpu - Non CUDA Graph\n");
printf("-device=device_num : cuda device id");
printf("-help : Output a help message\n");
exit(EXIT_SUCCESS);
}
int gpumethod = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "gpumethod")) {
gpumethod = getCmdLineArgumentInt(argc, (const char **)argv, "gpumethod");
if (gpumethod < 0 || gpumethod > 2) {
printf("Error: gpumethod must be 0 or 1 or 2, gpumethod=%d is invalid\n",
gpumethod);
exit(EXIT_SUCCESS);
}
}
int dev = findCudaDevice(argc, (const char **)argv);
double *b = NULL;
float *A = NULL;
/*
DPCT1003:73: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors(
(b = sycl::malloc_host<double>(N_ROWS, dpct::get_default_queue()), 0));
memset(b, 0, N_ROWS * sizeof(double));
/*
DPCT1003:74: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors(
(A = sycl::malloc_host<float>(N_ROWS * N_ROWS, dpct::get_default_queue()),
0));
memset(A, 0, N_ROWS * N_ROWS * sizeof(float));
createLinearSystem(A, b);
double *x = NULL;
// start with array of all zeroes
x = (double *)calloc(N_ROWS, sizeof(double));
float conv_threshold = 1.0e-2;
int max_iter = 4 * N_ROWS * N_ROWS;
int cnt = 0;
// create timer
StopWatchInterface *timerCPU = NULL, *timerGpu = NULL;
sdkCreateTimer(&timerCPU);
sdkStartTimer(&timerCPU);
JacobiMethodCPU(A, b, conv_threshold, max_iter, &cnt, x);
double sum = 0.0;
// Compute error
for (int i = 0; i < N_ROWS; i++) {
double d = x[i] - 1.0;
sum += fabs(d);
}
sdkStopTimer(&timerCPU);
printf("CPU iterations : %d\n", cnt);
printf("CPU error : %.3e\n", sum);
printf("CPU Processing time: %f (ms)\n", sdkGetTimerValue(&timerCPU));
float *d_A;
double *d_b, *d_x, *d_x_new;
dpct::queue_ptr stream1;
/*
DPCT1003:75: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
/*
DPCT1025:76: The SYCL queue is created ignoring the flag and priority options.
*/
checkCudaErrors((stream1 = dpct::get_current_device().create_queue(), 0));
/*
DPCT1003:77: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((
d_b = sycl::malloc_device<double>(N_ROWS, dpct::get_default_queue()), 0));
/*
DPCT1003:78: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors(
(d_A = (float *)sycl::malloc_device(sizeof(float) * N_ROWS * N_ROWS,
dpct::get_default_queue()),
0));
/*
DPCT1003:79: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((
d_x = sycl::malloc_device<double>(N_ROWS, dpct::get_default_queue()), 0));
/*
DPCT1003:80: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors(
(d_x_new = sycl::malloc_device<double>(N_ROWS, dpct::get_default_queue()),
0));
/*
DPCT1003:81: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((stream1->memset(d_x, 0, sizeof(double) * N_ROWS), 0));
/*
DPCT1003:82: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((stream1->memset(d_x_new, 0, sizeof(double) * N_ROWS), 0));
/*
DPCT1003:83: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors(
(stream1->memcpy(d_A, A, sizeof(float) * N_ROWS * N_ROWS), 0));
/*
DPCT1003:84: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((stream1->memcpy(d_b, b, sizeof(double) * N_ROWS), 0));
sdkCreateTimer(&timerGpu);
sdkStartTimer(&timerGpu);
double sumGPU = 0.0;
if (gpumethod == 0) {
sumGPU = JacobiMethodGpuCudaGraphExecKernelSetParams(
d_A, d_b, conv_threshold, max_iter, d_x, d_x_new, stream1);
} else if (gpumethod == 1) {
sumGPU = JacobiMethodGpuCudaGraphExecUpdate(
d_A, d_b, conv_threshold, max_iter, d_x, d_x_new, stream1);
} else if (gpumethod == 2) {
sumGPU = JacobiMethodGpu(d_A, d_b, conv_threshold, max_iter, d_x, d_x_new,
stream1);
}
sdkStopTimer(&timerGpu);
printf("GPU Processing time: %f (ms)\n", sdkGetTimerValue(&timerGpu));
/*
DPCT1003:85: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((sycl::free(d_b, dpct::get_default_queue()), 0));
/*
DPCT1003:86: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((sycl::free(d_A, dpct::get_default_queue()), 0));
/*
DPCT1003:87: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((sycl::free(d_x, dpct::get_default_queue()), 0));
/*
DPCT1003:88: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((sycl::free(d_x_new, dpct::get_default_queue()), 0));
/*
DPCT1003:89: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((sycl::free(A, dpct::get_default_queue()), 0));
/*
DPCT1003:90: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((sycl::free(b, dpct::get_default_queue()), 0));
printf("&&&& jacobiCudaGraphs %s\n",
(fabs(sum - sumGPU) < conv_threshold) ? "PASSED" : "FAILED");
return (fabs(sum - sumGPU) < conv_threshold) ? EXIT_SUCCESS : EXIT_FAILURE;
}
// Builds a diagonally dominant N_ROWS x N_ROWS system A*x = b whose exact
// solution is x[i] == 1: every entry of A is 1.0 except the diagonal,
// which is N_ROWS + 1, and every right-hand-side entry is 2 * N_ROWS.
void createLinearSystem(float *A, double *b) {
  for (int row = 0; row < N_ROWS; ++row) {
    b[row] = 2.0 * N_ROWS;
    float *row_ptr = &A[row * N_ROWS];
    for (int col = 0; col < N_ROWS; ++col) {
      row_ptr[col] = 1.0;
    }
    // Overwrite the diagonal last so it wins over the fill above.
    row_ptr[row] = N_ROWS + 1.0;
  }
}
// Reference (host) Jacobi solver used to validate the GPU results.
// Iteratively refines x for the N_ROWS x N_ROWS row-major system A*x = b,
// stopping after max_iter sweeps or once the L1 norm of the per-sweep
// correction drops to conv_threshold. On return, x holds the solution
// estimate and *num_iter the number of sweeps performed (k + 1).
//
// NOTE(review): x_new accumulates corrections (`+=`) across sweeps rather
// than being recomputed from scratch — presumably mirroring the GPU
// kernels' delta formulation; confirm against the device implementations
// before changing.
void JacobiMethodCPU(float *A, double *b, float conv_threshold, int max_iter,
                     int *num_iter, double *x) {
  double *x_new;
  x_new = (double *)calloc(N_ROWS, sizeof(double));
  int k;
  for (k = 0; k < max_iter; k++) {
    double sum = 0.0;
    for (int i = 0; i < N_ROWS; i++) {
      // Correction for row i: temp_dx = (b[i] - A_i . x) / A[i][i].
      double temp_dx = b[i];
      for (int j = 0; j < N_ROWS; j++) temp_dx -= A[i * N_ROWS + j] * x[j];
      temp_dx /= A[i * N_ROWS + i];
      x_new[i] += temp_dx;
      sum += fabs(temp_dx);  // L1 norm of the sweep's correction (stop test)
    }
    // Publish the new iterate only after the full sweep (Jacobi, not
    // Gauss-Seidel: every row above used the previous x).
    for (int i = 0; i < N_ROWS; i++) x[i] = x_new[i];
    if (sum <= conv_threshold) break;
  }
  *num_iter = k + 1;
  free(x_new);
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/03_SYCL_Migration_Jacobi_Iterative/dpct_output/include/dpct/util.hpp | //==---- util.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_UTIL_HPP__
#define __DPCT_UTIL_HPP__
#include <sycl/sycl.hpp>
#include <complex>
#include <type_traits>
#include <cassert>
#include <cstdint>
namespace dpct {
namespace detail {
/// Wraps an error value of type \p T in a distinct type keyed by \p tag so
/// that otherwise-identical error representations (e.g. two int-based error
/// code families) cannot be mixed up accidentally. Converts implicitly to
/// and from the underlying \p T.
template <typename tag, typename T> class generic_error_type {
  T value_;
public:
  generic_error_type() = default;
  generic_error_type(T v) : value_{v} {}
  operator T() const { return value_; }
};
} // namespace detail
using err0 = detail::generic_error_type<struct err0_tag, int>;
using err1 = detail::generic_error_type<struct err1_tag, int>;
/// shift_sub_group_left move values held by the work-items in a sub_group
/// directly to another work-item in the sub_group, by shifting values a fixed
/// number of work-items to the left. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in logical sub_group gets value from another work-item whose
/// id is caller's id adds \p delta. If calculated id is outside the logical
/// sub_group id range, the work-item will get value from itself. The \p
/// logical_sub_group_size must be a power of 2 and not exceed input sub_group
/// size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_left(sycl::sub_group g, T x, unsigned int delta,
                       int logical_sub_group_size = 32) {
  unsigned int id = g.get_local_linear_id();
  // First work-item id past the end of the caller's logical sub-group.
  unsigned int end_index =
      (id / logical_sub_group_size + 1) * logical_sub_group_size;
  // Shift across the whole hardware sub-group first...
  T result = sycl::shift_group_left(g, x, delta);
  // ...then discard any value that crossed a logical sub-group boundary,
  // keeping the caller's own value instead (per the contract above).
  if ((id + delta) >= end_index) {
    result = x;
  }
  return result;
}
namespace experimental {
/// The logical-group is a logical collection of some work-items within a
/// work-group.
/// Note: Please make sure that the logical-group size is a power of 2 in the
/// range [1, current_sub_group_size].
class logical_group {
  sycl::nd_item<3> _item;  // work-item this view was constructed from
  sycl::group<3> _g;       // parent work-group being subdivided
  uint32_t _logical_group_size;
  // Number of logical groups in the parent (ceiling division below).
  uint32_t _group_linear_range_in_parent;
public:
  /// Dividing \p parent_group into several logical-groups.
  /// \param [in] item Current work-item.
  /// \param [in] parent_group The group to be divided.
  /// \param [in] size The logical-group size.
  logical_group(sycl::nd_item<3> item, sycl::group<3> parent_group,
                uint32_t size)
      : _item(item), _g(parent_group), _logical_group_size(size) {
    _group_linear_range_in_parent =
        (_g.get_local_linear_range() - 1) / _logical_group_size + 1;
  }
  /// Returns the index of the work-item within the logical-group.
  uint32_t get_local_linear_id() const {
    return _item.get_local_linear_id() % _logical_group_size;
  }
  /// Returns the number of work-items in the logical-group.
  uint32_t get_local_linear_range() const {
    // Fast path: the parent divides evenly, so every logical group is full.
    if (_g.get_local_linear_range() % _logical_group_size == 0) {
      return _logical_group_size;
    }
    // Otherwise the trailing logical group is partial; work-items in it
    // report the remainder size, all others report the full size.
    uint32_t last_item_group_id =
        _g.get_local_linear_range() / _logical_group_size;
    uint32_t first_of_last_group = last_item_group_id * _logical_group_size;
    if (_item.get_local_linear_id() >= first_of_last_group) {
      return _g.get_local_linear_range() - first_of_last_group;
    } else {
      return _logical_group_size;
    }
  }
};
} // namespace experimental
} // namespace dpct
#endif // __DPCT_UTIL_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/03_SYCL_Migration_Jacobi_Iterative/dpct_output/include/dpct/image.hpp | //==---- image.hpp --------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_IMAGE_HPP__
#define __DPCT_IMAGE_HPP__
#include <sycl/sycl.hpp>
#include "memory.hpp"
namespace dpct {
enum class image_channel_data_type {
signed_int,
unsigned_int,
fp,
};
/// Image channel info, include channel number, order, data width and type
class image_channel {
  image_channel_data_type _type = image_channel_data_type::signed_int;
  /// Number of channels.
  unsigned _channel_num = 0;
  /// Total size of all channels in bytes.
  unsigned _total_size = 0;
  /// Size of each channel in bytes.
  unsigned _channel_size = 0;
public:
  image_channel() = default;
  unsigned get_total_size() { return _total_size; }
  // NOTE: _total_size is recomputed here from the current _channel_size, so
  // the channel type/size should be set before the channel count.
  void set_channel_num(unsigned channel_num) {
    _channel_num = channel_num;
    _total_size = _channel_size * _channel_num;
  }
  /// image_channel constructor.
  /// \param r Channel r width in bits.
  /// \param g Channel g width in bits. Should be same with \p r, or zero.
  /// \param b Channel b width in bits. Should be same with \p g, or zero.
  /// \param a Channel a width in bits. Should be same with \p b, or zero.
  /// \param data_type Image channel data type: signed_nt, unsigned_int or fp.
  image_channel(int r, int g, int b, int a, image_channel_data_type data_type) {
    _type = data_type;
    // The channel count is the highest non-zero channel (a > b > g > r);
    // all present channels must share the same bit width.
    if (a) {
      assert(r == a && "SYCL doesn't support different channel size");
      assert(r == b && "SYCL doesn't support different channel size");
      assert(r == g && "SYCL doesn't support different channel size");
      set_channel_size(4, a);
    } else if (b) {
      assert(r == b && "SYCL doesn't support different channel size");
      assert(r == g && "SYCL doesn't support different channel size");
      set_channel_size(3, b);
    } else if (g) {
      assert(r == g && "SYCL doesn't support different channel size");
      set_channel_size(2, g);
    } else {
      set_channel_size(1, r);
    }
  }
  // Derives both the data-type category and the per-channel byte size from
  // the SYCL channel type; unrecognized types are silently ignored.
  void set_channel_type(sycl::image_channel_type type) {
    switch (type) {
    case sycl::image_channel_type::unsigned_int8:
      _type = image_channel_data_type::unsigned_int;
      _channel_size = 1;
      break;
    case sycl::image_channel_type::unsigned_int16:
      _type = image_channel_data_type::unsigned_int;
      _channel_size = 2;
      break;
    case sycl::image_channel_type::unsigned_int32:
      _type = image_channel_data_type::unsigned_int;
      _channel_size = 4;
      break;
    case sycl::image_channel_type::signed_int8:
      _type = image_channel_data_type::signed_int;
      _channel_size = 1;
      break;
    case sycl::image_channel_type::signed_int16:
      _type = image_channel_data_type::signed_int;
      _channel_size = 2;
      break;
    case sycl::image_channel_type::signed_int32:
      _type = image_channel_data_type::signed_int;
      _channel_size = 4;
      break;
    case sycl::image_channel_type::fp16:
      _type = image_channel_data_type::fp;
      _channel_size = 2;
      break;
    case sycl::image_channel_type::fp32:
      _type = image_channel_data_type::fp;
      _channel_size = 4;
      break;
    default:
      break;
    }
    _total_size = _channel_size * _channel_num;
  }
  /// Set channel size.
  /// \param in_channel_num Channels number to set.
  /// \param channel_size Size for each channel in bits.
  void set_channel_size(unsigned in_channel_num,
                        unsigned channel_size) {
    // Never shrink the channel count; callers may invoke this repeatedly
    // with increasing counts (see the r/g/b/a constructor above).
    if (in_channel_num < _channel_num)
      return;
    _channel_num = in_channel_num;
    _channel_size = channel_size / 8;  // bits -> bytes
    _total_size = _channel_size * _channel_num;
  }
};
/// 2D or 3D matrix data for image.
class image_matrix {
  image_channel _channel;
  int _range[3] = {1, 1, 1};  // extent per dimension; unused dims stay 1
  int _dims = 0;
  // Owning pointer to host-side pixel storage, released in the destructor.
  // NOTE(review): the class defines a destructor but no copy/move control
  // (Rule of Five); copying an image_matrix would double-free _host_data —
  // confirm callers never copy instances.
  void *_host_data = nullptr;
  /// Set range of each dimension.
  template <int dimensions> void set_range(sycl::range<dimensions> range) {
    for (int i = 0; i < dimensions; ++i)
      _range[i] = range[i];
    _dims = dimensions;
  }
public:
  /// Constructor with channel info and dimension size info.
  template <int dimensions>
  image_matrix(image_channel channel, sycl::range<dimensions> range)
      : _channel(channel) {
    set_range(range);
    _host_data = std::malloc(range.size() * _channel.get_total_size());
  }
  // 1D (y == 0) or 2D matrix from a SYCL channel type plus channel count.
  image_matrix(sycl::image_channel_type channel_type, unsigned channel_num,
               size_t x, size_t y) {
    _channel.set_channel_type(channel_type);
    _channel.set_channel_num(channel_num);
    _dims = 1;
    _range[0] = x;
    if (y) {
      _dims = 2;
      _range[1] = y;
    }
    _host_data = std::malloc(_range[0] * _range[1] * _channel.get_total_size());
  }
  /// Convert to pitched data.
  // NOTE(review): the pitch passed here is _range[0] (elements), while the
  // allocation above is sized in elements * channel bytes — presumably the
  // consumers account for the channel size; verify before reuse.
  pitched_data to_pitched_data() {
    return pitched_data(_host_data, _range[0], _range[0], _range[1]);
  }
  ~image_matrix() {
    if (_host_data)
      std::free(_host_data);
    _host_data = nullptr;
  }
};
} // namespace dpct
#endif // __DPCT_IMAGE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/03_SYCL_Migration_Jacobi_Iterative/dpct_output/include/dpct/dpct.hpp | //==---- dpct.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_HPP__
#define __DPCT_HPP__
#include <sycl/sycl.hpp>
#include <iostream>
#include <limits.h>
#include <math.h>
#include "atomic.hpp"
#include "device.hpp"
#include "image.hpp"
#include "memory.hpp"
#include "util.hpp"
#define DPCT_COMPATIBILITY_TEMP (600)
#endif // __DPCT_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/03_SYCL_Migration_Jacobi_Iterative/dpct_output/include/dpct/device.hpp | //==---- device.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DEVICE_HPP__
#define __DPCT_DEVICE_HPP__
#include <sycl/sycl.hpp>
#include <algorithm>
#include <cstring>
#include <iostream>
#include <mutex>
#include <set>
#include <sstream>
#include <map>
#include <vector>
#include <thread>
#if defined(__linux__)
#include <unistd.h>
#include <sys/syscall.h>
#endif
#if defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#endif
namespace dpct {
/// SYCL default exception handler
// Rethrows each captured asynchronous exception so its dynamic type can be
// inspected; sycl::exception instances are reported to stderr. Note that
// __FILE__/__LINE__ report this header's location, not the failing call
// site. Exceptions of any other type are not caught here and will
// propagate out of the handler.
auto exception_handler = [](sycl::exception_list exceptions) {
  for (std::exception_ptr const &e : exceptions) {
    try {
      std::rethrow_exception(e);
    } catch (sycl::exception const &e) {
      std::cerr << "Caught asynchronous SYCL exception:" << std::endl
                << e.what() << std::endl
                << "Exception caught at file:" << __FILE__
                << ", line:" << __LINE__ << std::endl;
    }
  }
};
typedef sycl::queue *queue_ptr;
// Mutable value type holding device properties; filled in by
// device_ext::get_device_info() below. Getters exist only for the fields
// visible callers read; setters populate the rest.
// NOTE(review): several members (_name, _major, _frequency, ...) have no
// in-class initializer and are only meaningful after the corresponding
// setter has run.
class device_info {
public:
  int get_integrated() const { return _integrated; }
  int get_max_clock_frequency() const { return _frequency; }
  int get_max_compute_units() const { return _max_compute_units; }
  // Copies the NUL-terminated name, truncating to 255 chars + terminator.
  void set_name(const char* name) {
    size_t length = strlen(name);
    if (length < 256) {
      std::memcpy(_name, name, length + 1);
    } else {
      std::memcpy(_name, name, 255);
      _name[255] = '\0';
    }
  }
  // Stores the sizes both as a sycl::id and as a plain int array mirror.
  void set_max_work_item_sizes(const sycl::id<3> max_work_item_sizes) {
    _max_work_item_sizes = max_work_item_sizes;
    for (int i = 0; i < 3; ++i)
      _max_work_item_sizes_i[i] = max_work_item_sizes[i];
  }
  void set_host_unified_memory(bool host_unified_memory) {
    _host_unified_memory = host_unified_memory;
  }
  void set_major_version(int major) { _major = major; }
  void set_minor_version(int minor) { _minor = minor; }
  void set_max_clock_frequency(int frequency) { _frequency = frequency; }
  void set_max_compute_units(int max_compute_units) {
    _max_compute_units = max_compute_units;
  }
  void set_global_mem_size(size_t global_mem_size) {
    _global_mem_size = global_mem_size;
  }
  void set_local_mem_size(size_t local_mem_size) {
    _local_mem_size = local_mem_size;
  }
  void set_max_work_group_size(int max_work_group_size) {
    _max_work_group_size = max_work_group_size;
  }
  void set_max_sub_group_size(int max_sub_group_size) {
    _max_sub_group_size = max_sub_group_size;
  }
  void
  set_max_work_items_per_compute_unit(int max_work_items_per_compute_unit) {
    _max_work_items_per_compute_unit = max_work_items_per_compute_unit;
  }
  // Mirrors the three extents into both the size_t and int arrays.
  void set_max_nd_range_size(int max_nd_range_size[]) {
    for (int i = 0; i < 3; i++) {
      _max_nd_range_size[i] = max_nd_range_size[i];
      _max_nd_range_size_i[i] = max_nd_range_size[i];
    }
  }
  void set_memory_clock_rate(unsigned int memory_clock_rate) {
    _memory_clock_rate = memory_clock_rate;
  }
  void set_memory_bus_width(unsigned int memory_bus_width) {
    _memory_bus_width = memory_bus_width;
  }
private:
  char _name[256];
  sycl::id<3> _max_work_item_sizes;
  int _max_work_item_sizes_i[3];
  bool _host_unified_memory = false;
  int _major;
  int _minor;
  int _integrated = 0;
  int _frequency;
  // Set estimated value 3200000 kHz as default value.
  unsigned int _memory_clock_rate = 3200000;
  // Set estimated value 64 bits as default value.
  unsigned int _memory_bus_width = 64;
  int _max_compute_units;
  int _max_work_group_size;
  int _max_sub_group_size;
  int _max_work_items_per_compute_unit;
  int _max_register_size_per_work_group;
  size_t _global_mem_size;
  size_t _local_mem_size;
  size_t _max_nd_range_size[3];
  int _max_nd_range_size_i[3];
};
/// dpct device extension
// Extends sycl::device with CUDA-style version queries, a per-device
// context, and queue lifetime management (all queues this class creates
// are in-order and owned by _queues).
class device_ext : public sycl::device {
public:
  // NOTE(review): the default constructor does not call create_queue(), so
  // _default_queue/_saved_queue remain uninitialized — presumably
  // default-constructed instances are only placeholders; confirm
  // default_queue() is never called on one.
  device_ext() : sycl::device(), _ctx(*this) {}
  // Joins any outstanding helper threads before the queues are released.
  ~device_ext() {
    std::lock_guard<std::recursive_mutex> lock(m_mutex);
    for (auto &task : _tasks) {
      if (task.joinable())
        task.join();
    }
    _tasks.clear();
    _queues.clear();
  }
  device_ext(const sycl::device &base)
      : sycl::device(base), _ctx(*this) {
    _saved_queue = _default_queue = create_queue(true);
  }
  int get_major_version() const {
    int major, minor;
    get_version(major, minor);
    return major;
  }
  int get_minor_version() const {
    int major, minor;
    get_version(major, minor);
    return minor;
  }
  // Note: these delegate to get_device_info(), which re-queries the
  // runtime on every call.
  int get_max_compute_units() const {
    return get_device_info().get_max_compute_units();
  }
  int get_max_clock_frequency() const {
    return get_device_info().get_max_clock_frequency();
  }
  int get_integrated() const { return get_device_info().get_integrated(); }
  // Populates \p out with the device's properties via SYCL info queries.
  void get_device_info(device_info &out) const {
    device_info prop;
    prop.set_name(get_info<sycl::info::device::name>().c_str());
    int major, minor;
    get_version(major, minor);
    prop.set_major_version(major);
    prop.set_minor_version(minor);
    prop.set_max_work_item_sizes(
#if (__SYCL_COMPILER_VERSION && __SYCL_COMPILER_VERSION<20220902)
        // oneAPI DPC++ compiler older than 2022/09/02, where max_work_item_sizes is an enum class element
        get_info<sycl::info::device::max_work_item_sizes>());
#else
        // SYCL 2020-conformant code, max_work_item_sizes is a struct templated by an int
        get_info<sycl::info::device::max_work_item_sizes<3>>());
#endif
    prop.set_host_unified_memory(
        this->has(sycl::aspect::usm_host_allocations));
    prop.set_max_clock_frequency(
        get_info<sycl::info::device::max_clock_frequency>());
    prop.set_max_compute_units(
        get_info<sycl::info::device::max_compute_units>());
    prop.set_max_work_group_size(
        get_info<sycl::info::device::max_work_group_size>());
    prop.set_global_mem_size(
        get_info<sycl::info::device::global_mem_size>());
    prop.set_local_mem_size(get_info<sycl::info::device::local_mem_size>());
#if (defined(SYCL_EXT_INTEL_DEVICE_INFO) && SYCL_EXT_INTEL_DEVICE_INFO >= 6)
    // Intel extension reports MHz; convert to kHz to match the default.
    if (this->has(sycl::aspect::ext_intel_memory_clock_rate)) {
      unsigned int tmp =
          this->get_info<sycl::ext::intel::info::device::memory_clock_rate>();
      if (tmp != 0)
        prop.set_memory_clock_rate(1000 * tmp);
    }
    if (this->has(sycl::aspect::ext_intel_memory_bus_width)) {
      prop.set_memory_bus_width(
          this->get_info<sycl::ext::intel::info::device::memory_bus_width>());
    }
#elif defined(_MSC_VER) && !defined(__clang__)
#pragma message("get_device_info: querying memory_clock_rate and \
memory_bus_width are not supported by the compiler used. \
Use 3200000 kHz as memory_clock_rate default value. \
Use 64 bits as memory_bus_width default value.")
#else
#warning "get_device_info: querying memory_clock_rate and \
memory_bus_width are not supported by the compiler used. \
Use 3200000 kHz as memory_clock_rate default value. \
Use 64 bits as memory_bus_width default value."
#endif
    // Report the largest supported sub-group size.
    size_t max_sub_group_size = 1;
    std::vector<size_t> sub_group_sizes =
        get_info<sycl::info::device::sub_group_sizes>();
    for (const auto &sub_group_size : sub_group_sizes) {
      if (max_sub_group_size < sub_group_size)
        max_sub_group_size = sub_group_size;
    }
    prop.set_max_sub_group_size(max_sub_group_size);
    prop.set_max_work_items_per_compute_unit(
        get_info<sycl::info::device::max_work_group_size>());
    int max_nd_range_size[] = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
    prop.set_max_nd_range_size(max_nd_range_size);
    // Estimates max register size per work group, feel free to update the value
    // according to device properties.
    prop.set_max_register_size_per_work_group(65536);
    out = prop;
  }
  device_info get_device_info() const {
    device_info prop;
    get_device_info(prop);
    return prop;
  }
  sycl::queue &default_queue() { return *_default_queue; }
  // Creates an in-order queue on this device's context; the queue stays
  // alive until this device_ext is destroyed.
  sycl::queue *create_queue(bool enable_exception_handler = false) {
    std::lock_guard<std::recursive_mutex> lock(m_mutex);
    sycl::async_handler eh = {};
    if (enable_exception_handler) {
      eh = exception_handler;
    }
    auto property = get_default_property_list_for_queue();
    _queues.push_back(std::make_shared<sycl::queue>(
        _ctx, *this, eh, property));
    return _queues.back().get();
  }
private:
  // In-order always; profiling is added when DPCT_PROFILING_ENABLED is set.
  sycl::property_list get_default_property_list_for_queue() const {
#ifdef DPCT_PROFILING_ENABLED
    auto property =
        sycl::property_list{sycl::property::queue::enable_profiling(),
                            sycl::property::queue::in_order()};
#else
    auto property =
        sycl::property_list{sycl::property::queue::in_order()};
#endif
    return property;
  }
  void get_version(int &major, int &minor) const {
    // Version string has the following format:
    // a. OpenCL<space><major.minor><space><vendor-specific-information>
    // b. <major.minor>
    std::string ver;
    ver = get_info<sycl::info::device::version>();
    std::string::size_type i = 0;
    // Skip any non-digit prefix (e.g. "OpenCL "), then parse major...
    while (i < ver.size()) {
      if (isdigit(ver[i]))
        break;
      i++;
    }
    major = std::stoi(&(ver[i]));
    // ...advance past the '.' separator and parse minor.
    while (i < ver.size()) {
      if (ver[i] == '.')
        break;
      i++;
    }
    i++;
    minor = std::stoi(&(ver[i]));
  }
  sycl::queue *_default_queue;
  sycl::queue *_saved_queue;
  sycl::context _ctx;
  std::vector<std::shared_ptr<sycl::queue>> _queues;
  mutable std::recursive_mutex m_mutex;
  std::vector<std::thread> _tasks;
};
// Returns an OS-level identifier for the calling thread (Linux TID via
// syscall, Win32 thread id); used as the key of dev_mgr's thread-to-device
// map below.
static inline unsigned int get_tid() {
#if defined(__linux__)
  return syscall(SYS_gettid);
#elif defined(_WIN64)
  return GetCurrentThreadId();
#else
#error "Only support Windows and Linux."
#endif
}
/// device manager
// Singleton that enumerates all SYCL devices (the default-selector device
// is always index 0) and tracks a "current" device per OS thread.
class dev_mgr {
public:
  device_ext &current_device() {
    unsigned int dev_id=current_device_id();
    check_id(dev_id);
    return *_devs[dev_id];
  }
  device_ext &get_device(unsigned int id) const {
    std::lock_guard<std::recursive_mutex> lock(m_mutex);
    check_id(id);
    return *_devs[id];
  }
  // Device bound to the calling thread, or DEFAULT_DEVICE_ID if the thread
  // never called select_device().
  unsigned int current_device_id() const {
    std::lock_guard<std::recursive_mutex> lock(m_mutex);
    auto it=_thread2dev_map.find(get_tid());
    if(it != _thread2dev_map.end())
      return it->second;
    return DEFAULT_DEVICE_ID;
  }
  void select_device(unsigned int id) {
    std::lock_guard<std::recursive_mutex> lock(m_mutex);
    check_id(id);
    _thread2dev_map[get_tid()]=id;
  }
  // NOTE(review): reads _devs without taking m_mutex — presumably safe
  // because _devs is only populated in the constructor; confirm.
  unsigned int device_count() { return _devs.size(); }
  /// Returns the instance of device manager singleton.
  static dev_mgr &instance() {
    static dev_mgr d_m;
    return d_m;
  }
  dev_mgr(const dev_mgr &) = delete;
  dev_mgr &operator=(const dev_mgr &) = delete;
  dev_mgr(dev_mgr &&) = delete;
  dev_mgr &operator=(dev_mgr &&) = delete;
private:
  mutable std::recursive_mutex m_mutex;
  // Enumerates devices once: default device first, then all others.
  dev_mgr() {
    sycl::device default_device =
        sycl::device(sycl::default_selector_v);
    _devs.push_back(std::make_shared<device_ext>(default_device));
    std::vector<sycl::device> sycl_all_devs =
        sycl::device::get_devices(sycl::info::device_type::all);
    // Collect other devices except for the default device.
    if (default_device.is_cpu())
      _cpu_device = 0;
    for (auto &dev : sycl_all_devs) {
      if (dev == default_device) {
        continue;
      }
      _devs.push_back(std::make_shared<device_ext>(dev));
      // Remember the first CPU device encountered.
      if (_cpu_device == -1 && dev.is_cpu()) {
        _cpu_device = _devs.size() - 1;
      }
    }
  }
  void check_id(unsigned int id) const {
    if (id >= _devs.size()) {
      throw std::runtime_error("invalid device id");
    }
  }
  std::vector<std::shared_ptr<device_ext>> _devs;
  /// DEFAULT_DEVICE_ID is used, if current_device_id() can not find current
  /// thread id in _thread2dev_map, which means default device should be used
  /// for the current thread.
  const unsigned int DEFAULT_DEVICE_ID = 0;
  /// thread-id to device-id map.
  std::map<unsigned int, unsigned int> _thread2dev_map;
  int _cpu_device = -1;
};
/// Util function to get the default queue of current device in
/// dpct device manager.
static inline sycl::queue &get_default_queue() {
  // Resolve the device bound to the calling thread, then hand back its
  // default in-order queue.
  device_ext &dev = dev_mgr::instance().current_device();
  return dev.default_queue();
}
/// Util function to get the current device.
static inline device_ext &get_current_device() {
  // Thin forwarding helper over the device-manager singleton.
  auto &mgr = dev_mgr::instance();
  return mgr.current_device();
}
// Binds device \p id to the calling thread and echoes the id back so the
// call can be used in expression position.
static inline unsigned int select_device(unsigned int id) {
  auto &mgr = dev_mgr::instance();
  mgr.select_device(id);
  return id;
}
} // namespace dpct
#endif // __DPCT_DEVICE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/03_SYCL_Migration_Jacobi_Iterative/dpct_output/include/dpct/memory.hpp | //==---- memory.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_HPP__
#define __DPCT_MEMORY_HPP__
#include <sycl/sycl.hpp>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <unordered_map>
#include <map>
#include <utility>
#include <thread>
#include <type_traits>
#if defined(__linux__)
#include <sys/mman.h>
#elif defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#else
#error "Only support Windows and Linux."
#endif
#include "device.hpp"
namespace dpct {
enum memcpy_direction {
host_to_host,
host_to_device,
device_to_host,
device_to_device,
automatic
};
/// Pitched 2D/3D memory data.
class pitched_data {
public:
  pitched_data() : pitched_data(nullptr, 0, 0, 0) {}
  /// \param data Base address of the allocation.
  /// \param pitch Allocated width of one row, in bytes.
  /// \param x Logical width of the region.
  /// \param y Logical height of the region, in rows.
  pitched_data(void *data, size_t pitch, size_t x, size_t y)
      : _data(data), _pitch(pitch), _x(x), _y(y) {}
  void *get_data_ptr() { return _data; }
  void set_data_ptr(void *data) { _data = data; }
  size_t get_pitch() { return _pitch; }
  void set_pitch(size_t pitch) { _pitch = pitch; }
  // _x was previously stored but unreadable; expose it like _pitch/_y, and
  // provide setters so descriptors can be adjusted in place.
  size_t get_x() { return _x; }
  void set_x(size_t x) { _x = x; }
  size_t get_y() { return _y; }
  void set_y(size_t y) { _y = y; }
private:
  void *_data;
  size_t _pitch, _x, _y;
};
namespace detail {
/// Set \p value to the first \p size bytes starting from \p dev_ptr in \p q.
///
/// \param q The queue in which the operation is done.
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \returns An event representing the memset operation.
static inline sycl::event dpct_memset(sycl::queue &q, void *dev_ptr,
                                      int value, size_t size) {
  // Thin wrapper over USM memset; as with std::memset, \p value is
  // interpreted as a byte fill pattern.
  return q.memset(dev_ptr, value, size);
}
/// Set \p value to the 3D memory region pointed by \p data in \p q. \p size
/// specifies the 3D memory size to set.
///
/// \param q The queue in which the operation is done.
/// \param data Pointer to the device memory region.
/// \param value Value to be set.
/// \param size Memory region size.
/// \returns An event list representing the memset operations.
static inline std::vector<sycl::event>
dpct_memset(sycl::queue &q, pitched_data data, int value,
            sycl::range<3> size) {
  std::vector<sycl::event> event_list;
  // Bytes per 2D plane of the (possibly padded) allocation.
  size_t slice = data.get_pitch() * data.get_y();
  unsigned char *data_surface = (unsigned char *)data.get_data_ptr();
  // One memset of size.get(0) bytes per row; rows stride by the pitch, so
  // padding bytes between rows are left untouched.
  for (size_t z = 0; z < size.get(2); ++z) {
    unsigned char *data_ptr = data_surface;
    for (size_t y = 0; y < size.get(1); ++y) {
      event_list.push_back(dpct_memset(q, data_ptr, value, size.get(0)));
      data_ptr += data.get_pitch();
    }
    data_surface += slice;
  }
  return event_list;
}
/// memset 2D matrix with pitch.
static inline std::vector<sycl::event>
dpct_memset(sycl::queue &q, void *ptr, size_t pitch, int val, size_t x,
            size_t y) {
  // Model the 2D region as a single-slice 3D one: y rows of x bytes each,
  // rows separated by \p pitch bytes.
  return dpct_memset(q, pitched_data(ptr, pitch, x, 1), val,
                     sycl::range<3>(x, y, 1));
}
// Who may dereference a pointer, as deduced from its USM allocation kind
// (see get_pointer_attribute below).
enum class pointer_access_attribute {
  host_only = 0,  // non-USM / unknown: host-only memory
  device_only,    // sycl::usm::alloc::device
  host_device,    // shared or host USM: accessible on both sides
  end             // attribute count; used to size lookup tables
};
// Classifies \p ptr by who can dereference it: plain host memory
// (non-USM), USM device-only memory, or host-and-device accessible
// (shared/host USM), based on the pointer's allocation kind in \p q's
// context.
static pointer_access_attribute get_pointer_attribute(sycl::queue &q,
                                                      const void *ptr) {
  switch (sycl::get_pointer_type(ptr, q.get_context())) {
  case sycl::usm::alloc::unknown:
    return pointer_access_attribute::host_only;
  case sycl::usm::alloc::device:
    return pointer_access_attribute::device_only;
  case sycl::usm::alloc::shared:
  case sycl::usm::alloc::host:
    return pointer_access_attribute::host_device;
  }
  // Defensive default: falling off the end of a value-returning function
  // is undefined behavior if sycl::usm::alloc ever gains a new enumerator,
  // so treat anything unrecognized like an ordinary host pointer.
  return pointer_access_attribute::host_only;
}
// Resolves \p dir to a concrete copy direction: explicit directions are
// trusted as-is; `automatic` is resolved by querying the USM allocation
// kind of both pointers.
static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr,
                                                const void *from_ptr,
                                                memcpy_direction dir) {
  switch (dir) {
  case memcpy_direction::host_to_host:
  case memcpy_direction::host_to_device:
  case memcpy_direction::device_to_host:
  case memcpy_direction::device_to_device:
    return dir;
  case memcpy_direction::automatic: {
    // table[to_attribute][from_attribute]
    // Rows: destination attribute, columns: source attribute, both in
    // pointer_access_attribute order (host_only, device_only, host_device).
    static const memcpy_direction
        direction_table[static_cast<unsigned>(pointer_access_attribute::end)]
                       [static_cast<unsigned>(pointer_access_attribute::end)] =
            {{memcpy_direction::host_to_host,
              memcpy_direction::device_to_host,
              memcpy_direction::host_to_host},
             {memcpy_direction::host_to_device,
              memcpy_direction::device_to_device,
              memcpy_direction::device_to_device},
             {memcpy_direction::host_to_host,
              memcpy_direction::device_to_device,
              memcpy_direction::device_to_device}};
    return direction_table[static_cast<unsigned>(get_pointer_attribute(
        q, to_ptr))][static_cast<unsigned>(get_pointer_attribute(q, from_ptr))];
  }
  default:
    throw std::runtime_error("dpct_memcpy: invalid direction value");
  }
}
static sycl::event
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size,
            memcpy_direction direction,
            const std::vector<sycl::event> &dep_events = {}) {
  // Zero-byte copies complete immediately with a default (no-op) event.
  if (!size)
    return sycl::event{};
  // USM memcpy handles any source/destination placement, so `direction`
  // is accepted for interface uniformity but not consulted here.
  return q.memcpy(to_ptr, from_ptr, size, dep_events);
}
// Get actual copy range and make sure it will not exceed range.
// Byte distance from the first byte touched by a 3D copy of \p size to one
// past the last, given the slice and row (pitch) strides of the layout.
static inline size_t get_copy_range(sycl::range<3> size, size_t slice,
                                    size_t pitch) {
  const size_t full_slices = size.get(2) - 1;
  const size_t full_rows = size.get(1) - 1;
  return full_slices * slice + full_rows * pitch + size.get(0);
}
// Linear byte offset of element \p id in a layout with the given slice and
// row (pitch) strides.
static inline size_t get_offset(sycl::id<3> id, size_t slice,
                                size_t pitch) {
  size_t offset = id.get(0);
  offset += pitch * id.get(1);
  offset += slice * id.get(2);
  return offset;
}
/// copy 3D matrix specified by \p size from 3D matrix specified by \p from_ptr
/// and \p from_range to another specified by \p to_ptr and \p to_range.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
            sycl::range<3> to_range, sycl::range<3> from_range,
            sycl::id<3> to_id, sycl::id<3> from_id,
            sycl::range<3> size, memcpy_direction direction,
            const std::vector<sycl::event> &dep_events = {}) {
  // RAII for host pointer
  // Temporary staging buffer; freed via a host_task submitted on
  // destruction, ordered after the events collected in _deps, so the
  // asynchronous copies using it have completed by the time it is freed.
  class host_buffer {
    void *_buf;
    size_t _size;
    sycl::queue &_q;
    const std::vector<sycl::event> &_deps; // free operation depends
  public:
    host_buffer(size_t size, sycl::queue &q,
                const std::vector<sycl::event> &deps)
        : _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {}
    void *get_ptr() const { return _buf; }
    size_t get_size() const { return _size; }
    ~host_buffer() {
      if (_buf) {
        _q.submit([&](sycl::handler &cgh) {
          cgh.depends_on(_deps);
          cgh.host_task([buf = _buf] { std::free(buf); });
        });
      }
    }
  };
  std::vector<sycl::event> event_list;
  // Bytes per 2D plane on each side (pitch * rows).
  size_t to_slice = to_range.get(1) * to_range.get(0),
         from_slice = from_range.get(1) * from_range.get(0);
  unsigned char *to_surface =
      (unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0));
  const unsigned char *from_surface =
      (const unsigned char *)from_ptr +
      get_offset(from_id, from_slice, from_range.get(0));
  // Fast path: both sides are densely packed with identical layout, so the
  // whole region is one contiguous block.
  if (to_slice == from_slice && to_slice == size.get(1) * size.get(0)) {
    return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2),
                        direction, dep_events)};
  }
  direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
  size_t size_slice = size.get(1) * size.get(0);
  switch (direction) {
  case host_to_host:
    // Plain host memory on both sides: copy plane by plane, collapsing to
    // one copy per plane when rows are contiguous.
    for (size_t z = 0; z < size.get(2); ++z) {
      unsigned char *to_ptr = to_surface;
      const unsigned char *from_ptr = from_surface;
      if (to_range.get(0) == from_range.get(0) &&
          to_range.get(0) == size.get(0)) {
        event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice,
                                         direction, dep_events));
      } else {
        for (size_t y = 0; y < size.get(1); ++y) {
          event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0),
                                           direction, dep_events));
          to_ptr += to_range.get(0);
          from_ptr += from_range.get(0);
        }
      }
      to_surface += to_slice;
      from_surface += from_slice;
    }
    break;
  case host_to_device: {
    // Stage through a host buffer shaped like the destination so the
    // device transfer can be a single contiguous memcpy.
    host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q,
                    event_list);
    std::vector<sycl::event> host_events;
    if (to_slice == size_slice) {
      // Copy host data to a temp host buffer with the shape of target.
      host_events =
          dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range,
                      sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size,
                      host_to_host, dep_events);
    } else {
      // Copy host data to a temp host buffer with the shape of target.
      host_events = dpct_memcpy(
          q, buf.get_ptr(), from_surface, to_range, from_range,
          sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
          // If has padding data, not sure whether it is useless. So fill temp
          // buffer with it.
          std::vector<sycl::event>{
              dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(),
                          device_to_host, dep_events)});
    }
    // Copy from temp host buffer to device with only one submit.
    event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(),
                                     buf.get_size(), host_to_device,
                                     host_events));
    break;
  }
  case device_to_host: {
    // Mirror of host_to_device: one contiguous device read into a staging
    // buffer, then reshape on the host.
    host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q,
                    event_list);
    // Copy from host temp buffer to host target with reshaping.
    event_list = dpct_memcpy(
        q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0),
        sycl::id<3>(0, 0, 0), size, host_to_host,
        // Copy from device to temp host buffer with only one submit.
        std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface,
                                             buf.get_size(),
                                             device_to_host, dep_events)});
    break;
  }
  case device_to_device:
    // Both sides device-accessible: a kernel copies byte-by-byte, handling
    // arbitrary pitches on either side.
    event_list.push_back(q.submit([&](sycl::handler &cgh) {
      cgh.depends_on(dep_events);
      cgh.parallel_for<class dpct_memcpy_3d_detail>(
          size,
          [=](sycl::id<3> id) {
            to_surface[get_offset(id, to_slice, to_range.get(0))] =
                from_surface[get_offset(id, from_slice, from_range.get(0))];
          });
    }));
    break;
  default:
    throw std::runtime_error("dpct_memcpy: invalid direction value");
  }
  return event_list;
}
/// memcpy 2D/3D matrix specified by pitched_data.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id,
            pitched_data from, sycl::id<3> from_id, sycl::range<3> size,
            memcpy_direction direction = automatic) {
  // Unpack each pitched descriptor into a raw pointer plus a
  // (pitch, height, 1) layout range for the generic 3D implementation.
  return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(),
                     sycl::range<3>(to.get_pitch(), to.get_y(), 1),
                     sycl::range<3>(from.get_pitch(), from.get_y(), 1), to_id, from_id,
                     size, direction);
}
/// Copy a 2D matrix given destination/source pitches in bytes.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
            size_t to_pitch, size_t from_pitch, size_t x, size_t y,
            memcpy_direction direction = automatic) {
  // A pitched 2D copy is a degenerate 3D copy with a single z-slice.
  const sycl::id<3> origin(0, 0, 0);
  return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1),
                     sycl::range<3>(from_pitch, y, 1), origin, origin,
                     sycl::range<3>(x, y, 1), direction);
}
} // namespace detail
/// Asynchronously copies \p size bytes from the address specified by \p
/// from_ptr to the address specified by \p to_ptr. The value of \p direction is
/// used to set the copy direction, it can be \a host_to_host, \a
/// host_to_device, \a device_to_host, \a device_to_device or \a automatic. The
/// return of the function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void async_dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
                              memcpy_direction direction = automatic,
                              sycl::queue &q = dpct::get_default_queue()) {
  // Fire-and-forget: the event returned by the detail helper is dropped on
  // purpose; callers that need completion must synchronize the queue.
  (void)detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction);
}
/// Asynchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while \p
/// \p from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The return of the
/// function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void
async_dpct_memcpy(void *to_ptr, size_t to_pitch, const void *from_ptr,
                  size_t from_pitch, size_t x, size_t y,
                  memcpy_direction direction = automatic,
                  sycl::queue &q = get_default_queue()) {
  // Asynchronous pitched 2D copy: submit and return without waiting on the
  // returned events.
  (void)detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch, from_pitch, x, y,
                            direction);
}
/// Asynchronously copies a subset of a 3D matrix specified by \p to to another
/// 3D matrix specified by \p from. The from and to position info are specified
/// by \p from_pos and \p to_pos The copied matrix size is specified by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The return of the function does NOT guarantee the copy is
/// completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of destination.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void
async_dpct_memcpy(pitched_data to, sycl::id<3> to_pos, pitched_data from,
                  sycl::id<3> from_pos, sycl::range<3> size,
                  memcpy_direction direction = automatic,
                  sycl::queue &q = get_default_queue()) {
  // Asynchronous 3D sub-matrix copy: events are intentionally discarded,
  // so completion is not guaranteed when this returns.
  (void)detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction);
}
/// Asynchronously sets \p value to the first \p size bytes starting from \p
/// dev_ptr. The return of the function does NOT guarantee the memset operation
/// is completed.
///
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \returns no return value.
static void async_dpct_memset(void *dev_ptr, int value, size_t size,
                              sycl::queue &q = dpct::get_default_queue()) {
  // Submit the byte-fill without waiting; completion is not guaranteed on
  // return.
  (void)detail::dpct_memset(q, dev_ptr, value, size);
}
/// Sets \p value to the 2D memory region pointed by \p ptr in \p q. \p x and
/// \p y specify the setted 2D memory size. \p pitch is the bytes in linear
/// dimension, including padding bytes. The return of the function does NOT
/// guarantee the memset operation is completed.
///
/// \param ptr Pointer to the device memory region.
/// \param pitch Bytes in linear dimension, including padding bytes.
/// \param value Value to be set.
/// \param x The setted memory size in linear dimension.
/// \param y The setted memory size in second dimension.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(void *ptr, size_t pitch, int val, size_t x,
                                     size_t y,
                                     sycl::queue &q = get_default_queue()) {
  // Asynchronous pitched 2D fill: the operation is only submitted here.
  (void)detail::dpct_memset(q, ptr, pitch, val, x, y);
}
/// Sets \p value to the 3D memory region specified by \p pitch in \p q. \p size
/// specify the setted 3D memory size. The return of the function does NOT
/// guarantee the memset operation is completed.
///
/// \param pitch Specify the 3D memory region.
/// \param value Value to be set.
/// \param size The setted 3D memory size.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(pitched_data pitch, int val,
                                     sycl::range<3> size,
                                     sycl::queue &q = get_default_queue()) {
  // Asynchronous 3D fill over a pitched region; no wait is performed.
  (void)detail::dpct_memset(q, pitch, val, size);
}
} // namespace dpct
#endif // __DPCT_MEMORY_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/03_SYCL_Migration_Jacobi_Iterative/dpct_output/include/dpct/atomic.hpp | //==---- atomic.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ATOMIC_HPP__
#define __DPCT_ATOMIC_HPP__
#include <sycl/sycl.hpp>

#include <cassert>
#include <stdexcept>
namespace dpct {
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_add(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_add(operand);
}
template <sycl::access::address_space addressSpace =
              sycl::access::address_space::global_space,
          sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
          sycl::memory_scope memoryScope = sycl::memory_scope::device,
          typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand) {
  // Mixed-type variant: the operand is converted by fetch_add's parameter;
  // the atomic object itself has type T1.
  sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace> ref(*addr);
  return ref.fetch_add(operand);
}
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
                          sycl::access::address_space::global_space>
inline T atomic_fetch_add(T *addr, T operand,
                          sycl::memory_order memoryOrder) {
  // Dispatch a runtime memory order to the corresponding compile-time
  // specialization of atomic_fetch_add.
  switch (memoryOrder) {
  case sycl::memory_order::relaxed:
    return atomic_fetch_add<T, addressSpace, sycl::memory_order::relaxed,
                            sycl::memory_scope::device>(addr, operand);
  case sycl::memory_order::acq_rel:
    return atomic_fetch_add<T, addressSpace, sycl::memory_order::acq_rel,
                            sycl::memory_scope::device>(addr, operand);
  case sycl::memory_order::seq_cst:
    return atomic_fetch_add<T, addressSpace, sycl::memory_order::seq_cst,
                            sycl::memory_scope::device>(addr, operand);
  default:
    assert(false && "Invalid memory_order for atomics. Valid memory_order for "
                    "atomics are: sycl::memory_order::relaxed, "
                    "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
    // Bug fix: previously control flowed off the end of this non-void
    // function when assertions are disabled (NDEBUG), which is undefined
    // behavior. Fail loudly instead.
    throw std::invalid_argument("atomic_fetch_add: invalid memory_order");
  }
}
template <sycl::access::address_space addressSpace =
              sycl::access::address_space::global_space,
          typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand,
                           sycl::memory_order memoryOrder) {
  // Bug fix: the dispatched call's result was previously discarded and
  // control flowed off the end of a non-void function (undefined behavior);
  // propagate the fetched value to the caller.
  return atomic_fetch_add<T1, addressSpace>(addr, operand, memoryOrder);
}
} // namespace dpct
#endif // __DPCT_ATOMIC_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/01_SYCL_Migration_Simple_VectorAdd/sycl_migrated_optimized/vectoradd.dp.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iostream>
#include <vector>
#define N 16
//# kernel code to perform VectorAdd on GPU
// Element-wise vector add: each work-item computes one output element.
void VectorAddKernel(float* A, float* B, float* C,
                     const sycl::nd_item<3> &item_ct1)
{
  // The local id along dimension 2 selects the element this item handles.
  const size_t idx = item_ct1.get_local_id(2);
  C[idx] = A[idx] + B[idx];
}
int main()
{
  // Default-constructed queue is out-of-order, so the explicit event
  // dependencies below (e1, e2, e3) establish copy -> kernel -> copy-back
  // ordering.
  sycl::queue q_ct1;
  std::cout << "Device: " << q_ct1.get_device().get_info<sycl::info::device::name>() << "\n";
  //# Initialize vectors on host
  float A[N] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
  float B[N] = {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2};
  float C[N] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  //# Allocate USM device memory for the three vectors
  float *d_A, *d_B, *d_C;
  d_A = sycl::malloc_device<float>(N, q_ct1);
  d_B = sycl::malloc_device<float>(N, q_ct1);
  d_C = sycl::malloc_device<float>(N, q_ct1);
  //# copy vector data from host to device; events e1/e2 gate the kernel
  auto e1 = q_ct1.memcpy(d_A, A, N * sizeof(float));
  auto e2 = q_ct1.memcpy(d_B, B, N * sizeof(float));
  //# submit VectorAdd kernel: one work-group of N work-items, after e1/e2
  auto e3 = q_ct1.parallel_for(
      sycl::nd_range<3>(sycl::range<3>(1, 1, N), sycl::range<3>(1, 1, N)), {e1, e2},
      [=](sycl::nd_item<3> item_ct1) {
        VectorAddKernel(d_A, d_B, d_C, item_ct1);
      });
  //# copy result back after the kernel (e3) and block until it lands in C
  q_ct1.memcpy(C, d_C, N * sizeof(float), e3).wait();
  //# print result on host
  for (int i = 0; i < N; i++) std::cout<< C[i] << " ";
  std::cout << "\n";
  //# free device allocations
  sycl::free(d_A, q_ct1);
  sycl::free(d_B, q_ct1);
  sycl::free(d_C, q_ct1);
  return 0;
}
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/01_SYCL_Migration_Simple_VectorAdd/sycl_migrated_optimized/include/dpct/dpct.hpp | //==---- dpct.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_HPP__
#define __DPCT_HPP__
#include <sycl/sycl.hpp>
#include <iostream>
#include <limits.h>
#include <math.h>
#include "device.hpp"
#include "memory.hpp"
#endif // __DPCT_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/01_SYCL_Migration_Simple_VectorAdd/sycl_migrated_optimized/include/dpct/device.hpp | //==---- device.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DEVICE_HPP__
#define __DPCT_DEVICE_HPP__
#include <sycl/sycl.hpp>
#include <algorithm>
#include <cstring>
#include <iostream>
#include <mutex>
#include <set>
#include <sstream>
#include <map>
#include <vector>
#include <thread>
#if defined(__linux__)
#include <unistd.h>
#include <sys/syscall.h>
#endif
#if defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#endif
namespace dpct {
/// SYCL default exception handler
auto exception_handler = [](sycl::exception_list exceptions) {
  // Rethrow each captured asynchronous exception; sycl::exception is logged,
  // anything else propagates out of the handler.
  for (std::exception_ptr const &e : exceptions) {
    try {
      std::rethrow_exception(e);
    } catch (sycl::exception const &e) {
      // NOTE: __FILE__/__LINE__ report this header location, not the
      // original submission site.
      std::cerr << "Caught asynchronous SYCL exception:" << std::endl
                << e.what() << std::endl
                << "Exception caught at file:" << __FILE__
                << ", line:" << __LINE__ << std::endl;
    }
  }
};
/// dpct device extension
// Extends sycl::device with a context, an owned set of queues, and a
// default in-order queue created at construction.
class device_ext : public sycl::device {
public:
  device_ext() : sycl::device(), _ctx(*this) {}
  // Join helper threads first, then release queues: tasks may still be
  // using the queues, so the order matters.
  ~device_ext() {
    std::lock_guard<std::recursive_mutex> lock(m_mutex);
    for (auto &task : _tasks) {
      if (task.joinable())
        task.join();
    }
    _tasks.clear();
    _queues.clear();
  }
  device_ext(const sycl::device &base)
      : sycl::device(base), _ctx(*this) {
    // The default queue gets the exception handler installed.
    _saved_queue = _default_queue = create_queue(true);
  }
  sycl::queue &default_queue() { return *_default_queue; }
  // Creates and owns a new queue on this device/context; the returned raw
  // pointer remains valid for the lifetime of this device_ext.
  sycl::queue *create_queue(bool enable_exception_handler = false) {
    std::lock_guard<std::recursive_mutex> lock(m_mutex);
    sycl::async_handler eh = {};
    if (enable_exception_handler) {
      eh = exception_handler;
    }
    auto property = get_default_property_list_for_queue();
    _queues.push_back(std::make_shared<sycl::queue>(
        _ctx, *this, eh, property));
    return _queues.back().get();
  }

private:
  // Queues are in-order; profiling is enabled only when the translation
  // unit defines DPCT_PROFILING_ENABLED.
  sycl::property_list get_default_property_list_for_queue() const {
#ifdef DPCT_PROFILING_ENABLED
    auto property =
        sycl::property_list{sycl::property::queue::enable_profiling(),
                            sycl::property::queue::in_order()};
#else
    auto property =
        sycl::property_list{sycl::property::queue::in_order()};
#endif
    return property;
  }

  sycl::queue *_default_queue;
  sycl::queue *_saved_queue;
  sycl::context _ctx;
  std::vector<std::shared_ptr<sycl::queue>> _queues;
  mutable std::recursive_mutex m_mutex;
  std::vector<std::thread> _tasks;
};
// Returns the OS-level thread id of the calling thread (used as the key of
// the thread->device map).
static inline unsigned int get_tid() {
#if defined(__linux__)
  // Kernel thread id (not pthread_t); unique across the process.
  return syscall(SYS_gettid);
#elif defined(_WIN64)
  return GetCurrentThreadId();
#else
#error "Only support Windows and Linux."
#endif
}
/// device manager
// Process-wide singleton that enumerates SYCL devices and tracks which
// device each thread is currently using.
class dev_mgr {
public:
  // Device selected for the calling thread (falls back to the default).
  device_ext &current_device() {
    unsigned int dev_id=current_device_id();
    check_id(dev_id);
    return *_devs[dev_id];
  }
  unsigned int current_device_id() const {
    std::lock_guard<std::recursive_mutex> lock(m_mutex);
    auto it=_thread2dev_map.find(get_tid());
    if(it != _thread2dev_map.end())
      return it->second;
    // Thread never selected a device: use the default one.
    return DEFAULT_DEVICE_ID;
  }
  /// Returns the instance of device manager singleton.
  static dev_mgr &instance() {
    static dev_mgr d_m;
    return d_m;
  }
  dev_mgr(const dev_mgr &) = delete;
  dev_mgr &operator=(const dev_mgr &) = delete;
  dev_mgr(dev_mgr &&) = delete;
  dev_mgr &operator=(dev_mgr &&) = delete;

private:
  mutable std::recursive_mutex m_mutex;
  // Puts the default-selected device at index 0, then appends every other
  // device; remembers the index of the first CPU device found.
  dev_mgr() {
    sycl::device default_device =
        sycl::device(sycl::default_selector_v);
    _devs.push_back(std::make_shared<device_ext>(default_device));
    std::vector<sycl::device> sycl_all_devs =
        sycl::device::get_devices(sycl::info::device_type::all);
    // Collect other devices except for the default device.
    if (default_device.is_cpu())
      _cpu_device = 0;
    for (auto &dev : sycl_all_devs) {
      if (dev == default_device) {
        continue;
      }
      _devs.push_back(std::make_shared<device_ext>(dev));
      if (_cpu_device == -1 && dev.is_cpu()) {
        _cpu_device = _devs.size() - 1;
      }
    }
  }
  void check_id(unsigned int id) const {
    if (id >= _devs.size()) {
      throw std::runtime_error("invalid device id");
    }
  }
  std::vector<std::shared_ptr<device_ext>> _devs;
  /// DEFAULT_DEVICE_ID is used, if current_device_id() can not find current
  /// thread id in _thread2dev_map, which means default device should be used
  /// for the current thread.
  const unsigned int DEFAULT_DEVICE_ID = 0;
  /// thread-id to device-id map.
  std::map<unsigned int, unsigned int> _thread2dev_map;
  int _cpu_device = -1;
};
/// Util function to get the default queue of current device in
/// dpct device manager.
static inline sycl::queue &get_default_queue() {
  // Convenience accessor: the default queue of the thread's current device.
  device_ext &dev = dev_mgr::instance().current_device();
  return dev.default_queue();
}
/// Util function to get the current device.
static inline device_ext &get_current_device() {
  // Forward to the singleton device manager.
  dev_mgr &mgr = dev_mgr::instance();
  return mgr.current_device();
}
} // namespace dpct
#endif // __DPCT_DEVICE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/01_SYCL_Migration_Simple_VectorAdd/sycl_migrated_optimized/include/dpct/memory.hpp | //==---- memory.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_HPP__
#define __DPCT_MEMORY_HPP__
#include <sycl/sycl.hpp>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <unordered_map>
#include <map>
#include <utility>
#include <thread>
#include <type_traits>
#if defined(__linux__)
#include <sys/mman.h>
#elif defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#else
#error "Only support Windows and Linux."
#endif
#include "device.hpp"
namespace dpct {
// Direction of a dpct_memcpy; `automatic` asks the library to deduce it
// from the pointers' USM allocation types (see deduce_memcpy_direction).
enum memcpy_direction {
  host_to_host,
  host_to_device,
  device_to_host,
  device_to_device,
  automatic
};
/// Pitched 2D/3D memory data: a base pointer plus row pitch (bytes, may
/// include padding), row width in bytes (x) and row count (y).
class pitched_data {
public:
  pitched_data() : pitched_data(nullptr, 0, 0, 0) {}
  pitched_data(void *data, size_t pitch, size_t x, size_t y)
      : _data(data), _pitch(pitch), _x(x), _y(y) {}
  /// Base address of the allocation.
  void *get_data_ptr() { return _data; }
  /// Row pitch in bytes (>= get_x(); may include padding bytes).
  size_t get_pitch() { return _pitch; }
  /// Width in bytes of one row. Added accessor: _x was stored by the
  /// constructors but previously had no way to be read back.
  size_t get_x() { return _x; }
  /// Number of rows.
  size_t get_y() { return _y; }

private:
  void *_data;
  size_t _pitch, _x, _y;
};
namespace detail {
// Classification of a pointer by where it can be dereferenced.
enum class pointer_access_attribute {
  host_only = 0, // not a known SYCL allocation: plain host memory
  device_only,   // USM device allocation: not host-accessible
  host_device,   // USM shared/host allocation: accessible on both sides
  end            // sentinel: number of attributes (sizes the lookup table)
};
// Classifies \p ptr by its USM allocation type in \p q's context.
static pointer_access_attribute get_pointer_attribute(sycl::queue &q,
                                                      const void *ptr) {
  switch (sycl::get_pointer_type(ptr, q.get_context())) {
  case sycl::usm::alloc::unknown:
    return pointer_access_attribute::host_only;
  case sycl::usm::alloc::device:
    return pointer_access_attribute::device_only;
  case sycl::usm::alloc::shared:
  case sycl::usm::alloc::host:
    return pointer_access_attribute::host_device;
  }
  // Defensive fallback: previously control could fall off the end of this
  // non-void function (undefined behavior) if the enum ever yields a value
  // outside the handled set; treat such pointers as host memory.
  return pointer_access_attribute::host_only;
}
static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr,
                                                const void *from_ptr,
                                                memcpy_direction dir) {
  // Resolves `automatic` by classifying both pointers; explicit directions
  // are returned unchanged.
  switch (dir) {
  case memcpy_direction::host_to_host:
  case memcpy_direction::host_to_device:
  case memcpy_direction::device_to_host:
  case memcpy_direction::device_to_device:
    return dir;
  case memcpy_direction::automatic: {
    // table[to_attribute][from_attribute]
    static const memcpy_direction
        direction_table[static_cast<unsigned>(pointer_access_attribute::end)]
                       [static_cast<unsigned>(pointer_access_attribute::end)] =
            {{memcpy_direction::host_to_host,
              memcpy_direction::device_to_host,
              memcpy_direction::host_to_host},
             {memcpy_direction::host_to_device,
              memcpy_direction::device_to_device,
              memcpy_direction::device_to_device},
             {memcpy_direction::host_to_host,
              memcpy_direction::device_to_device,
              memcpy_direction::device_to_device}};
    // Row = destination attribute, column = source attribute.
    return direction_table[static_cast<unsigned>(get_pointer_attribute(
        q, to_ptr))][static_cast<unsigned>(get_pointer_attribute(q, from_ptr))];
  }
  default:
    throw std::runtime_error("dpct_memcpy: invalid direction value");
  }
}
// Linear byte copy on \p q; \p direction is accepted for interface symmetry
// (q.memcpy deduces the direction from the USM pointers itself).
static sycl::event
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size,
            memcpy_direction direction,
            const std::vector<sycl::event> &dep_events = {}) {
  // A zero-byte copy is a no-op; hand back an already-complete event.
  if (size == 0)
    return sycl::event{};
  return q.memcpy(to_ptr, from_ptr, size, dep_events);
}
// Get actual copy range and make sure it will not exceed range.
static inline size_t get_copy_range(sycl::range<3> size, size_t slice,
                                    size_t pitch) {
  // Bytes spanned by the copy: whole slices/rows for all but the last
  // z/y index, plus the final partial row (size.get(0) bytes).
  return slice * (size.get(2) - 1) + pitch * (size.get(1) - 1) + size.get(0);
}
static inline size_t get_offset(sycl::id<3> id, size_t slice,
                                size_t pitch) {
  // Linear byte offset of element `id` in a pitched 3D layout.
  return slice * id.get(2) + pitch * id.get(1) + id.get(0);
}
/// copy 3D matrix specified by \p size from 3D matrix specified by \p from_ptr
/// and \p from_range to another specified by \p to_ptr and \p to_range.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
            sycl::range<3> to_range, sycl::range<3> from_range,
            sycl::id<3> to_id, sycl::id<3> from_id,
            sycl::range<3> size, memcpy_direction direction,
            const std::vector<sycl::event> &dep_events = {}) {
  // RAII for host pointer
  class host_buffer {
    void *_buf;
    size_t _size;
    sycl::queue &_q;
    const std::vector<sycl::event> &_deps; // free operation depends
  public:
    host_buffer(size_t size, sycl::queue &q,
                const std::vector<sycl::event> &deps)
        : _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {}
    void *get_ptr() const { return _buf; }
    size_t get_size() const { return _size; }
    // Defer the free via a host_task so it runs only after every event in
    // _deps (the copies that use the buffer) has completed.
    ~host_buffer() {
      if (_buf) {
        _q.submit([&](sycl::handler &cgh) {
          cgh.depends_on(_deps);
          cgh.host_task([buf = _buf] { std::free(buf); });
        });
      }
    }
  };
  std::vector<sycl::event> event_list;
  // Bytes in one z-slice of destination/source (pitch * rows).
  size_t to_slice = to_range.get(1) * to_range.get(0),
         from_slice = from_range.get(1) * from_range.get(0);
  // Surfaces: base pointers advanced to the requested 3D start positions.
  unsigned char *to_surface =
      (unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0));
  const unsigned char *from_surface =
      (const unsigned char *)from_ptr +
      get_offset(from_id, from_slice, from_range.get(0));
  // Fast path: both sides are densely packed, so one linear copy suffices.
  if (to_slice == from_slice && to_slice == size.get(1) * size.get(0)) {
    return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2),
                        direction, dep_events)};
  }
  direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
  size_t size_slice = size.get(1) * size.get(0);
  switch (direction) {
  case host_to_host:
    // Host-to-host reshaping: one memcpy per contiguous run (whole slice
    // when pitches match, otherwise row by row).
    for (size_t z = 0; z < size.get(2); ++z) {
      unsigned char *to_ptr = to_surface;
      const unsigned char *from_ptr = from_surface;
      if (to_range.get(0) == from_range.get(0) &&
          to_range.get(0) == size.get(0)) {
        event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice,
                                         direction, dep_events));
      } else {
        for (size_t y = 0; y < size.get(1); ++y) {
          event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0),
                                           direction, dep_events));
          to_ptr += to_range.get(0);
          from_ptr += from_range.get(0);
        }
      }
      to_surface += to_slice;
      from_surface += from_slice;
    }
    break;
  case host_to_device: {
    // Stage through a host buffer shaped like the target, then perform a
    // single linear host-to-device transfer.
    host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q,
                    event_list);
    std::vector<sycl::event> host_events;
    if (to_slice == size_slice) {
      // Copy host data to a temp host buffer with the shape of target.
      host_events =
          dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range,
                      sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size,
                      host_to_host, dep_events);
    } else {
      // Copy host data to a temp host buffer with the shape of target.
      host_events = dpct_memcpy(
          q, buf.get_ptr(), from_surface, to_range, from_range,
          sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
          // If has padding data, not sure whether it is useless. So fill temp
          // buffer with it.
          std::vector<sycl::event>{
              dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(),
                          device_to_host, dep_events)});
    }
    // Copy from temp host buffer to device with only one submit.
    event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(),
                                     buf.get_size(), host_to_device,
                                     host_events));
    break;
  }
  case device_to_host: {
    // Mirror of host_to_device: one linear device-to-host transfer into a
    // temp buffer, then reshape on the host.
    host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q,
                    event_list);
    // Copy from host temp buffer to host target with reshaping.
    event_list = dpct_memcpy(
        q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0),
        sycl::id<3>(0, 0, 0), size, host_to_host,
        // Copy from device to temp host buffer with only one submit.
        std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface,
                                             buf.get_size(),
                                             device_to_host, dep_events)});
    break;
  }
  case device_to_device:
    // Device-to-device reshaping: a kernel moves one byte per work-item.
    event_list.push_back(q.submit([&](sycl::handler &cgh) {
      cgh.depends_on(dep_events);
      cgh.parallel_for<class dpct_memcpy_3d_detail>(
          size,
          [=](sycl::id<3> id) {
            to_surface[get_offset(id, to_slice, to_range.get(0))] =
                from_surface[get_offset(id, from_slice, from_range.get(0))];
          });
    }));
    break;
  default:
    throw std::runtime_error("dpct_memcpy: invalid direction value");
  }
  return event_list;
}
/// Copy a 2D/3D matrix described by two pitched_data records.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id,
            pitched_data from, sycl::id<3> from_id, sycl::range<3> size,
            memcpy_direction direction = automatic) {
  // Translate both pitched descriptors into generic 3D byte ranges and
  // delegate to the range-based overload.
  const sycl::range<3> dst_extent(to.get_pitch(), to.get_y(), 1);
  const sycl::range<3> src_extent(from.get_pitch(), from.get_y(), 1);
  return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(), dst_extent,
                     src_extent, to_id, from_id, size, direction);
}
/// Copy a 2D matrix given destination/source pitches in bytes.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
            size_t to_pitch, size_t from_pitch, size_t x, size_t y,
            memcpy_direction direction = automatic) {
  // Model the 2D copy as a 3D copy whose z extent is one slice.
  const sycl::id<3> zero(0, 0, 0);
  return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1),
                     sycl::range<3>(from_pitch, y, 1), zero, zero,
                     sycl::range<3>(x, y, 1), direction);
}
} // namespace detail
/// Synchronously copies \p size bytes from the address specified by \p from_ptr
/// to the address specified by \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device,
/// \a device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction).wait();
}
/// Synchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while \p
/// from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(void *to_ptr, size_t to_pitch,
                               const void *from_ptr, size_t from_pitch,
                               size_t x, size_t y,
                               memcpy_direction direction = automatic,
                               sycl::queue &q = dpct::get_default_queue()) {
  // Block until every event produced by the pitched 2D copy has finished.
  auto events = detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch, from_pitch,
                                    x, y, direction);
  sycl::event::wait(events);
}
/// Synchronously copies a subset of a 3D matrix specified by \p to to another
/// 3D matrix specified by \p from. The from and to position info are specified
/// by \p from_pos and \p to_pos The copied matrix size is specified by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The function will return after the copy is completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of destination.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(pitched_data to, sycl::id<3> to_pos,
                               pitched_data from, sycl::id<3> from_pos,
                               sycl::range<3> size,
                               memcpy_direction direction = automatic,
                               sycl::queue &q = dpct::get_default_queue()) {
  // Block until the whole 3D sub-matrix copy completes.
  auto events =
      detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction);
  sycl::event::wait(events);
}
} // namespace dpct
#endif // __DPCT_MEMORY_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/01_SYCL_Migration_Simple_VectorAdd/sycl_migrated/vectoradd.dp.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <iostream>
#include <vector>
#define N 16
//# kernel code to perform VectorAdd on GPU
// Element-wise vector add executed on the device: one element per work-item.
void VectorAddKernel(float* A, float* B, float* C,
                     const sycl::nd_item<3> &item_ct1)
{
  // The work-item's local id along dimension 2 picks its element.
  const auto i = item_ct1.get_local_id(2);
  C[i] = A[i] + B[i];
}
int main()
{
  // The dpct default queue is created in-order, so submissions execute in
  // order and only the host-visible copies need explicit waits.
  dpct::device_ext &dev_ct1 = dpct::get_current_device();
  sycl::queue &q_ct1 = dev_ct1.default_queue();
  std::cout << "Device: " << q_ct1.get_device().get_info<sycl::info::device::name>() << "\n";
  //# Initialize vectors on host
  float A[N] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
  float B[N] = {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2};
  float C[N] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  //# Allocate USM device memory for the three vectors
  float *d_A, *d_B, *d_C;
  d_A = sycl::malloc_device<float>(N, q_ct1);
  d_B = sycl::malloc_device<float>(N, q_ct1);
  d_C = sycl::malloc_device<float>(N, q_ct1);
  //# copy vector data from host to device; wait so A/B are resident before
  //# the kernel reads them (the queue is in-order, so one wait suffices)
  q_ct1.memcpy(d_A, A, N * sizeof(float));
  q_ct1.memcpy(d_B, B, N * sizeof(float)).wait();
  //# submit VectorAdd kernel: a single work-group of N work-items
  q_ct1.parallel_for(
      sycl::nd_range<3>(sycl::range<3>(1, 1, N), sycl::range<3>(1, 1, N)),
      [=](sycl::nd_item<3> item_ct1) {
        VectorAddKernel(d_A, d_B, d_C, item_ct1);
      });
  //# copy result of vector data from device to host and block until done
  q_ct1.memcpy(C, d_C, N * sizeof(float)).wait();
  //# print result on host
  for (int i = 0; i < N; i++) std::cout<< C[i] << " ";
  std::cout << "\n";
  //# free device allocations
  sycl::free(d_A, q_ct1);
  sycl::free(d_B, q_ct1);
  sycl::free(d_C, q_ct1);
  return 0;
}
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/01_SYCL_Migration_Simple_VectorAdd/sycl_migrated/include/dpct/dpct.hpp | //==---- dpct.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_HPP__
#define __DPCT_HPP__
#include <sycl/sycl.hpp>
#include <iostream>
#include <limits.h>
#include <math.h>
#include "device.hpp"
#include "memory.hpp"
#endif // __DPCT_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/01_SYCL_Migration_Simple_VectorAdd/sycl_migrated/include/dpct/device.hpp | //==---- device.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DEVICE_HPP__
#define __DPCT_DEVICE_HPP__
#include <sycl/sycl.hpp>
#include <algorithm>
#include <cstring>
#include <iostream>
#include <mutex>
#include <set>
#include <sstream>
#include <map>
#include <vector>
#include <thread>
#if defined(__linux__)
#include <unistd.h>
#include <sys/syscall.h>
#endif
#if defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#endif
namespace dpct {
/// SYCL default exception handler
/// Rethrows each asynchronous exception and logs sycl::exception to stderr.
/// NOTE: __FILE__/__LINE__ report this header's location, not the failure
/// site; the useful information is e.what().
/// Declared `inline` (C++17) so that including this header in multiple
/// translation units does not violate the ODR with duplicate definitions.
inline auto exception_handler = [](sycl::exception_list exceptions) {
  for (std::exception_ptr const &e : exceptions) {
    try {
      std::rethrow_exception(e);
    } catch (sycl::exception const &e) {
      std::cerr << "Caught asynchronous SYCL exception:" << std::endl
                << e.what() << std::endl
                << "Exception caught at file:" << __FILE__
                << ", line:" << __LINE__ << std::endl;
    }
  }
};
/// dpct device extension: a sycl::device bundled with its own context and a
/// pool of in-order queues created on it. The device owns the queues; the
/// raw pointers handed out by create_queue() stay valid for its lifetime.
class device_ext : public sycl::device {
public:
  /// Default-constructs from the default-constructed base device.
  /// No queue is created here: _default_queue/_saved_queue stay null (see
  /// the in-class initializers below) until a queue is created explicitly.
  device_ext() : sycl::device(), _ctx(*this) {}
  ~device_ext() {
    std::lock_guard<std::recursive_mutex> lock(m_mutex);
    // Join any outstanding host tasks before the queues are torn down.
    for (auto &task : _tasks) {
      if (task.joinable())
        task.join();
    }
    _tasks.clear();
    _queues.clear();
  }
  /// Wraps \p base and eagerly creates the default queue with the
  /// asynchronous exception handler installed.
  device_ext(const sycl::device &base)
      : sycl::device(base), _ctx(*this) {
    _saved_queue = _default_queue = create_queue(true);
  }
  /// Returns the default in-order queue of this device.
  sycl::queue &default_queue() { return *_default_queue; }
  /// Creates a new in-order queue on this device and retains ownership.
  /// \param enable_exception_handler install dpct::exception_handler when true.
  sycl::queue *create_queue(bool enable_exception_handler = false) {
    std::lock_guard<std::recursive_mutex> lock(m_mutex);
    sycl::async_handler eh = {};
    if (enable_exception_handler) {
      eh = exception_handler;
    }
    auto property = get_default_property_list_for_queue();
    _queues.push_back(std::make_shared<sycl::queue>(
        _ctx, *this, eh, property));
    return _queues.back().get();
  }
private:
  /// Queues are always in-order; profiling is additionally enabled when
  /// DPCT_PROFILING_ENABLED is defined.
  sycl::property_list get_default_property_list_for_queue() const {
#ifdef DPCT_PROFILING_ENABLED
    auto property =
        sycl::property_list{sycl::property::queue::enable_profiling(),
                            sycl::property::queue::in_order()};
#else
    auto property =
        sycl::property_list{sycl::property::queue::in_order()};
#endif
    return property;
  }
  // Initialized to null so a default-constructed device_ext never holds
  // indeterminate pointers (the wrapping constructor overwrites these).
  sycl::queue *_default_queue = nullptr;
  sycl::queue *_saved_queue = nullptr;
  sycl::context _ctx;
  std::vector<std::shared_ptr<sycl::queue>> _queues;
  mutable std::recursive_mutex m_mutex;
  std::vector<std::thread> _tasks;
};
/// Returns the OS thread id of the calling thread; used as the key of the
/// thread-to-device map in dev_mgr.
static inline unsigned int get_tid() {
#if defined(__linux__)
  return syscall(SYS_gettid);
#elif defined(_WIN64)
  return GetCurrentThreadId();
#else
#error "Only support Windows and Linux."
#endif
}
/// device manager
/// Process-wide singleton owning one device_ext per visible SYCL device
/// (index 0 is always the default-selector device). Maps OS thread ids to
/// a "current device" id; unmapped threads use the default device.
class dev_mgr {
public:
  /// Returns the device_ext bound to the calling thread (the default
  /// device if the thread was never bound). Throws on an invalid id.
  device_ext &current_device() {
    unsigned int dev_id=current_device_id();
    check_id(dev_id);
    return *_devs[dev_id];
  }
  /// Looks up the calling thread in _thread2dev_map; falls back to
  /// DEFAULT_DEVICE_ID when the thread has no entry.
  unsigned int current_device_id() const {
    std::lock_guard<std::recursive_mutex> lock(m_mutex);
    auto it=_thread2dev_map.find(get_tid());
    if(it != _thread2dev_map.end())
      return it->second;
    return DEFAULT_DEVICE_ID;
  }
  /// Returns the instance of device manager singleton.
  /// (Thread-safe via C++11 function-local static initialization.)
  static dev_mgr &instance() {
    static dev_mgr d_m;
    return d_m;
  }
  // Singleton: not copyable, not movable.
  dev_mgr(const dev_mgr &) = delete;
  dev_mgr &operator=(const dev_mgr &) = delete;
  dev_mgr(dev_mgr &&) = delete;
  dev_mgr &operator=(dev_mgr &&) = delete;
private:
  mutable std::recursive_mutex m_mutex;
  /// Enumerates all SYCL devices. The default-selector device is inserted
  /// first and skipped during the remaining enumeration; _cpu_device
  /// records the index of the first CPU device found (-1 if none).
  dev_mgr() {
    sycl::device default_device =
        sycl::device(sycl::default_selector_v);
    _devs.push_back(std::make_shared<device_ext>(default_device));
    std::vector<sycl::device> sycl_all_devs =
        sycl::device::get_devices(sycl::info::device_type::all);
    // Collect other devices except for the default device.
    if (default_device.is_cpu())
      _cpu_device = 0;
    for (auto &dev : sycl_all_devs) {
      if (dev == default_device) {
        continue;
      }
      _devs.push_back(std::make_shared<device_ext>(dev));
      if (_cpu_device == -1 && dev.is_cpu()) {
        _cpu_device = _devs.size() - 1;
      }
    }
  }
  /// Throws std::runtime_error when \p id is out of range of _devs.
  void check_id(unsigned int id) const {
    if (id >= _devs.size()) {
      throw std::runtime_error("invalid device id");
    }
  }
  std::vector<std::shared_ptr<device_ext>> _devs;
  /// DEFAULT_DEVICE_ID is used, if current_device_id() can not find current
  /// thread id in _thread2dev_map, which means default device should be used
  /// for the current thread.
  const unsigned int DEFAULT_DEVICE_ID = 0;
  /// thread-id to device-id map.
  std::map<unsigned int, unsigned int> _thread2dev_map;
  int _cpu_device = -1;
};
/// Util function to get the default queue of current device in
/// dpct device manager.
static inline sycl::queue &get_default_queue() {
  auto &dev = dev_mgr::instance().current_device();
  return dev.default_queue();
}
/// Util function to get the current device.
static inline device_ext &get_current_device() {
  auto &mgr = dev_mgr::instance();
  return mgr.current_device();
}
} // namespace dpct
#endif // __DPCT_DEVICE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/01_SYCL_Migration_Simple_VectorAdd/sycl_migrated/include/dpct/memory.hpp | //==---- memory.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_HPP__
#define __DPCT_MEMORY_HPP__
#include <sycl/sycl.hpp>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <unordered_map>
#include <map>
#include <utility>
#include <thread>
#include <type_traits>
#if defined(__linux__)
#include <sys/mman.h>
#elif defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#else
#error "Only support Windows and Linux."
#endif
#include "device.hpp"
namespace dpct {
enum memcpy_direction {
host_to_host,
host_to_device,
device_to_host,
device_to_device,
automatic
};
/// Pitched 2D/3D memory data.
/// _pitch is the allocated row width in bytes; _x and _y are the logical
/// extents of the matrix in dims x and y.
class pitched_data {
public:
  pitched_data() : pitched_data(nullptr, 0, 0, 0) {}
  pitched_data(void *data, size_t pitch, size_t x, size_t y)
      : _data(data), _pitch(pitch), _x(x), _y(y) {}
  void *get_data_ptr() { return _data; }
  size_t get_pitch() { return _pitch; }
  /// Logical extent in dim x. (The field was stored but previously had no
  /// accessor.)
  size_t get_x() { return _x; }
  size_t get_y() { return _y; }
private:
  void *_data;
  size_t _pitch, _x, _y;
};
namespace detail {
enum class pointer_access_attribute {
host_only = 0,
device_only,
host_device,
end
};
/// Classifies \p ptr relative to \p q's context: host-only (not a USM
/// allocation), device-only (device USM), or host+device (shared/host USM).
static pointer_access_attribute get_pointer_attribute(sycl::queue &q,
                                                      const void *ptr) {
  switch (sycl::get_pointer_type(ptr, q.get_context())) {
  case sycl::usm::alloc::device:
    return pointer_access_attribute::device_only;
  case sycl::usm::alloc::shared:
  case sycl::usm::alloc::host:
    return pointer_access_attribute::host_device;
  case sycl::usm::alloc::unknown:
  default:
    // Explicit fallback: the original switch had no default, so control
    // could flow off the end of this value-returning function (UB) for an
    // unexpected enumerator. Unknown pointers are treated as host memory.
    return pointer_access_attribute::host_only;
  }
}
/// Resolves \p dir: the four explicit directions pass through unchanged;
/// \a automatic is resolved by classifying both pointers with
/// get_pointer_attribute and looking the pair up in a direction table.
static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr,
                                                const void *from_ptr,
                                                memcpy_direction dir) {
  switch (dir) {
  case memcpy_direction::host_to_host:
  case memcpy_direction::host_to_device:
  case memcpy_direction::device_to_host:
  case memcpy_direction::device_to_device:
    return dir;
  case memcpy_direction::automatic: {
    // table[to_attribute][from_attribute]
    // Rows index the destination attribute, columns the source attribute,
    // in enum order: host_only, device_only, host_device.
    static const memcpy_direction
        direction_table[static_cast<unsigned>(pointer_access_attribute::end)]
                       [static_cast<unsigned>(pointer_access_attribute::end)] =
            {{memcpy_direction::host_to_host,
              memcpy_direction::device_to_host,
              memcpy_direction::host_to_host},
             {memcpy_direction::host_to_device,
              memcpy_direction::device_to_device,
              memcpy_direction::device_to_device},
             {memcpy_direction::host_to_host,
              memcpy_direction::device_to_device,
              memcpy_direction::device_to_device}};
    return direction_table[static_cast<unsigned>(get_pointer_attribute(
        q, to_ptr))][static_cast<unsigned>(get_pointer_attribute(q, from_ptr))];
  }
  default:
    throw std::runtime_error("dpct_memcpy: invalid direction value");
  }
}
/// Enqueues a plain byte copy on \p q after \p dep_events complete.
/// A zero-byte request enqueues nothing and yields an empty event.
/// (\p direction is accepted for interface symmetry; q.memcpy does not
/// need it.)
static sycl::event
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size,
            memcpy_direction direction,
            const std::vector<sycl::event> &dep_events = {}) {
  return size ? q.memcpy(to_ptr, from_ptr, size, dep_events)
              : sycl::event{};
}
// Get actual copy range and make sure it will not exceed range.
// Byte distance from the first to one-past-last touched byte of a pitched
// region: full slices for all but the last z, full rows for all but the
// last y, then one row's worth of payload.
static inline size_t get_copy_range(sycl::range<3> size, size_t slice,
                                    size_t pitch) {
  const size_t depth = size.get(2);
  const size_t height = size.get(1);
  const size_t width = size.get(0);
  return slice * (depth - 1) + pitch * (height - 1) + width;
}
// Byte offset of element \p id inside a pitched region with the given
// slice (z) and pitch (y) strides.
static inline size_t get_offset(sycl::id<3> id, size_t slice,
                                size_t pitch) {
  const size_t z = id.get(2);
  const size_t y = id.get(1);
  const size_t x = id.get(0);
  return slice * z + pitch * y + x;
}
/// copy 3D matrix specified by \p size from 3D matrix specified by \p from_ptr
/// and \p from_range to another specified by \p to_ptr and \p to_range.
///
/// Strategy depends on the (deduced) direction:
///  - contiguous-on-both-sides copies collapse into a single 1D memcpy;
///  - host_to_host copies row-by-row (or slice-by-slice) with std::memcpy
///    semantics via the 1D overload;
///  - host_to_device / device_to_host stage through a malloc'd host buffer
///    shaped like the device side, so only one device transfer is submitted;
///  - device_to_device runs an element-wise kernel.
/// Returns the events of all submitted operations.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
            sycl::range<3> to_range, sycl::range<3> from_range,
            sycl::id<3> to_id, sycl::id<3> from_id,
            sycl::range<3> size, memcpy_direction direction,
            const std::vector<sycl::event> &dep_events = {}) {
  // RAII for host pointer
  // The destructor does not free synchronously: it submits a host_task that
  // frees the buffer only after the dependent events (_deps) complete, so
  // the buffer outlives the async copies that read/write it.
  class host_buffer {
    void *_buf;
    size_t _size;
    sycl::queue &_q;
    const std::vector<sycl::event> &_deps; // free operation depends
  public:
    host_buffer(size_t size, sycl::queue &q,
                const std::vector<sycl::event> &deps)
        : _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {}
    void *get_ptr() const { return _buf; }
    size_t get_size() const { return _size; }
    ~host_buffer() {
      if (_buf) {
        _q.submit([&](sycl::handler &cgh) {
          cgh.depends_on(_deps);
          cgh.host_task([buf = _buf] { std::free(buf); });
        });
      }
    }
  };
  std::vector<sycl::event> event_list;
  size_t to_slice = to_range.get(1) * to_range.get(0),
         from_slice = from_range.get(1) * from_range.get(0);
  // Surfaces: the first byte actually touched on each side.
  unsigned char *to_surface =
      (unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0));
  const unsigned char *from_surface =
      (const unsigned char *)from_ptr +
      get_offset(from_id, from_slice, from_range.get(0));
  // Fast path: both sides dense and shaped like the copy -> one 1D memcpy.
  if (to_slice == from_slice && to_slice == size.get(1) * size.get(0)) {
    return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2),
                        direction, dep_events)};
  }
  direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
  size_t size_slice = size.get(1) * size.get(0);
  switch (direction) {
  case host_to_host:
    for (size_t z = 0; z < size.get(2); ++z) {
      unsigned char *to_ptr = to_surface;
      const unsigned char *from_ptr = from_surface;
      if (to_range.get(0) == from_range.get(0) &&
          to_range.get(0) == size.get(0)) {
        event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice,
                                         direction, dep_events));
      } else {
        for (size_t y = 0; y < size.get(1); ++y) {
          event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0),
                                           direction, dep_events));
          to_ptr += to_range.get(0);
          from_ptr += from_range.get(0);
        }
      }
      to_surface += to_slice;
      from_surface += from_slice;
    }
    break;
  case host_to_device: {
    host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q,
                    event_list);
    std::vector<sycl::event> host_events;
    if (to_slice == size_slice) {
      // Copy host data to a temp host buffer with the shape of target.
      host_events =
          dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range,
                      sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size,
                      host_to_host, dep_events);
    } else {
      // Copy host data to a temp host buffer with the shape of target.
      host_events = dpct_memcpy(
          q, buf.get_ptr(), from_surface, to_range, from_range,
          sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
          // If has padding data, not sure whether it is useless. So fill temp
          // buffer with it.
          std::vector<sycl::event>{
              dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(),
                          device_to_host, dep_events)});
    }
    // Copy from temp host buffer to device with only one submit.
    event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(),
                                     buf.get_size(), host_to_device,
                                     host_events));
    break;
  }
  case device_to_host: {
    host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q,
                    event_list);
    // Copy from host temp buffer to host target with reshaping.
    event_list = dpct_memcpy(
        q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0),
        sycl::id<3>(0, 0, 0), size, host_to_host,
        // Copy from device to temp host buffer with only one submit.
        std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface,
                                             buf.get_size(),
                                             device_to_host, dep_events)});
    break;
  }
  case device_to_device:
    // One work-item per element; offsets recomputed per side to honor the
    // two (possibly different) pitches.
    event_list.push_back(q.submit([&](sycl::handler &cgh) {
      cgh.depends_on(dep_events);
      cgh.parallel_for<class dpct_memcpy_3d_detail>(
          size,
          [=](sycl::id<3> id) {
            to_surface[get_offset(id, to_slice, to_range.get(0))] =
                from_surface[get_offset(id, from_slice, from_range.get(0))];
          });
    }));
    break;
  default:
    throw std::runtime_error("dpct_memcpy: invalid direction value");
  }
  return event_list;
}
/// memcpy 2D/3D matrix specified by pitched_data.
/// Unpacks both pitched descriptors into (pitch, y, 1) strides and forwards
/// to the general 3D overload.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id,
            pitched_data from, sycl::id<3> from_id, sycl::range<3> size,
            memcpy_direction direction = automatic) {
  const sycl::range<3> to_strides(to.get_pitch(), to.get_y(), 1);
  const sycl::range<3> from_strides(from.get_pitch(), from.get_y(), 1);
  return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(), to_strides,
                     from_strides, to_id, from_id, size, direction);
}
/// memcpy 2D matrix with pitch.
/// Expresses the 2D copy as a 3D copy of depth 1 starting at the origin.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
            size_t to_pitch, size_t from_pitch, size_t x, size_t y,
            memcpy_direction direction = automatic) {
  const sycl::id<3> origin(0, 0, 0);
  return dpct_memcpy(q, to_ptr, from_ptr,
                     sycl::range<3>(to_pitch, y, 1),
                     sycl::range<3>(from_pitch, y, 1),
                     origin, origin,
                     sycl::range<3>(x, y, 1), direction);
}
} // namespace detail
/// Synchronously copies \p size bytes from the address specified by \p from_ptr
/// to the address specified by \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device,
/// \a device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
                        memcpy_direction direction = automatic,
                        sycl::queue &q = get_default_queue()) {
  // Submit the async copy, then block until it completes.
  auto copy_done = detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction);
  copy_done.wait();
}
/// Synchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while \p
/// from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(void *to_ptr, size_t to_pitch,
                               const void *from_ptr, size_t from_pitch,
                               size_t x, size_t y,
                               memcpy_direction direction = automatic,
                               sycl::queue &q = dpct::get_default_queue()) {
  // Submit all row/slice copies, then wait for the whole batch.
  auto events = detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch,
                                    from_pitch, x, y, direction);
  sycl::event::wait(events);
}
/// Synchronously copies a subset of a 3D matrix specified by \p to to another
/// 3D matrix specified by \p from. The from and to position info are specified
/// by \p from_pos and \p to_pos The copied matrix size is specified by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The function will return after the copy is completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of destination.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(pitched_data to, sycl::id<3> to_pos,
                               pitched_data from, sycl::id<3> from_pos,
                               sycl::range<3> size,
                               memcpy_direction direction = automatic,
                               sycl::queue &q = dpct::get_default_queue()) {
  // Submit the pitched 3D copy, then wait for every resulting event.
  auto events =
      detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction);
  sycl::event::wait(events);
}
} // namespace dpct
#endif // __DPCT_MEMORY_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/01_SYCL_Migration_Simple_VectorAdd/dpct_output/vectoradd.dp.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <iostream>
#include <vector>
#define N 16
//# kernel code to perform VectorAdd on GPU
//# Each work-item adds one element pair, selected by its local id in dim 2.
void VectorAddKernel(float* A, float* B, float* C,
                     const sycl::nd_item<3> &item_ct1)
{
  const size_t idx = item_ct1.get_local_id(2);
  C[idx] = A[idx] + B[idx];
}
int main()
{
  //# The dpct default queue is in-order (see device_ext), so the
  //# operations submitted below execute in submission order without
  //# explicit event dependencies.
  dpct::device_ext &dev_ct1 = dpct::get_current_device();
  sycl::queue &q_ct1 = dev_ct1.default_queue();
  //# Initialize vectors on host
  float A[N] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
  float B[N] = {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2};
  float C[N] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  //# Allocate memory on device
  float *d_A, *d_B, *d_C;
  d_A = sycl::malloc_device<float>(N, q_ct1);
  d_B = sycl::malloc_device<float>(N, q_ct1);
  d_C = sycl::malloc_device<float>(N, q_ct1);
  //# copy vector data from host to device
  q_ct1.memcpy(d_A, A, N * sizeof(float));
  q_ct1.memcpy(d_B, B, N * sizeof(float)).wait();
  //# submit task to compute VectorAdd on device
  //# (a single work-group of N work-items; the kernel indexes by local id)
  q_ct1.parallel_for(
      sycl::nd_range<3>(sycl::range<3>(1, 1, N), sycl::range<3>(1, 1, N)),
      [=](sycl::nd_item<3> item_ct1) {
        VectorAddKernel(d_A, d_B, d_C, item_ct1);
      });
  //# copy result of vector data from device to host
  //# (wait() blocks until the kernel and this copy finish on the in-order
  //# queue)
  q_ct1.memcpy(C, d_C, N * sizeof(float)).wait();
  //# print result on host
  for (int i = 0; i < N; i++) std::cout<< C[i] << " ";
  std::cout << "\n";
  //# free allocation on device
  sycl::free(d_A, q_ct1);
  sycl::free(d_B, q_ct1);
  sycl::free(d_C, q_ct1);
  return 0;
}
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/01_SYCL_Migration_Simple_VectorAdd/dpct_output/include/dpct/dpct.hpp | //==---- dpct.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_HPP__
#define __DPCT_HPP__
#include <sycl/sycl.hpp>
#include <iostream>
#include <limits.h>
#include <math.h>
#include "device.hpp"
#include "memory.hpp"
#endif // __DPCT_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/01_SYCL_Migration_Simple_VectorAdd/dpct_output/include/dpct/device.hpp | //==---- device.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DEVICE_HPP__
#define __DPCT_DEVICE_HPP__
#include <sycl/sycl.hpp>
#include <algorithm>
#include <cstring>
#include <iostream>
#include <mutex>
#include <set>
#include <sstream>
#include <map>
#include <vector>
#include <thread>
#if defined(__linux__)
#include <unistd.h>
#include <sys/syscall.h>
#endif
#if defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#endif
namespace dpct {
/// SYCL default exception handler
/// Rethrows each asynchronous exception and logs sycl::exception to stderr.
/// NOTE: __FILE__/__LINE__ report this header's location, not the failure
/// site; the useful information is e.what().
/// Declared `inline` (C++17) so that including this header in multiple
/// translation units does not violate the ODR with duplicate definitions.
inline auto exception_handler = [](sycl::exception_list exceptions) {
  for (std::exception_ptr const &e : exceptions) {
    try {
      std::rethrow_exception(e);
    } catch (sycl::exception const &e) {
      std::cerr << "Caught asynchronous SYCL exception:" << std::endl
                << e.what() << std::endl
                << "Exception caught at file:" << __FILE__
                << ", line:" << __LINE__ << std::endl;
    }
  }
};
/// dpct device extension: a sycl::device bundled with its own context and a
/// pool of in-order queues created on it. The device owns the queues; the
/// raw pointers handed out by create_queue() stay valid for its lifetime.
class device_ext : public sycl::device {
public:
  /// Default-constructs from the default-constructed base device.
  /// No queue is created here: _default_queue/_saved_queue stay null (see
  /// the in-class initializers below) until a queue is created explicitly.
  device_ext() : sycl::device(), _ctx(*this) {}
  ~device_ext() {
    std::lock_guard<std::recursive_mutex> lock(m_mutex);
    // Join any outstanding host tasks before the queues are torn down.
    for (auto &task : _tasks) {
      if (task.joinable())
        task.join();
    }
    _tasks.clear();
    _queues.clear();
  }
  /// Wraps \p base and eagerly creates the default queue with the
  /// asynchronous exception handler installed.
  device_ext(const sycl::device &base)
      : sycl::device(base), _ctx(*this) {
    _saved_queue = _default_queue = create_queue(true);
  }
  /// Returns the default in-order queue of this device.
  sycl::queue &default_queue() { return *_default_queue; }
  /// Creates a new in-order queue on this device and retains ownership.
  /// \param enable_exception_handler install dpct::exception_handler when true.
  sycl::queue *create_queue(bool enable_exception_handler = false) {
    std::lock_guard<std::recursive_mutex> lock(m_mutex);
    sycl::async_handler eh = {};
    if (enable_exception_handler) {
      eh = exception_handler;
    }
    auto property = get_default_property_list_for_queue();
    _queues.push_back(std::make_shared<sycl::queue>(
        _ctx, *this, eh, property));
    return _queues.back().get();
  }
private:
  /// Queues are always in-order; profiling is additionally enabled when
  /// DPCT_PROFILING_ENABLED is defined.
  sycl::property_list get_default_property_list_for_queue() const {
#ifdef DPCT_PROFILING_ENABLED
    auto property =
        sycl::property_list{sycl::property::queue::enable_profiling(),
                            sycl::property::queue::in_order()};
#else
    auto property =
        sycl::property_list{sycl::property::queue::in_order()};
#endif
    return property;
  }
  // Initialized to null so a default-constructed device_ext never holds
  // indeterminate pointers (the wrapping constructor overwrites these).
  sycl::queue *_default_queue = nullptr;
  sycl::queue *_saved_queue = nullptr;
  sycl::context _ctx;
  std::vector<std::shared_ptr<sycl::queue>> _queues;
  mutable std::recursive_mutex m_mutex;
  std::vector<std::thread> _tasks;
};
/// Returns the OS thread id of the calling thread; used as the key of the
/// thread-to-device map in dev_mgr.
static inline unsigned int get_tid() {
#if defined(__linux__)
  return syscall(SYS_gettid);
#elif defined(_WIN64)
  return GetCurrentThreadId();
#else
#error "Only support Windows and Linux."
#endif
}
/// device manager
/// Process-wide singleton owning one device_ext per visible SYCL device
/// (index 0 is always the default-selector device). Maps OS thread ids to
/// a "current device" id; unmapped threads use the default device.
class dev_mgr {
public:
  /// Returns the device_ext bound to the calling thread (the default
  /// device if the thread was never bound). Throws on an invalid id.
  device_ext &current_device() {
    unsigned int dev_id=current_device_id();
    check_id(dev_id);
    return *_devs[dev_id];
  }
  /// Looks up the calling thread in _thread2dev_map; falls back to
  /// DEFAULT_DEVICE_ID when the thread has no entry.
  unsigned int current_device_id() const {
    std::lock_guard<std::recursive_mutex> lock(m_mutex);
    auto it=_thread2dev_map.find(get_tid());
    if(it != _thread2dev_map.end())
      return it->second;
    return DEFAULT_DEVICE_ID;
  }
  /// Returns the instance of device manager singleton.
  /// (Thread-safe via C++11 function-local static initialization.)
  static dev_mgr &instance() {
    static dev_mgr d_m;
    return d_m;
  }
  // Singleton: not copyable, not movable.
  dev_mgr(const dev_mgr &) = delete;
  dev_mgr &operator=(const dev_mgr &) = delete;
  dev_mgr(dev_mgr &&) = delete;
  dev_mgr &operator=(dev_mgr &&) = delete;
private:
  mutable std::recursive_mutex m_mutex;
  /// Enumerates all SYCL devices. The default-selector device is inserted
  /// first and skipped during the remaining enumeration; _cpu_device
  /// records the index of the first CPU device found (-1 if none).
  dev_mgr() {
    sycl::device default_device =
        sycl::device(sycl::default_selector_v);
    _devs.push_back(std::make_shared<device_ext>(default_device));
    std::vector<sycl::device> sycl_all_devs =
        sycl::device::get_devices(sycl::info::device_type::all);
    // Collect other devices except for the default device.
    if (default_device.is_cpu())
      _cpu_device = 0;
    for (auto &dev : sycl_all_devs) {
      if (dev == default_device) {
        continue;
      }
      _devs.push_back(std::make_shared<device_ext>(dev));
      if (_cpu_device == -1 && dev.is_cpu()) {
        _cpu_device = _devs.size() - 1;
      }
    }
  }
  /// Throws std::runtime_error when \p id is out of range of _devs.
  void check_id(unsigned int id) const {
    if (id >= _devs.size()) {
      throw std::runtime_error("invalid device id");
    }
  }
  std::vector<std::shared_ptr<device_ext>> _devs;
  /// DEFAULT_DEVICE_ID is used, if current_device_id() can not find current
  /// thread id in _thread2dev_map, which means default device should be used
  /// for the current thread.
  const unsigned int DEFAULT_DEVICE_ID = 0;
  /// thread-id to device-id map.
  std::map<unsigned int, unsigned int> _thread2dev_map;
  int _cpu_device = -1;
};
/// Util function to get the default queue of current device in
/// dpct device manager.
static inline sycl::queue &get_default_queue() {
  auto &dev = dev_mgr::instance().current_device();
  return dev.default_queue();
}
/// Util function to get the current device.
static inline device_ext &get_current_device() {
  auto &mgr = dev_mgr::instance();
  return mgr.current_device();
}
} // namespace dpct
#endif // __DPCT_DEVICE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/cuda-to-sycl-migration-training/01_SYCL_Migration_Simple_VectorAdd/dpct_output/include/dpct/memory.hpp | //==---- memory.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_HPP__
#define __DPCT_MEMORY_HPP__
#include <sycl/sycl.hpp>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <unordered_map>
#include <map>
#include <utility>
#include <thread>
#include <type_traits>
#if defined(__linux__)
#include <sys/mman.h>
#elif defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#else
#error "Only support Windows and Linux."
#endif
#include "device.hpp"
namespace dpct {
enum memcpy_direction {
host_to_host,
host_to_device,
device_to_host,
device_to_device,
automatic
};
/// Pitched 2D/3D memory data.
/// _pitch is the allocated row width in bytes; _x and _y are the logical
/// extents of the matrix in dims x and y.
class pitched_data {
public:
  pitched_data() : pitched_data(nullptr, 0, 0, 0) {}
  pitched_data(void *data, size_t pitch, size_t x, size_t y)
      : _data(data), _pitch(pitch), _x(x), _y(y) {}
  void *get_data_ptr() { return _data; }
  size_t get_pitch() { return _pitch; }
  /// Logical extent in dim x. (The field was stored but previously had no
  /// accessor.)
  size_t get_x() { return _x; }
  size_t get_y() { return _y; }
private:
  void *_data;
  size_t _pitch, _x, _y;
};
namespace detail {
enum class pointer_access_attribute {
host_only = 0,
device_only,
host_device,
end
};
/// Classifies \p ptr relative to \p q's context: host-only (not a USM
/// allocation), device-only (device USM), or host+device (shared/host USM).
static pointer_access_attribute get_pointer_attribute(sycl::queue &q,
                                                      const void *ptr) {
  switch (sycl::get_pointer_type(ptr, q.get_context())) {
  case sycl::usm::alloc::device:
    return pointer_access_attribute::device_only;
  case sycl::usm::alloc::shared:
  case sycl::usm::alloc::host:
    return pointer_access_attribute::host_device;
  case sycl::usm::alloc::unknown:
  default:
    // Explicit fallback: the original switch had no default, so control
    // could flow off the end of this value-returning function (UB) for an
    // unexpected enumerator. Unknown pointers are treated as host memory.
    return pointer_access_attribute::host_only;
  }
}
/// Resolves \p dir: the four explicit directions pass through unchanged;
/// \a automatic is resolved by classifying both pointers with
/// get_pointer_attribute and looking the pair up in a direction table.
static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr,
                                                const void *from_ptr,
                                                memcpy_direction dir) {
  switch (dir) {
  case memcpy_direction::host_to_host:
  case memcpy_direction::host_to_device:
  case memcpy_direction::device_to_host:
  case memcpy_direction::device_to_device:
    return dir;
  case memcpy_direction::automatic: {
    // table[to_attribute][from_attribute]
    // Rows index the destination attribute, columns the source attribute,
    // in enum order: host_only, device_only, host_device.
    static const memcpy_direction
        direction_table[static_cast<unsigned>(pointer_access_attribute::end)]
                       [static_cast<unsigned>(pointer_access_attribute::end)] =
            {{memcpy_direction::host_to_host,
              memcpy_direction::device_to_host,
              memcpy_direction::host_to_host},
             {memcpy_direction::host_to_device,
              memcpy_direction::device_to_device,
              memcpy_direction::device_to_device},
             {memcpy_direction::host_to_host,
              memcpy_direction::device_to_device,
              memcpy_direction::device_to_device}};
    return direction_table[static_cast<unsigned>(get_pointer_attribute(
        q, to_ptr))][static_cast<unsigned>(get_pointer_attribute(q, from_ptr))];
  }
  default:
    throw std::runtime_error("dpct_memcpy: invalid direction value");
  }
}
/// Enqueues a plain byte copy on \p q after \p dep_events complete.
/// A zero-byte request enqueues nothing and yields an empty event.
/// (\p direction is accepted for interface symmetry; q.memcpy does not
/// need it.)
static sycl::event
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size,
            memcpy_direction direction,
            const std::vector<sycl::event> &dep_events = {}) {
  return size ? q.memcpy(to_ptr, from_ptr, size, dep_events)
              : sycl::event{};
}
// Get actual copy range and make sure it will not exceed range.
// Byte distance from the first to one-past-last touched byte of a pitched
// region: full slices for all but the last z, full rows for all but the
// last y, then one row's worth of payload.
static inline size_t get_copy_range(sycl::range<3> size, size_t slice,
                                    size_t pitch) {
  const size_t depth = size.get(2);
  const size_t height = size.get(1);
  const size_t width = size.get(0);
  return slice * (depth - 1) + pitch * (height - 1) + width;
}
// Byte offset of element \p id inside a pitched region with the given
// slice (z) and pitch (y) strides.
static inline size_t get_offset(sycl::id<3> id, size_t slice,
                                size_t pitch) {
  const size_t z = id.get(2);
  const size_t y = id.get(1);
  const size_t x = id.get(0);
  return slice * z + pitch * y + x;
}
/// copy 3D matrix specified by \p size from 3D matrix specified by \p from_ptr
/// and \p from_range to another specified by \p to_ptr and \p to_range.
///
/// Strategy depends on the (deduced) direction:
///  - contiguous-on-both-sides copies collapse into a single 1D memcpy;
///  - host_to_host copies row-by-row (or slice-by-slice) with std::memcpy
///    semantics via the 1D overload;
///  - host_to_device / device_to_host stage through a malloc'd host buffer
///    shaped like the device side, so only one device transfer is submitted;
///  - device_to_device runs an element-wise kernel.
/// Returns the events of all submitted operations.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
            sycl::range<3> to_range, sycl::range<3> from_range,
            sycl::id<3> to_id, sycl::id<3> from_id,
            sycl::range<3> size, memcpy_direction direction,
            const std::vector<sycl::event> &dep_events = {}) {
  // RAII for host pointer
  // The destructor does not free synchronously: it submits a host_task that
  // frees the buffer only after the dependent events (_deps) complete, so
  // the buffer outlives the async copies that read/write it.
  class host_buffer {
    void *_buf;
    size_t _size;
    sycl::queue &_q;
    const std::vector<sycl::event> &_deps; // free operation depends
  public:
    host_buffer(size_t size, sycl::queue &q,
                const std::vector<sycl::event> &deps)
        : _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {}
    void *get_ptr() const { return _buf; }
    size_t get_size() const { return _size; }
    ~host_buffer() {
      if (_buf) {
        _q.submit([&](sycl::handler &cgh) {
          cgh.depends_on(_deps);
          cgh.host_task([buf = _buf] { std::free(buf); });
        });
      }
    }
  };
  std::vector<sycl::event> event_list;
  size_t to_slice = to_range.get(1) * to_range.get(0),
         from_slice = from_range.get(1) * from_range.get(0);
  // Surfaces: the first byte actually touched on each side.
  unsigned char *to_surface =
      (unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0));
  const unsigned char *from_surface =
      (const unsigned char *)from_ptr +
      get_offset(from_id, from_slice, from_range.get(0));
  // Fast path: both sides dense and shaped like the copy -> one 1D memcpy.
  if (to_slice == from_slice && to_slice == size.get(1) * size.get(0)) {
    return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2),
                        direction, dep_events)};
  }
  direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
  size_t size_slice = size.get(1) * size.get(0);
  switch (direction) {
  case host_to_host:
    for (size_t z = 0; z < size.get(2); ++z) {
      unsigned char *to_ptr = to_surface;
      const unsigned char *from_ptr = from_surface;
      if (to_range.get(0) == from_range.get(0) &&
          to_range.get(0) == size.get(0)) {
        event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice,
                                         direction, dep_events));
      } else {
        for (size_t y = 0; y < size.get(1); ++y) {
          event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0),
                                           direction, dep_events));
          to_ptr += to_range.get(0);
          from_ptr += from_range.get(0);
        }
      }
      to_surface += to_slice;
      from_surface += from_slice;
    }
    break;
  case host_to_device: {
    host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q,
                    event_list);
    std::vector<sycl::event> host_events;
    if (to_slice == size_slice) {
      // Copy host data to a temp host buffer with the shape of target.
      host_events =
          dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range,
                      sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size,
                      host_to_host, dep_events);
    } else {
      // Copy host data to a temp host buffer with the shape of target.
      host_events = dpct_memcpy(
          q, buf.get_ptr(), from_surface, to_range, from_range,
          sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
          // If has padding data, not sure whether it is useless. So fill temp
          // buffer with it.
          std::vector<sycl::event>{
              dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(),
                          device_to_host, dep_events)});
    }
    // Copy from temp host buffer to device with only one submit.
    event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(),
                                     buf.get_size(), host_to_device,
                                     host_events));
    break;
  }
  case device_to_host: {
    host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q,
                    event_list);
    // Copy from host temp buffer to host target with reshaping.
    event_list = dpct_memcpy(
        q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0),
        sycl::id<3>(0, 0, 0), size, host_to_host,
        // Copy from device to temp host buffer with only one submit.
        std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface,
                                             buf.get_size(),
                                             device_to_host, dep_events)});
    break;
  }
  case device_to_device:
    // One work-item per element; offsets recomputed per side to honor the
    // two (possibly different) pitches.
    event_list.push_back(q.submit([&](sycl::handler &cgh) {
      cgh.depends_on(dep_events);
      cgh.parallel_for<class dpct_memcpy_3d_detail>(
          size,
          [=](sycl::id<3> id) {
            to_surface[get_offset(id, to_slice, to_range.get(0))] =
                from_surface[get_offset(id, from_slice, from_range.get(0))];
          });
    }));
    break;
  default:
    throw std::runtime_error("dpct_memcpy: invalid direction value");
  }
  return event_list;
}
/// memcpy 2D/3D matrix specified by pitched_data.
/// Unpacks both pitched descriptors into (pitch, y, 1) strides and forwards
/// to the general 3D overload.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id,
            pitched_data from, sycl::id<3> from_id, sycl::range<3> size,
            memcpy_direction direction = automatic) {
  const sycl::range<3> to_strides(to.get_pitch(), to.get_y(), 1);
  const sycl::range<3> from_strides(from.get_pitch(), from.get_y(), 1);
  return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(), to_strides,
                     from_strides, to_id, from_id, size, direction);
}
/// memcpy 2D matrix with pitch.
///
/// Expresses the pitched 2D copy as a 3D copy with depth one and zero
/// offsets, then delegates to the generic 3D copy helper.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
            size_t to_pitch, size_t from_pitch, size_t x, size_t y,
            memcpy_direction direction = automatic) {
  const sycl::id<3> no_offset(0, 0, 0);
  return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1),
                     sycl::range<3>(from_pitch, y, 1), no_offset, no_offset,
                     sycl::range<3>(x, y, 1), direction);
}
} // namespace detail
/// Synchronously copies \p size bytes from the address specified by \p from_ptr
/// to the address specified by \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device,
/// \a device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
                        memcpy_direction direction = automatic,
                        sycl::queue &q = get_default_queue()) {
  // Launch the copy, then block until the returned event completes.
  auto copy_event = detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction);
  copy_event.wait();
}
/// Synchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while \p
/// from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(void *to_ptr, size_t to_pitch,
                               const void *from_ptr, size_t from_pitch,
                               size_t x, size_t y,
                               memcpy_direction direction = automatic,
                               sycl::queue &q = dpct::get_default_queue()) {
  // Launch the pitched 2D copy and block until every returned event completes.
  const auto copy_events = detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch,
                                               from_pitch, x, y, direction);
  sycl::event::wait(copy_events);
}
/// Synchronously copies a subset of a 3D matrix specified by \p to to another
/// 3D matrix specified by \p from. The from and to position info are specified
/// by \p from_pos and \p to_pos The copied matrix size is specified by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The function will return after the copy is completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of source.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(pitched_data to, sycl::id<3> to_pos,
                               pitched_data from, sycl::id<3> from_pos,
                               sycl::range<3> size,
                               memcpy_direction direction = automatic,
                               sycl::queue &q = dpct::get_default_queue()) {
  // Launch the 3D submatrix copy and block until all returned events complete.
  const auto copy_events =
      detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction);
  sycl::event::wait(copy_events);
}
} // namespace dpct
#endif // __DPCT_MEMORY_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/06_Intel_VTune_Profiler/src/utils.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include "../include/iso3dfd.h"
/*
 * Host-Code
 * Utility function to validate grid and block dimensions.
 *
 * Each grid dimension must be an exact multiple of the corresponding
 * block dimension. Prints a diagnostic and returns true (= error) on
 * the first violation; returns false when all dimensions are valid.
 * (Also removes the stray empty statements `;` the original left after
 * two of the error prints.)
 */
bool checkGridDimension(size_t n1, size_t n2, size_t n3, unsigned int dimX,
                        unsigned int dimY, unsigned int blockZ) {
  if (n1 % dimX) {
    std::cout << " ERROR: Invalid Grid Size: n1 should be multiple of DIMX - "
              << dimX << "\n";
    return true;
  }
  if (n2 % dimY) {
    std::cout << " ERROR: Invalid Grid Size: n2 should be multiple of DIMY - "
              << dimY << "\n";
    return true;
  }
  if (n3 % blockZ) {
    std::cout << " ERROR: Invalid Grid Size: n3 should be multiple of BLOCKZ - "
              << blockZ << "\n";
    return true;
  }
  return false;
}
/*
 * Host-Code
 * Utility function to validate block sizes.
 *
 * Returns true (= error) when the requested work-group footprint
 * dimX * dimY exceeds the device's max_work_group_size.
 */
bool checkBlockDimension(sycl::queue& q, unsigned int dimX,
                         unsigned int dimY) {
  const auto dev = q.get_device();
  const auto workGroupLimit =
      dev.get_info<sycl::info::device::max_work_group_size>();
  const bool tooLarge = (workGroupLimit > 1) && (dimX * dimY > workGroupLimit);
  if (tooLarge) {
    std::cout << "ERROR: Invalid block sizes: n1_Tblock * n2_Tblock should be "
                 "less than or equal to "
              << workGroupLimit << "\n";
  }
  return tooLarge;
}
/*
 * Host-Code
 * Utility function to print device info.
 *
 * Reports the selected SYCL device's name, max work-group size and
 * compute-unit count, plus the block sizes in use and which kernel
 * variant (SLM or global-memory) this binary was built with.
 */
void printTargetInfo(sycl::queue& q, unsigned int dimX, unsigned int dimY) {
  const auto dev = q.get_device();
  const auto workGroupLimit =
      dev.get_info<sycl::info::device::max_work_group_size>();
  const auto computeUnits =
      dev.get_info<sycl::info::device::max_compute_units>();
  std::cout << " Running on " << dev.get_info<sycl::info::device::name>()
            << "\n";
  std::cout << " The Device Max Work Group Size is : " << workGroupLimit
            << "\n";
  std::cout << " The Device Max EUCount is : " << computeUnits << "\n";
  std::cout << " The blockSize x is : " << dimX << "\n";
  std::cout << " The blockSize y is : " << dimY << "\n";
#ifdef USE_SHARED
  std::cout << " Using Shared Local Memory Kernel : " << "\n";
#else
  std::cout << " Using Global Memory Kernel : " << "\n";
#endif
}
/*
 * Host-Code
 * Utility function to print the command-line usage help text.
 */
void usage(std::string programName) {
  // Single chained insertion; the emitted text is identical to the
  // original multi-statement version.
  std::cout << " Incorrect parameters " << "\n"
            << " Usage: " << programName
            << " n1 n2 n3 b1 b2 b3 Iterations [omp|sycl] [gpu|cpu]" << "\n"
            << "\n"
            << " n1 n2 n3 : Grid sizes for the stencil " << "\n"
            << " b1 b2 b3 : cache block sizes for cpu openmp version. "
            << "\n"
            << " Iterations : No. of timesteps. " << "\n"
            << " [omp|sycl] : Optional: Run the OpenMP or the SYCL variant."
            << " Default is to use both for validation " << "\n"
            << " [gpu|cpu] : Optional: Device to run the SYCL version"
            << " Default is to use the GPU if available, if not fallback to CPU "
            << "\n"
            << "\n";
}
/*
 * Host-Code
 * Utility function to print throughput statistics.
 *
 * time is the total wall-clock time in milliseconds for nIterations
 * timesteps over the (n1, n2, n3) grid (halo of HALF_LENGTH included).
 */
void printStats(double time, size_t n1, size_t n2, size_t n3,
                unsigned int nIterations) {
  // Average time (ms) per timestep.
  float normalized_time = (double)time / nIterations;
  // Interior grid points updated per second, in millions.
  float throughput_mpoints = ((n1 - 2 * HALF_LENGTH) * (n2 - 2 * HALF_LENGTH) *
                              (n3 - 2 * HALF_LENGTH)) /
                             (normalized_time * 1e3f);
  // Flop count per point follows the (7 * HALF_LENGTH + 5) model.
  float mflops = (7.0f * HALF_LENGTH + 5.0f) * throughput_mpoints;
  double mbytes = 12.0f * throughput_mpoints;
  std::cout << "--------------------------------------" << "\n";
  std::cout << "time : " << time / 1e3f << " secs" << "\n";
  std::cout << "throughput : " << throughput_mpoints << " Mpts/s"
            << "\n";
  std::cout << "flops : " << mflops / 1e3f << " GFlops" << "\n";
  std::cout << "bytes : " << mbytes / 1e3f << " GBytes/s" << "\n";
  std::cout << "\n"
            << "--------------------------------------" << "\n";
  std::cout << "\n"
            << "--------------------------------------" << "\n";
}
/*
 * Host-Code
 * Utility function to compare a result buffer against a reference buffer.
 *
 * Walks the interior of the (dimx, dimy, dimz) grid (skipping a halo of
 * `radius` cells, with `zadjust` trimming the upper z bound), accumulates
 * the squared differences, logs every point whose difference exceeds
 * `delta` to ./error_diff.txt (stderr if the file cannot be opened), and
 * returns true when at least one such point was found.
 */
bool within_epsilon(float* output, float* reference, const size_t dimx,
                    const size_t dimy, const size_t dimz,
                    const unsigned int radius, const int zadjust = 0,
                    const float delta = 0.01f) {
  FILE* fout = fopen("./error_diff.txt", "w");
  if (!fout) fout = stderr;
  bool mismatch = false;
  double sq_norm = 0;
  const size_t plane = dimx * dimy;  // elements per z-slice
  for (size_t iz = 0; iz < dimz; iz++) {
    for (size_t iy = 0; iy < dimy; iy++) {
      for (size_t ix = 0; ix < dimx; ix++) {
        const bool interior =
            ix >= radius && ix < (dimx - radius) && iy >= radius &&
            iy < (dimy - radius) && iz >= radius &&
            iz < (dimz - radius + zadjust);
        if (interior) {
          const size_t idx = iz * plane + iy * dimx + ix;
          const float diff = fabsf(reference[idx] - output[idx]);
          sq_norm += diff * diff;
          if (diff > delta) {
            mismatch = true;
            fprintf(fout, " ERROR: (%zu,%zu,%zu)\t%e instead of %e (|e|=%e)\n",
                    ix, iy, iz, output[idx], reference[idx], diff);
          }
        }
      }
    }
  }
  if (fout != stderr) fclose(fout);
  sq_norm = sqrt(sq_norm);
  if (mismatch) printf("error (Euclidean norm): %.9e\n", sq_norm);
  return mismatch;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/06_Intel_VTune_Profiler/src/iso3dfd.cpp | //==============================================================
// Copyright 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// ISO3DFD: Intel oneAPI SYCL Language Basics Using 3D-Finite-Difference-Wave
// Propagation
//
// ISO3DFD is a finite difference stencil kernel for solving the 3D acoustic
// isotropic wave equation. Kernels in this sample are implemented as 16th order
// in space, 2nd order in time scheme without boundary conditions. Using Data
// Parallel C++, the sample can explicitly run on the GPU and/or CPU to
// calculate a result. If successful, the output will print the device name
// where the SYCL code ran along with the grid computation metrics - flops
// and effective throughput
//
// For comprehensive instructions regarding SYCL Programming, go to
// https://software.intel.com/en-us/oneapi-programming-guide
// and search based on relevant terms noted in the comments.
//
// SYCL material used in this code sample:
//
// SYCL Queues (including device selectors and exception handlers)
// SYCL Custom device selector
// SYCL Buffers and accessors (communicate data between the host and the
// device)
// SYCL Kernels (including parallel_for function and nd-range<3>
// objects)
// Shared Local Memory (SLM) optimizations (SYCL)
// SYCL Basic synchronization (barrier function)
//
#include "../include/iso3dfd.h"
#include <iostream>
#include "../include/device_selector.hpp"
#define MIN(a, b) (a) < (b) ? (a) : (b)
// using namespace sycl;
/*
 * Host-Code
 * Function used for initialization.
 *
 * Zeroes both wavefields, fills the velocity array with the constant
 * integrated term 2250000 * DT * DT, and then plants a source: nested
 * cubes of growing amplitude (1, 10, ..., 1e5) centered near
 * (n1/4, n2/4, n3/2) in the previous-timestep wavefield.
 *
 * Fixes: loop counters are size_t throughout (the original mixed a
 * signed `int k` with size_t bounds). Using size_t for the source-cube
 * loops also means an underflowing lower bound (tiny grids where
 * n?/4 < s) makes the loop a no-op instead of indexing out of bounds.
 */
void initialize(float* ptr_prev, float* ptr_next, float* ptr_vel, size_t n1,
                size_t n2, size_t n3) {
  std::cout << "Initializing ... " << "\n";
  size_t dim2 = n2 * n1;  // elements per z-plane
  for (size_t i = 0; i < n3; i++) {
    for (size_t j = 0; j < n2; j++) {
      size_t offset = i * dim2 + j * n1;
#pragma omp simd
      for (size_t k = 0; k < n1; k++) {
        ptr_prev[offset + k] = 0.0f;
        ptr_next[offset + k] = 0.0f;
        ptr_vel[offset + k] =
            2250000.0f * DT * DT;  // Integration of the v*v and dt*dt
      }
    }
  }
  // Add a source to initial wavefield as an initial condition
  float val = 1.f;
  for (int s = 5; s >= 0; s--) {  // s stays signed: loop exits at -1
    for (size_t i = n3 / 2 - s; i < n3 / 2 + s; i++) {
      for (size_t j = n2 / 4 - s; j < n2 / 4 + s; j++) {
        size_t offset = i * dim2 + j * n1;
        for (size_t k = n1 / 4 - s; k < n1 / 4 + s; k++) {
          ptr_prev[offset + k] = val;
        }
      }
    }
    val *= 10;
  }
}
/*
 * Host-Code
 * OpenMP implementation for a single iteration (timestep) of the iso3dfd
 * kernel. This function is used as the reference implementation for
 * verification and also to compare performance of OpenMP and SYCL on CPU.
 *
 * ptr_next_base : wavefield being updated this step (read-modify-write)
 * ptr_prev_base : wavefield from the previous step (read-only here)
 * ptr_vel_base  : per-point velocity term (already scaled by dt^2 by the
 *                 initializer)
 * coeff         : stencil coefficients coeff[0..HALF_LENGTH]
 * n1, n2, n3    : full grid dimensions, halo of HALF_LENGTH included
 * n?_Tblock     : cache-block sizes in each dimension
 *
 * Additional Details:
 * https://software.intel.com/en-us/articles/eight-optimizations-for-3-dimensional-finite-difference-3dfd-code-with-an-isotropic-iso
 */
void iso_3dfd_it(float* ptr_next_base, float* ptr_prev_base,
                 float* ptr_vel_base, float* coeff, const size_t n1,
                 const size_t n2, const size_t n3, const size_t n1_Tblock,
                 const size_t n2_Tblock, const size_t n3_Tblock) {
  size_t dimn1n2 = n1 * n2;  // stride between consecutive z-planes
  // Exclusive upper bounds of the interior region; the outermost
  // HALF_LENGTH cells are halo and are never written.
  size_t n3End = n3 - HALF_LENGTH;
  size_t n2End = n2 - HALF_LENGTH;
  size_t n1End = n1 - HALF_LENGTH;
// The three blocking loops are collapsed into one statically-scheduled
// parallel loop so each thread gets whole cache blocks.
#pragma omp parallel default(shared)
#pragma omp for schedule(static) collapse(3)
  for (size_t bz = HALF_LENGTH; bz < n3End;
       bz += n3_Tblock) {  // start of cache blocking
    for (size_t by = HALF_LENGTH; by < n2End; by += n2_Tblock) {
      for (size_t bx = HALF_LENGTH; bx < n1End; bx += n1_Tblock) {
        // Clamp each block to the interior. Note the asymmetry: izEnd and
        // iyEnd are absolute end indices, while ixEnd is a point COUNT
        // (the x loop below starts at 0 relative to bx).
        int izEnd = MIN(bz + n3_Tblock, n3End);
        int iyEnd = MIN(by + n2_Tblock, n2End);
        int ixEnd = MIN(n1_Tblock, n1End - bx);
        for (size_t iz = bz; iz < izEnd; iz++) {  // start of inner iterations
          for (size_t iy = by; iy < iyEnd; iy++) {
            // Row base pointers for this (iz, iy) line, offset to bx.
            float* ptr_next = ptr_next_base + iz * dimn1n2 + iy * n1 + bx;
            float* ptr_prev = ptr_prev_base + iz * dimn1n2 + iy * n1 + bx;
            float* ptr_vel = ptr_vel_base + iz * dimn1n2 + iy * n1 + bx;
#pragma omp simd
            for (size_t ix = 0; ix < ixEnd; ix++) {
              float value = 0.0;
              value += ptr_prev[ix] * coeff[0];
// Symmetric neighbors at distance ir along x (stride 1), y (stride n1)
// and z (stride n1*n2) share one coefficient each.
#pragma unroll(HALF_LENGTH)
              for (unsigned int ir = 1; ir <= HALF_LENGTH; ir++) {
                value += coeff[ir] *
                         ((ptr_prev[ix + ir] + ptr_prev[ix - ir]) +
                          (ptr_prev[ix + ir * n1] + ptr_prev[ix - ir * n1]) +
                          (ptr_prev[ix + ir * dimn1n2] +
                           ptr_prev[ix - ir * dimn1n2]));
              }
              // 2nd-order-in-time update; ptr_next currently holds the
              // wavefield from two steps ago.
              ptr_next[ix] =
                  2.0f * ptr_prev[ix] - ptr_next[ix] + value * ptr_vel[ix];
            }
          }
        }  // end of inner iterations
      }
    }
  }  // end of cache blocking
}
/*
 * Host-Code
 * Driver function for the ISO3DFD OpenMP code.
 *
 * Runs nreps timesteps, alternating ptr_next and ptr_prev as ping-pong
 * buffers between consecutive calls to iso_3dfd_it to propagate the wave.
 */
void iso_3dfd(float* ptr_next, float* ptr_prev, float* ptr_vel, float* coeff,
              const size_t n1, const size_t n2, const size_t n3,
              const unsigned int nreps, const size_t n1_Tblock,
              const size_t n2_Tblock, const size_t n3_Tblock) {
  unsigned int step = 0;
  while (step < nreps) {
    // Even step: read from prev, write into next.
    iso_3dfd_it(ptr_next, ptr_prev, ptr_vel, coeff, n1, n2, n3, n1_Tblock,
                n2_Tblock, n3_Tblock);
    // here's where boundary conditions and halo exchanges happen
    ++step;
    // Odd step: the ping-pong buffers swap roles.
    if (step < nreps) {
      iso_3dfd_it(ptr_prev, ptr_next, ptr_vel, coeff, n1, n2, n3, n1_Tblock,
                  n2_Tblock, n3_Tblock);
      ++step;
    }
  }  // time loop
}
/*
 * Host-Code
 * Main function to drive the sample application.
 *
 * Parses the command line, optionally runs the OpenMP/serial reference
 * and/or the SYCL variant, prints timing statistics, and cross-validates
 * the two results when both variants run. Returns 0 on success, 1 on
 * bad arguments or validation failure.
 *
 * Fixes vs. the original: missing-argument check before indexing argv
 * (std::stoi on argv[k] with k >= argc is out-of-bounds; catch(...)
 * cannot reliably trap that), and the three grid arrays (plus temp) are
 * now released on the checkBlockDimension early-exit path.
 */
int main(int argc, char* argv[]) {
  // Arrays used to update the wavefield
  float* prev_base = nullptr;
  float* next_base = nullptr;
  // Array to store wave velocity
  float* vel_base = nullptr;
  // Array to store results for comparison; stays nullptr unless both
  // variants run, which makes unconditional delete[] safe.
  float* temp = nullptr;
  bool sycl = true;
  bool omp = true;
  bool error = false;
  bool isGPU = true;
  size_t n1, n2, n3;
  size_t n1_Tblock, n2_Tblock, n3_Tblock;
  unsigned int nIterations;
  // Require the seven mandatory arguments before touching argv.
  if (argc < 8) {
    usage(argv[0]);
    return 1;
  }
  // Read Input Parameters
  try {
    n1 = std::stoi(argv[1]) + (2 * HALF_LENGTH);
    n2 = std::stoi(argv[2]) + (2 * HALF_LENGTH);
    n3 = std::stoi(argv[3]) + (2 * HALF_LENGTH);
    n1_Tblock = std::stoi(argv[4]);
    n2_Tblock = std::stoi(argv[5]);
    n3_Tblock = std::stoi(argv[6]);
    nIterations = std::stoi(argv[7]);
  }
  // std::stoi throws std::invalid_argument / std::out_of_range.
  catch (...) {
    usage(argv[0]);
    return 1;
  }
  // Read optional arguments to select version and device
  for (int arg = 8; arg < argc; arg++) {  // int: matches signed argc
    if (std::string(argv[arg]) == "omp" || std::string(argv[arg]) == "OMP") {
      omp = true;
      sycl = false;
    } else if (std::string(argv[arg]) == "sycl" ||
               std::string(argv[arg]) == "SYCL") {
      omp = false;
      sycl = true;
    } else if (std::string(argv[arg]) == "gpu" ||
               std::string(argv[arg]) == "GPU") {
      isGPU = true;
    } else if (std::string(argv[arg]) == "cpu" ||
               std::string(argv[arg]) == "CPU") {
      isGPU = false;
    } else {
      usage(argv[0]);
      return 1;
    }
  }
  // Validate input sizes for the grid and block dimensions
  if (checkGridDimension(n1 - 2 * HALF_LENGTH, n2 - 2 * HALF_LENGTH,
                         n3 - 2 * HALF_LENGTH, n1_Tblock, n2_Tblock,
                         n3_Tblock)) {
    usage(argv[0]);
    return 1;
  }
  // Compute the total size of grid
  size_t nsize = n1 * n2 * n3;
  prev_base = new float[nsize];
  next_base = new float[nsize];
  vel_base = new float[nsize];
  // Compute coefficients to be used in wavefield update
  float coeff[HALF_LENGTH + 1] = {-3.0548446, +1.7777778, -3.1111111e-1,
                                  +7.572087e-2, -1.76767677e-2, +3.480962e-3,
                                  -5.180005e-4, +5.074287e-5, -2.42812e-6};
  // Apply the DX DY and DZ to coefficients
  coeff[0] = (3.0f * coeff[0]) / (DXYZ * DXYZ);
  for (int i = 1; i <= HALF_LENGTH; i++) {
    coeff[i] = coeff[i] / (DXYZ * DXYZ);
  }
  std::cout << "Grid Sizes: " << n1 - 2 * HALF_LENGTH << " "
            << n2 - 2 * HALF_LENGTH << " " << n3 - 2 * HALF_LENGTH << "\n";
  std::cout << "Memory Usage: " << ((3 * nsize * sizeof(float)) / (1024 * 1024))
            << " MB" << "\n";
  // Check if running OpenMP OR Serial version on CPU
  if (omp) {
#if defined(_OPENMP)
    std::cout << " ***** Running OpenMP variant *****" << "\n";
#else
    std::cout << " ***** Running C++ Serial variant *****" << "\n";
#endif
    // Initialize arrays and introduce initial conditions (source)
    initialize(prev_base, next_base, vel_base, n1, n2, n3);
    // Start timer
    auto start = std::chrono::steady_clock::now();
    // Invoke the driver function to perform 3D wave propogation
    // using OpenMP/Serial version
    iso_3dfd(next_base, prev_base, vel_base, coeff, n1, n2, n3, nIterations,
             n1_Tblock, n2_Tblock, n3_Tblock);
    // End timer
    auto end = std::chrono::steady_clock::now();
    auto time =
        std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
            .count();
    printStats(time, n1, n2, n3, nIterations);
  }
  // Check if running both OpenMP/Serial and SYCL version
  // Keeping a copy of output buffer from OpenMP version
  // for comparison
  if (omp && sycl) {
    temp = new float[nsize];
    // The final wavefield lives in next_base after an odd number of
    // timesteps and in prev_base after an even number (ping-pong).
    if (nIterations % 2)
      memcpy(temp, next_base, nsize * sizeof(float));
    else
      memcpy(temp, prev_base, nsize * sizeof(float));
  }
  // Check if running SYCL version
  if (sycl) {
    std::cout << " ***** Running SYCL variant *****" << "\n";
    // exception handler
    /*
      The exception_list parameter is an iterable list of std::exception_ptr
      objects. But those pointers are not always directly readable. So, we
      rethrow the pointer, catch it, and then we have the exception itself.
      Note: depending upon the operation there may be several exceptions.
    */
    auto exception_handler = [](exception_list exceptionList) {
      for (std::exception_ptr const& e : exceptionList) {
        try {
          std::rethrow_exception(e);
        } catch (exception const& e) {
          std::terminate();
        }
      }
    };
    // Initialize arrays and introduce initial conditions (source)
    initialize(prev_base, next_base, vel_base, n1, n2, n3);
    // Initializing a string pattern to allow a custom device selector
    // pick a SYCL device as per user's preference and available devices
    // Default value of pattern is set to CPU
    std::string pattern("CPU");
    std::string patterngpu("Graphics");
    // Replacing the pattern string to Gen if running on a GPU
    if (isGPU) {
      pattern.replace(0, 3, patterngpu);
    }
    // Create a custom device selector using SYCL device selector class
    MyDeviceSelector device_sel(pattern);
    // Create a device queue using SYCL class queue with a custom
    // device selector
    queue q(device_sel, exception_handler);
    // Validate if the block sizes selected are
    // within range for the selected SYCL device
    if (checkBlockDimension(q, n1_Tblock, n2_Tblock)) {
      usage(argv[0]);
      // Release the grids allocated above (the original leaked them here).
      delete[] prev_base;
      delete[] next_base;
      delete[] vel_base;
      delete[] temp;
      return 1;
    }
    // Start timer
    auto start = std::chrono::steady_clock::now();
    // Invoke the driver function to perform 3D wave propogation
    // using SYCL version on the selected SYCL device
    iso_3dfd_device(q, next_base, prev_base, vel_base, coeff, n1, n2, n3,
                    n1_Tblock, n2_Tblock, n3_Tblock, n3 - HALF_LENGTH,
                    nIterations);
    // Wait for the commands to complete. Enforce synchronization on the command
    // queue
    q.wait_and_throw();
    // End timer
    auto end = std::chrono::steady_clock::now();
    auto time =
        std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
            .count();
    std::cout << "SYCL time: " << time << " ms" << "\n";
    printStats(time, n1, n2, n3, nIterations);
  }
  // If running both OpenMP/Serial and SYCL version
  // Comparing results
  if (omp && sycl) {
    if (nIterations % 2) {
      error = within_epsilon(next_base, temp, n1, n2, n3, HALF_LENGTH, 0, 0.1f);
      if (error) std::cout << "Error = " << error << "\n";
    } else {
      error = within_epsilon(prev_base, temp, n1, n2, n3, HALF_LENGTH, 0, 0.1f);
      if (error) std::cout << "Error = " << error << "\n";
    }
    delete[] temp;
  }
  delete[] prev_base;
  delete[] next_base;
  delete[] vel_base;
  return error ? 1 : 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/06_Intel_VTune_Profiler/src/iso3dfd_kernels.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// ISO3DFD: Data Parallel C++ Language Basics Using 3D-Finite-Difference-Wave
// Propagation
//
// ISO3DFD is a finite difference stencil kernel for solving the 3D acoustic
// isotropic wave equation which can be used as a proxy for propogating a
// seismic wave. Kernels in this sample are implemented as 16th order in space,
// with symmetric coefficients, and 2nd order in time scheme without boundary
// conditions.. Using Data Parallel C++, the sample can explicitly run on the
// GPU and/or CPU to propagate a seismic wave which is a compute intensive task.
// If successful, the output will print the device name
// where the SYCL code ran along with the grid computation metrics - flops
// and effective throughput.
//
// For comprehensive instructions regarding SYCL Programming, go to
// https://software.intel.com/en-us/oneapi-programming-guide
// and search based on relevant terms noted in the comments.
//
// SYCL material used in this code sample:
//
// SYCL Queues (including device selectors and exception handlers)
// SYCL Custom device selector
// SYCL Buffers and accessors (communicate data between the host and the
// device)
// SYCL Kernels (including parallel_for function and nd-range<3>
// objects)
// Shared Local Memory (SLM) optimizations (SYCL)
// SYCL Basic synchronization (barrier function)
//
#include "../include/iso3dfd.h"
/*
 * Device-Code - Optimized for GPU
 * SYCL implementation for a single iteration of the iso3dfd kernel
 * using shared local memory (SLM) optimizations.
 *
 * An ND-Range kernel spawns work-items in the x, y dimensions; each
 * work-item then traverses a slice of the z-dimension. z-dimension
 * slicing (z_offset points per work-item) controls the total number of
 * global work-items. SLM padding (PAD) can be used to eliminate SLM
 * bank conflicts if there are any.
 *
 * Parameters:
 *   it         - the nd_item identifying this work-item
 *   next       - wavefield updated this step (read-modify-write)
 *   prev       - wavefield from the previous step (read-only)
 *   vel        - per-point velocity term
 *   coeff      - stencil coefficients coeff[0..HALF_LENGTH]
 *   tab        - SLM tile holding one xy-plane slab incl. halo
 *   nx, nxy    - row stride and z-plane stride of the grid
 *   bx, by     - halo offsets (HALF_LENGTH) added to global ids
 *   z_offset   - number of z planes each work-item processes
 *   full_end_z - exclusive last interior z plane
 */
void iso_3dfd_iteration_slm(sycl::nd_item<3> it, float *next, float *prev,
                            float *vel, const float *coeff, float *tab,
                            size_t nx, size_t nxy, size_t bx, size_t by,
                            size_t z_offset, int full_end_z) {
  // Compute local-id for each work-item
  size_t id0 = it.get_local_id(2);
  size_t id1 = it.get_local_id(1);
  // Compute the position in local memory each work-item
  // will fetch data from global memory into shared
  // local memory
  // size0 is the padded row width of the SLM tile; identifiant is this
  // work-item's linear index inside the tile (halo included).
  size_t size0 = it.get_local_range(2) + 2 * HALF_LENGTH + PAD;
  size_t identifiant = (id0 + HALF_LENGTH) + (id1 + HALF_LENGTH) * size0;
  // We compute the start and the end position in the grid
  // for each work-item.
  // Each work-items local value gid is updated to track the
  // current cell/grid point it is working with.
  // This position is calculated with the help of slice-ID and number of
  // grid points each work-item will process.
  // Offset of HALF_LENGTH is also used to account for HALO
  size_t begin_z = it.get_global_id(0) * z_offset + HALF_LENGTH;
  size_t end_z = begin_z + z_offset;
  if (end_z > full_end_z) end_z = full_end_z;  // clamp the last slice
  size_t gid = (it.get_global_id(2) + bx) + ((it.get_global_id(1) + by) * nx) +
               (begin_z * nxy);
  // front and back temporary arrays are used to ensure
  // the grid values in z-dimension are read once, shifted in
  // these array and re-used multiple times before being discarded
  //
  // This is an optimization technique to enable data-reuse and
  // improve overall FLOPS to BYTES read ratio
  float front[HALF_LENGTH + 1];
  float back[HALF_LENGTH];
  float c[HALF_LENGTH + 1];  // private copy of the coefficients
  for (unsigned int iter = 0; iter < HALF_LENGTH; iter++) {
    front[iter] = prev[gid + iter * nxy];
  }
  c[0] = coeff[0];
  for (unsigned int iter = 1; iter <= HALF_LENGTH; iter++) {
    back[iter - 1] = prev[gid - iter * nxy];
    c[iter] = coeff[iter];
  }
  // Shared Local Memory (SLM) optimizations (SYCL)
  // Set some flags to indicate if the current work-item
  // should read from global memory to shared local memory buffer
  // or not (only the first HALF_LENGTH rows/columns of the work-group
  // also load the halo cells).
  const unsigned int items_X = it.get_local_range(2);
  const unsigned int items_Y = it.get_local_range(1);
  bool copyHaloY = false, copyHaloX = false;
  if (id1 < HALF_LENGTH) copyHaloY = true;
  if (id0 < HALF_LENGTH) copyHaloX = true;
  for (size_t i = begin_z; i < end_z; i++) {
    // Shared Local Memory (SLM) optimizations (SYCL)
    // If work-item is flagged to read into SLM buffer
    if (copyHaloY) {
      tab[identifiant - HALF_LENGTH * size0] = prev[gid - HALF_LENGTH * nx];
      tab[identifiant + items_Y * size0] = prev[gid + items_Y * nx];
    }
    if (copyHaloX) {
      tab[identifiant - HALF_LENGTH] = prev[gid - HALF_LENGTH];
      tab[identifiant + items_X] = prev[gid + items_X];
    }
    tab[identifiant] = front[0];
    // SYCL Basic synchronization (barrier function)
    // Force synchronization within a work-group
    // using barrier function to ensure
    // all the work-items have completed reading into the SLM buffer
    it.barrier(access::fence_space::local_space);
    // Only one new data-point read from global memory
    // in z-dimension (depth)
    front[HALF_LENGTH] = prev[gid + HALF_LENGTH * nxy];
    // Stencil code to update grid point at position given by global id (gid)
    // New time step for grid point is computed based on the values of the
    // the immediate neighbors - horizontal, vertical and depth
    // directions(HALF_LENGTH number of points in each direction),
    // as well as the value of grid point at a previous time step
    //
    // Neighbors in the depth (z-dimension) are read out of
    // front and back arrays
    // Neighbors in the horizontal and vertical (x, y dimension) are
    // read from the SLM buffers
    float value = c[0] * front[0];
#pragma unroll(HALF_LENGTH)
    for (unsigned int iter = 1; iter <= HALF_LENGTH; iter++) {
      value +=
          c[iter] * (front[iter] + back[iter - 1] + tab[identifiant + iter] +
                     tab[identifiant - iter] + tab[identifiant + iter * size0] +
                     tab[identifiant - iter * size0]);
    }
    // 2nd-order-in-time update; next currently holds the wavefield from
    // two steps ago.
    next[gid] = 2.0f * front[0] - next[gid] + value * vel[gid];
    // Update the gid to advance in the z-dimension
    gid += nxy;
    // Input data in front and back are shifted to discard the
    // oldest value and read one new value.
    for (unsigned int iter = HALF_LENGTH - 1; iter > 0; iter--) {
      back[iter] = back[iter - 1];
    }
    back[0] = front[0];
    for (unsigned int iter = 0; iter < HALF_LENGTH; iter++) {
      front[iter] = front[iter + 1];
    }
    // SYCL Basic synchronization (barrier function)
    // Force synchronization within a work-group
    // using barrier function to ensure that SLM buffers
    // are not overwritten by next set of work-items
    // (highly unlikely but not impossible)
    it.barrier(access::fence_space::local_space);
  }
}
/*
 * Device-Code - Optimized for GPU, CPU
 * SYCL implementation for a single iteration of the iso3dfd kernel
 * without using any shared local memory optimizations.
 *
 * An ND-Range kernel spawns work-items in the x, y dimensions; each
 * work-item then traverses a slice of the z-dimension. z-dimension
 * slicing (z_offset points per work-item) controls the total number of
 * global work-items.
 *
 * Parameters mirror iso_3dfd_iteration_slm minus the SLM tile:
 *   next/prev/vel/coeff - wavefields, velocity term and coefficients
 *   nx, nxy             - row stride and z-plane stride of the grid
 *   bx, by              - halo offsets added to global ids
 *   z_offset            - number of z planes each work-item processes
 *   full_end_z          - exclusive last interior z plane
 */
void iso_3dfd_iteration_global(sycl::nd_item<3> it, float *next,
                               float *prev, float *vel, const float *coeff,
                               int nx, int nxy, int bx, int by, int z_offset,
                               int full_end_z) {
  // We compute the start and the end position in the grid
  // for each work-item.
  // Each work-items local value gid is updated to track the
  // current cell/grid point it is working with.
  // This position is calculated with the help of slice-ID and number of
  // grid points each work-item will process.
  // Offset of HALF_LENGTH is also used to account for HALO
  size_t begin_z = it.get_global_id(0) * z_offset + HALF_LENGTH;
  size_t end_z = begin_z + z_offset;
  if (end_z > full_end_z) end_z = full_end_z;  // clamp the last slice
  size_t gid = (it.get_global_id(2) + bx) + ((it.get_global_id(1) + by) * nx) +
               (begin_z * nxy);
  // front and back temporary arrays are used to ensure
  // the grid values in z-dimension are read once, shifted in
  // these array and re-used multiple times before being discarded
  //
  // This is an optimization technique to enable data-reuse and
  // improve overall FLOPS to BYTES read ratio
  float front[HALF_LENGTH + 1];
  float back[HALF_LENGTH];
  float c[HALF_LENGTH + 1];  // private copy of the coefficients
  for (unsigned int iter = 0; iter <= HALF_LENGTH; iter++) {
    front[iter] = prev[gid + iter * nxy];
  }
  c[0] = coeff[0];
  for (unsigned int iter = 1; iter <= HALF_LENGTH; iter++) {
    c[iter] = coeff[iter];
    back[iter - 1] = prev[gid - iter * nxy];
  }
  // Stencil code to update grid point at position given by global id (gid)
  // New time step for grid point is computed based on the values of the
  // the immediate neighbors - horizontal, vertical and depth
  // directions(HALF_LENGTH number of points in each direction),
  // as well as the value of grid point at a previous time step
  float value = c[0] * front[0];
#pragma unroll(HALF_LENGTH)
  for (unsigned int iter = 1; iter <= HALF_LENGTH; iter++) {
    value += c[iter] *
             (front[iter] + back[iter - 1] + prev[gid + iter] +
              prev[gid - iter] + prev[gid + iter * nx] + prev[gid - iter * nx]);
  }
  // 2nd-order-in-time update; next currently holds the wavefield from
  // two steps ago.
  next[gid] = 2.0f * front[0] - next[gid] + value * vel[gid];
  // Update the gid and position in z-dimension and check if there
  // is more work to do
  gid += nxy;
  begin_z++;
  while (begin_z < end_z) {
    // Input data in front and back are shifted to discard the
    // oldest value and read one new value.
    for (unsigned int iter = HALF_LENGTH - 1; iter > 0; iter--) {
      back[iter] = back[iter - 1];
    }
    back[0] = front[0];
    for (unsigned int iter = 0; iter < HALF_LENGTH; iter++) {
      front[iter] = front[iter + 1];
    }
    // Only one new data-point read from global memory
    // in z-dimension (depth)
    front[HALF_LENGTH] = prev[gid + HALF_LENGTH * nxy];
    // Stencil code to update grid point at position given by global id (gid)
    float value = c[0] * front[0];
#pragma unroll(HALF_LENGTH)
    for (unsigned int iter = 1; iter <= HALF_LENGTH; iter++) {
      value += c[iter] * (front[iter] + back[iter - 1] + prev[gid + iter] +
                          prev[gid - iter] + prev[gid + iter * nx] +
                          prev[gid - iter * nx]);
    }
    next[gid] = 2.0f * front[0] - next[gid] + value * vel[gid];
    gid += nxy;
    begin_z++;
  }
}
/*
* Host-side SYCL Code
*
* Driver function for ISO3DFD SYCL code
* Uses ptr_next and ptr_prev as ping-pong buffers to achieve
* accelerated wave propogation
*
* This function uses SYCL buffers to facilitate host to device
* buffer copies
*
*/
// Runs nIterations time steps of the ISO3DFD stencil on the device bound
// to `q`, using ptr_next / ptr_prev as host-side ping-pong grids.
//
// Parameters:
//   q                    - SYCL queue selecting the target device
//   ptr_next, ptr_prev   - host grids of n1*n2*n3 floats (ping-pong pair)
//   ptr_vel              - velocity field, n1*n2*n3 floats
//   ptr_coeff            - stencil coefficients, HALF_LENGTH + 1 floats
//   n1, n2, n3           - grid dimensions (fastest to slowest varying)
//   n1_Tblock, n2_Tblock - work-group size in the first two dimensions
//   n3_Tblock            - number of Z grid points computed per work-item
//   end_z                - Z bound passed through to the kernels
//   nIterations          - number of time steps to execute
//
// Returns true unconditionally. Results are copied back to the host
// pointers when the SYCL buffers are destroyed at the end of the buffer
// scope below.
bool iso_3dfd_device(sycl::queue &q, float *ptr_next, float *ptr_prev,
                     float *ptr_vel, float *ptr_coeff, size_t n1, size_t n2,
                     size_t n3, size_t n1_Tblock, size_t n2_Tblock,
                     size_t n3_Tblock, size_t end_z, unsigned int nIterations) {
  // Linear strides within the flattened grid: nx elements per row,
  // nxy elements per XY-plane.
  size_t nx = n1;
  size_t nxy = n1 * n2;
  // Halo (stencil radius) in the first two dimensions.
  size_t bx = HALF_LENGTH;
  size_t by = HALF_LENGTH;
  // Display information about the selected device
  printTargetInfo(q, n1_Tblock, n2_Tblock);
  // Total number of grid elements in one field.
  size_t sizeTotal = (size_t)(nxy * n3);
  { // Begin buffer scope
    // Create buffers using SYCL class buffer.
    // Buffer destruction at the end of this scope blocks until all
    // submitted kernels finish and writes results back to the host
    // pointers the buffers were constructed from.
    buffer<float, 1> b_ptr_next(ptr_next, range<1>{sizeTotal});
    buffer<float, 1> b_ptr_prev(ptr_prev, range<1>{sizeTotal});
    buffer<float, 1> b_ptr_vel(ptr_vel, range<1>{sizeTotal});
    buffer<float, 1> b_ptr_coeff(ptr_coeff, range<1>{HALF_LENGTH + 1});
    // Iterate over time steps
    for (unsigned int k = 0; k < nIterations; k += 1) {
      // Submit command group for execution
      q.submit([&](handler &cgh) {
        // Create accessors
        auto next = b_ptr_next.get_access<access::mode::read_write>(cgh);
        auto prev = b_ptr_prev.get_access<access::mode::read_write>(cgh);
        auto vel = b_ptr_vel.get_access<access::mode::read>(cgh);
        // NOTE(review): access::target::constant_buffer is deprecated in
        // SYCL 2020 — confirm against the toolchain before modernizing.
        auto coeff =
            b_ptr_coeff.get_access<access::mode::read,
                                   access::target::constant_buffer>(cgh);
        // Define local and global range
        // Define local ND range of work-items
        // Size of each SYCL work-group selected here is a product of
        // n2_Tblock and n1_Tblock which can be controlled by the input
        // command line arguments
        auto local_nd_range = range<3>(1, n2_Tblock, n1_Tblock);
        // Define global ND range of work-items
        // Size of total number of work-items is selected based on the
        // total grid size in first and second dimensions (XY-plane)
        //
        // Each of the work-item then works on computing
        // one or more grid points. This value can be controlled by the
        // input command line argument n3_Tblock
        //
        // Effectively this implementation enables slicing of the full
        // grid into smaller grid slices which can be computed in parallel
        // to allow auto-scaling of the total number of work-items
        // spawned to achieve full occupancy for small or larger accelerator
        // devices
        auto global_nd_range =
            range<3>((n3 - 2 * HALF_LENGTH) / n3_Tblock, (n2 - 2 * HALF_LENGTH),
                     (n1 - 2 * HALF_LENGTH));
#ifdef USE_SHARED
        // Using 3D-stencil kernel with Shared Local Memory (SLM)
        // optimizations (SYCL) to improve effective FLOPS to BYTES
        // ratio. By default, SLM code path is disabled in this
        // code sample.
        // SLM code path can be enabled by recompiling the SYCL source
        // as follows:
        // cmake -DSHARED_KERNEL=1 ..
        // make -j`nproc`
        // Define a range for SLM Buffer
        // Padding can be used to avoid SLM bank conflicts
        // By default padding is disabled in the sample code
        auto localRange_ptr_prev =
            range<1>((n1_Tblock + (2 * HALF_LENGTH) + PAD) *
                     (n2_Tblock + (2 * HALF_LENGTH)));
        // Create an accessor for SLM buffer
        accessor<float, 1, access::mode::read_write, access::target::local> tab(
            localRange_ptr_prev, cgh);
        // Send a SYCL kernel (lambda) for parallel execution
        // The function that executes a single iteration is called
        // "iso_3dfd_iteration_slm"
        // alternating the 'next' and 'prev' parameters which effectively
        // swaps their content at every iteration.
        // NOTE(review): after an odd number of iterations the most recent
        // time step therefore lives in the 'prev' buffer — callers are
        // expected to account for this; confirm against the caller.
        if (k % 2 == 0)
          cgh.parallel_for<class iso_3dfd_kernel>(
              nd_range<3>{global_nd_range, local_nd_range}, [=](nd_item<3> it) {
                iso_3dfd_iteration_slm(it, next.get_pointer(),
                                       prev.get_pointer(), vel.get_pointer(),
                                       coeff.get_pointer(), tab.get_pointer(),
                                       nx, nxy, bx, by, n3_Tblock, end_z);
              });
        else
          cgh.parallel_for<class iso_3dfd_kernel_2>(
              nd_range<3>{global_nd_range, local_nd_range}, [=](nd_item<3> it) {
                iso_3dfd_iteration_slm(it, prev.get_pointer(),
                                       next.get_pointer(), vel.get_pointer(),
                                       coeff.get_pointer(), tab.get_pointer(),
                                       nx, nxy, bx, by, n3_Tblock, end_z);
              });
#else
        // Use Global Memory version of the 3D-Stencil kernel.
        // This code path is enabled by default
        // Send a SYCL kernel (lambda) for parallel execution
        // The function that executes a single iteration is called
        // "iso_3dfd_iteration_global"
        // alternating the 'next' and 'prev' parameters which effectively
        // swaps their content at every iteration.
        if (k % 2 == 0)
          cgh.parallel_for<class iso_3dfd_kernel>(
              nd_range<3>{global_nd_range, local_nd_range}, [=](nd_item<3> it) {
                iso_3dfd_iteration_global(it, next.get_pointer(),
                                          prev.get_pointer(), vel.get_pointer(),
                                          coeff.get_pointer(), nx, nxy, bx, by,
                                          n3_Tblock, end_z);
              });
        else
          cgh.parallel_for<class iso_3dfd_kernel_2>(
              nd_range<3>{global_nd_range, local_nd_range}, [=](nd_item<3> it) {
                iso_3dfd_iteration_global(it, prev.get_pointer(),
                                          next.get_pointer(), vel.get_pointer(),
                                          coeff.get_pointer(), nx, nxy, bx, by,
                                          n3_Tblock, end_z);
              });
#endif
      });
    }
  } // end buffer scope
  return true;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/06_Intel_VTune_Profiler/include/device_selector.hpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#ifndef DEVICESELECTOR_HPP
#define DEVICESELECTOR_HPP
#include <cstring>
#include <iostream>
#include <string>
#include "sycl/sycl.hpp"
// This is the class provided to SYCL runtime by the application to decide
// on which device to run, or whether to run at all.
// When selecting a device, SYCL runtime first takes (1) a selector provided by
// the program or a default one and (2) the set of all available devices. Then
// it passes each device to the '()' operator of the selector. Device, for
// which '()' returned the highest number, is selected. If a negative number
// was returned for all devices, then the selection process will cause an
// exception.
// Device selector that prefers devices whose name contains a given
// substring. The SYCL runtime calls operator() for every available
// device and picks the one with the highest score; a negative score
// for all devices would make selection throw.
class MyDeviceSelector : public ::sycl::device_selector {
 public:
  // Remember the substring to look for in device names.
  MyDeviceSelector(const std::string &p) : pattern(p) {}

  // Rate a candidate device: a device whose name contains the pattern
  // scores 100, every other device scores 1 (still selectable, but
  // only when no matching device exists).
  virtual int operator()(const ::sycl::device &device) const override {
    const std::string name = device.get_info<::sycl::info::device::name>();
    const bool matches = name.find(pattern) != std::string::npos;
    return matches ? 100 : 1;
  }

 private:
  std::string pattern;  // substring searched for in device names
};
#endif
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/06_Intel_VTune_Profiler/include/iso3dfd.h | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
#include <chrono>
#include <cmath>
#include <cstring>
#include <ctime>
/*
* Parameters to define coefficients
* HALF_LENGTH: Radius of the stencil
* Sample source code is tested for HALF_LENGTH=8 resulting in
* 16th order Stencil finite difference kernel
*/
#define DT 0.002f
#define DXYZ 50.0f
#define HALF_LENGTH 8
/*
* Padding to test and eliminate shared local memory bank conflicts for
* the shared local memory(slm) version of the kernel executing on GPU
*/
#define PAD 0
bool iso_3dfd_device(sycl::queue&, float*, float*, float*, float*, size_t,
size_t, size_t, size_t, size_t, size_t, size_t,
unsigned int);
void printTargetInfo(sycl::queue&, unsigned int, unsigned int);
void usage(std::string);
void printStats(double, size_t, size_t, size_t, unsigned int);
bool within_epsilon(float*, float*, const size_t, const size_t, const size_t,
const unsigned int, const int, const float);
bool checkGridDimension(size_t, size_t, size_t, unsigned int, unsigned int,
unsigned int);
bool checkBlockDimension(sycl::queue&, unsigned int, unsigned int);
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/inclusive_scan.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <iostream>
using namespace sycl;
using namespace oneapi::dpl::execution;
// Demonstrates oneapi::dpl::inclusive_scan_by_segment: an inclusive
// prefix sum restarted at every change of key.
int main() {
  // Number of (key, value) pairs processed.
  const int num_elements = 6;
  // NOTE(review): R is never used below.
  auto R = range(num_elements);
  //Initialize the input vector for Keys
  std::vector<int> input_keys{ 0,0,0,1,1,1 };
  //Initialize the input vector for Values
  std::vector<int> input_values{ 1,2,3,4,5,6 };
  //Output vectors where we get the results back
  std::vector<int> output_values(num_elements, 0);
  //Create buffers for the above vectors
  buffer buf_in(input_keys);
  buffer buf_seq(input_values);
  buffer buf_out(output_values);
  // create buffer iterators
  auto keys_begin = oneapi::dpl::begin(buf_in);
  auto keys_end = oneapi::dpl::end(buf_in);
  auto vals_begin = oneapi::dpl::begin(buf_seq);
  auto result_begin = oneapi::dpl::begin(buf_out);
  // use policy for algorithms execution
  //Create queue with default selector
  queue q;
  std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
  auto policy = make_device_policy(q);
  // Inclusive scan within each run of equal keys: keys {0,0,0,1,1,1}
  // with values {1,2,3,4,5,6} yield {1,3,6,4,9,15}.
  // iter_res points one past the last written result.
  auto iter_res = oneapi::dpl::inclusive_scan_by_segment(policy, keys_begin, keys_end, vals_begin, result_begin);
  auto count_res = std::distance(result_begin,iter_res);
  // 3.Checking results
  // NOTE(review): output_values is printed directly while buf_out is
  // still alive; the host_accessor below synchronizes with the device,
  // but confirm that write-back to the original vector is guaranteed
  // before the buffer is destroyed.
  host_accessor result_vals(buf_out,read_only);
  std::cout<< "Keys = [ ";
  std::copy(input_keys.begin(),input_keys.end(),std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  std::cout<< "Values = [ ";
  std::copy(input_values.begin(),input_values.end(),std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  std::cout<< "Output Values = [ ";
  std::copy(output_values.begin(),output_values.begin() + count_res,std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/dpl_usm_alloc.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
using namespace sycl;
using namespace oneapi::dpl::execution;
const int N = 4;
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# USM allocator
usm_allocator<int, usm::alloc::shared> alloc(q);
std::vector<int, decltype(alloc)> v(N, alloc);
//# Parallel STL algorithm with USM allocator
oneapi::dpl::fill(make_device_policy(q), v.begin(), v.end(), 20);
q.wait();
for (int i = 0; i < v.size(); i++) std::cout << v[i] << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/minimum_function.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <oneapi/dpl/numeric>
using namespace sycl;
using namespace oneapi::dpl::execution;
// Demonstrates oneapi::dpl::minimum<> used as the binary operation of
// an exclusive scan: result[i] = min(0, v[0], ..., v[i-1]).
int main() {
  // NOTE(review): q is constructed but never used; the dpcpp_default
  // policy below selects its own device.
  queue q;
  constexpr int N = 8;
  //Input vector
  std::vector<int> v{3,-1,-4,1,-5,-9,2,6};
  //create a separate scope for buffer destruction so results are
  //written back into `result` when the buffers go out of scope
  std::vector<int>result(N);
  {
    buffer buf(v);
    buffer buf_res(result);
    //oneDPL buffer iterators for both the input and the result vectors
    auto start_v = oneapi::dpl::begin(buf);
    auto end_v = oneapi::dpl::end(buf);
    auto start_res = oneapi::dpl::begin(buf_res);
    auto end_res = oneapi::dpl::end(buf_res);
    //use oneapi::dpl::fill to zero-initialize the result vector
    oneapi::dpl::fill(oneapi::dpl::execution::dpcpp_default,start_res, end_res, 0);
    //oneapi::dpl::minimum<> as the binary operation of the exclusive
    //scan; with initial value 0 the output is the running minimum of
    //all preceding elements (exclusive of the current one)
    oneapi::dpl::exclusive_scan(oneapi::dpl::execution::dpcpp_default, start_v, end_v, start_res, int(0), oneapi::dpl::minimum<int>() );
  }
  for(int i = 0; i < result.size(); i++) std::cout << result[i] << "\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/lower_bound.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <iostream>
using namespace sycl;
using namespace oneapi::dpl::execution;
// Demonstrates oneapi::dpl::lower_bound: for each value of a search
// sequence, finds the first position in a sorted input where the value
// could be inserted without breaking the ordering.
int main() {
  const int num_elements = 5;
  // NOTE(review): R is never used below.
  auto R = range(num_elements);
  //Create queue with default selector
  queue q;
  std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
  //Initialize the input vector for search (must be sorted)
  std::vector<int> input_seq{0, 2, 2, 2, 3, 3, 3, 3, 6, 6};
  //Initialize the input vector for search pattern
  std::vector<int> input_pattern{0, 2, 4, 7, 6};
  //Output vector where we get the resulting insertion indices back
  std::vector<int> out_values(num_elements,0);
  buffer buf_in(input_seq);
  buffer buf_seq(input_pattern);
  buffer buf_out(out_values);
  // create buffer iterators
  auto keys_begin = oneapi::dpl::begin(buf_in);
  auto keys_end = oneapi::dpl::end(buf_in);
  auto vals_begin = oneapi::dpl::begin(buf_seq);
  auto vals_end = oneapi::dpl::end(buf_seq);
  auto result_begin = oneapi::dpl::begin(buf_out);
  // use policy for algorithms execution
  auto policy = make_device_policy(q);
  //Calling the oneDPL lower_bound algorithm. We pass in the policy, the
  //buffer iterators for the input vectors and the output.
  //Default comparator is the operator < used here.
  oneapi::dpl::lower_bound(policy,keys_begin,keys_end,vals_begin,vals_end,result_begin);
  // 3.Checking results by creating the host accessors
  host_accessor result_vals(buf_out,read_only);
  std::cout<< "Input Sequence = [ ";
  std::copy(input_seq.begin(),input_seq.end(),std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  std::cout<< "Search Sequence = [ ";
  std::copy(input_pattern.begin(),input_pattern.end(),std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  std::cout<< "Search Results = [ ";
  std::copy(out_values.begin(),out_values.end(),std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/dpl_simple.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
using namespace sycl;
constexpr int N = 4;
// Minimal Parallel STL example: fill a vector on the device.
int main() {
  // Queue on the default-selected device; report which device was used.
  queue q;
  std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";

  std::vector<int> data(N);

  //# Device-side fill via the Parallel STL algorithm with a device policy.
  oneapi::dpl::fill(oneapi::dpl::execution::make_device_policy(q), data.begin(), data.end(), 20);

  for (const int value : data) std::cout << value << "\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/ranges.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include<oneapi/dpl/execution>
#include<oneapi/dpl/algorithm>
#include<oneapi/dpl/ranges>
#include<iostream>
#include<vector>
using namespace sycl;
using namespace oneapi::dpl::experimental::ranges;
// Demonstrates oneDPL experimental ranges: write a reversed iota view
// into a SYCL buffer, then read the results back on the host.
int main()
{
    // Single source of truth for the element count (was a duplicated
    // literal 20).
    constexpr int num_elements = 20;
    std::vector<int> v(num_elements);
    {
        // Device-accessible buffer over the host vector; contents are
        // written back to v when the buffer is destroyed at scope exit.
        buffer A(v);
        // Lazy view of [0, num_elements), then reversed: 19, 18, ..., 0.
        auto view = iota_view(0, num_elements);
        auto rev_view = views::reverse(view);
        // Writable range view over the buffer. Uses the modern `sycl::`
        // namespace for consistency (the legacy `cl::sycl::` alias was
        // used here while the rest of the file uses `sycl`).
        auto range_res = all_view<int, sycl::access::mode::write>(A);
        copy(oneapi::dpl::execution::dpcpp_default, rev_view, range_res);
    }
    for (auto x : v)
        std::cout << x << " ";
    std::cout << "\n";
    return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/discard_iterator.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <iostream>
#include <tuple>
using namespace sycl;
using namespace oneapi::dpl::execution;
using std::get;
// Demonstrates oneapi::dpl::discard_iterator: copy_if over a zipped
// (data, stencil) sequence where the stencil half of the output is
// discarded, leaving only the selected data values.
int main() {
  const int num_elements = 10;
  //Initialize the input vector for search
  std::vector<int> input_seq{2, 4, 12, 24, 34, 48, 143, 63, 76, 69};
  //Initialize the stencil values
  std::vector<int> input_pattern{1, 2, 4, 1, 6, 1, 2, 1, 7, 1};
  //Output vector where we get the results back
  std::vector<int> out_values(num_elements,0);
  buffer buf_in(input_seq);
  buffer buf_seq(input_pattern);
  buffer buf_out(out_values);
  // Buffer iterators over the inputs and the output. (The unused end
  // iterators were removed; bounds are expressed via num_elements.)
  auto keys_begin = oneapi::dpl::begin(buf_in);
  auto vals_begin = oneapi::dpl::begin(buf_seq);
  auto result_begin = oneapi::dpl::begin(buf_out);
  // Device execution policy, used by the algorithm call below (it was
  // previously declared but bypassed in favor of a second spelling).
  auto policy = oneapi::dpl::execution::dpcpp_default;
  // Zip data and stencil so the predicate can filter on the stencil
  // while the data is copied.
  auto zipped_first = oneapi::dpl::make_zip_iterator(keys_begin, vals_begin);
  // discard_iterator swallows the stencil component of each selected
  // tuple, so only data values with stencil == 1 land in buf_out.
  auto iter_res = oneapi::dpl::copy_if(policy, zipped_first, zipped_first + num_elements,
                  oneapi::dpl::make_zip_iterator(result_begin, oneapi::dpl::discard_iterator()),
                  [](auto t){return get<1>(t) == 1;});
  // 3.Checking results by creating the host accessors
  host_accessor result_vals(buf_out,read_only);
  std::cout<< "Input Sequence = [ ";
  std::copy(input_seq.begin(),input_seq.end(),std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  std::cout<< "Sequence to search = [ ";
  std::copy(input_pattern.begin(),input_pattern.end(),std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  std::cout<< "Results with stencil value of 1 = [ ";
  std::copy(out_values.begin(),out_values.end(),std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/binary_search.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <iostream>
using namespace sycl;
using namespace oneapi::dpl::execution;
// Demonstrates oneapi::dpl::binary_search: for each element of a search
// sequence, writes 1/0 into the output depending on whether it occurs
// in a sorted input sequence.
int main() {
  const int num_elements = 5;
  //Create queue with default selector
  queue q;
  std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
  //Initialize the input vector for search (must be sorted)
  std::vector<int> input_seq{0, 2, 2, 2, 3, 3, 3, 3, 6, 6};
  //Initialize the input vector for search pattern
  std::vector<int> input_pattern{0, 2, 4, 7, 6};
  //Output vector where we get the results back (one flag per pattern)
  std::vector<int> output_values(num_elements,0);
  //Create buffers for the above vectors
  buffer buf_in(input_seq);
  buffer buf_seq(input_pattern);
  buffer buf_out(output_values);
  // create buffer iterators
  auto keys_begin = oneapi::dpl::begin(buf_in);
  auto keys_end = oneapi::dpl::end(buf_in);
  auto vals_begin = oneapi::dpl::begin(buf_seq);
  auto vals_end = oneapi::dpl::end(buf_seq);
  auto result_begin = oneapi::dpl::begin(buf_out);
  // use policy for algorithms execution
  auto policy = make_device_policy(q);
  //Calling the oneDPL binary_search algorithm with the policy, the
  //buffer iterators for the input vectors and the output.
  //Default comparator is the operator < used here. (The previously
  //captured return iterator and the unused `R` range were removed,
  //along with dead commented-out constants.)
  oneapi::dpl::binary_search(policy,keys_begin,keys_end,vals_begin,vals_end,result_begin);
  // 3.Checking results by creating the host accessors
  host_accessor result_vals(buf_out,read_only);
  std::cout<< "Input sequence = [";
  std::copy(input_seq.begin(),input_seq.end(),std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  std::cout<< "Search sequence = [";
  std::copy(input_pattern.begin(),input_pattern.end(),std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  std::cout<< "Search results = [";
  std::copy(output_values.begin(),output_values.end(),std::ostream_iterator<bool>(std::cout," "));
  std::cout <<"]"<< "\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/dpl_usm_pointer.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
using namespace sycl;
using namespace oneapi::dpl::execution;
const int N = 4;
// Demonstrates a Parallel STL algorithm operating on a raw USM pointer.
int main() {
  queue q;
  std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";

  //# Shared USM allocation, reachable from both host and device.
  int *values = malloc_shared<int>(N, q);

  //# Fill through the Parallel STL algorithm using the raw pointer pair.
  oneapi::dpl::fill(make_device_policy(q), values, values + N, 20);
  q.wait();

  for (int i = 0; i < N; ++i) std::cout << values[i] << "\n";

  free(values, q);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/upper_bound.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <iostream>
using namespace sycl;
using namespace oneapi::dpl::execution;
// Demonstrates oneapi::dpl::upper_bound: for each value of a search
// sequence, finds the last position in a sorted input where the value
// could be inserted without breaking the ordering.
int main() {
  const int num_elements = 5;
  //Create queue with default selector
  queue q;
  std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
  //Initialize the input vector for search (must be sorted)
  std::vector<int> input_seq{0, 2, 2, 2, 3, 3, 3, 3, 6, 6};
  //Initialize the input vector for search pattern
  std::vector<int> input_pattern{0, 2, 4, 7, 6};
  //Output vector where we get the resulting insertion indices back
  std::vector<int> out_values(num_elements,0);
  buffer buf_in(input_seq);
  buffer buf_seq(input_pattern);
  buffer buf_out(out_values);
  // create buffer iterators
  auto keys_begin = oneapi::dpl::begin(buf_in);
  auto keys_end = oneapi::dpl::end(buf_in);
  auto vals_begin = oneapi::dpl::begin(buf_seq);
  auto vals_end = oneapi::dpl::end(buf_seq);
  auto result_begin = oneapi::dpl::begin(buf_out);
  // use policy for algorithms execution
  auto policy = make_device_policy(q);
  //Calling the oneDPL upper_bound algorithm. The `policy` declared
  //above is passed here (it was previously declared but unused, with a
  //second make_device_policy(q) call in its place). The unused `R`
  //range was removed. Default comparator is operator <.
  oneapi::dpl::upper_bound(policy,keys_begin,keys_end,vals_begin,vals_end,result_begin);
  // 3.Checking results by creating the host accessors
  host_accessor result_vals(buf_out,read_only);
  std::cout<< "Input Sequence = [ ";
  std::copy(input_seq.begin(),input_seq.end(),std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  std::cout<< "Search Sequence = [ ";
  std::copy(input_pattern.begin(),input_pattern.end(),std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  std::cout<< "Search Results = [ ";
  std::copy(out_values.begin(),out_values.end(),std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/dpl_sortdouble.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
using namespace sycl;
using namespace oneapi::dpl::execution;
// Doubles every element of a vector on the device, then sorts it.
int main() {
  queue q;
  std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";

  std::vector<int> values{2,3,1,4};

  // Reuse one device policy for both algorithm calls.
  auto policy = make_device_policy(q);
  oneapi::dpl::for_each(policy, values.begin(), values.end(), [](int &elem){ elem *= 2; });
  oneapi::dpl::sort(policy, values.begin(), values.end());

  for (const int value : values) std::cout << value << "\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/transform_iterator.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <oneapi/dpl/numeric>
using namespace sycl;
using namespace oneapi::dpl::execution;
// Demonstrates oneDPL's transform_iterator: a lazy element-wise
// transformation applied while another algorithm (reduce) consumes
// the sequence, with no intermediate storage.
int main() {
  // Virtual sequence 0, 1, ..., 99 generated on the fly.
  dpl::counting_iterator<int> first(0);
  dpl::counting_iterator<int> last(100);
  // Applied to each element as it is read: x -> 2 * x.
  auto func = [](const auto &x){ return x * 2; };
  auto transform_first = dpl::make_transform_iterator(first, func);
  auto transform_last = transform_first + (last - first);
  auto sum = dpl::reduce(dpl::execution::dpcpp_default,
      transform_first, transform_last); // sum is 2*(0 + 1 + ... + 99) = 9900
  std::cout <<"Reduce output using Transform Iterator: "<<sum << "\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/dpl_buffer.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
using namespace sycl;
using namespace oneapi::dpl::execution;
// Runs Parallel STL algorithms on a SYCL buffer via oneDPL buffer
// iterators; results are written back to the vector at scope exit.
int main(){
  queue q;
  std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";

  std::vector<int> values{2,3,1,4};

  //# Buffer scope: the buffer copies results back into `values` when
  //# it is destroyed at the closing brace.
  {
    buffer buf(values);
    auto first = oneapi::dpl::begin(buf);
    auto last = oneapi::dpl::end(buf);
    auto policy = make_device_policy(q);
    // Triple every element, then sort ascending, both on the device.
    oneapi::dpl::for_each(policy, first, last, [](int &elem){ elem *= 3; });
    oneapi::dpl::sort(policy, first, last);
  }

  for (const int value : values) std::cout << value << "\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/zip_iterator.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
using namespace sycl;
using namespace oneapi::dpl::execution;
// Demonstrates oneDPL's zip iterator: three parallel sequences are
// traversed as one sequence of tuples, enabling an element-wise
// product computed by a single for_each.
int main() {
  queue q;
  std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";

  constexpr int num_elements = 16;
  // Two input sequences (all 2s, all 5s) and a zero-filled output.
  std::vector<int> input_v1(num_elements, 2);
  std::vector<int> input_v2(num_elements, 5);
  std::vector<int> input_v3(num_elements, 0);

  // Zip the three sequences into one tuple-valued range.
  auto zipped_begin = oneapi::dpl::make_zip_iterator(input_v1.begin(), input_v2.begin(), input_v3.begin());
  auto zipped_end = oneapi::dpl::make_zip_iterator(input_v1.end(), input_v2.end(), input_v3.end());

  // Element-wise product through the zipped view: out = a * b.
  oneapi::dpl::for_each(make_device_policy(q), zipped_begin, zipped_end, [](auto t) {
    using std::get;
    get<2>(t) = get<1>(t) * get<0>(t);
  });

  for (const int value : input_v3)
    std::cout << value << " ";
  std::cout << "\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/maximum_function.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <oneapi/dpl/numeric>
using namespace sycl;
using namespace oneapi::dpl::execution;
// Demonstrates oneapi::dpl::maximum<> used as the binary operation of
// an exclusive scan: result[i] = max(0, v[0], ..., v[i-1]).
int main() {
  // NOTE(review): q is constructed but never used; the dpcpp_default
  // policy below selects its own device.
  queue q;
  constexpr int N = 8;
  std::vector<int> v{-3,1,4,-1,5,9,-2,6};
  //create a separate scope for buffer destruction so results are
  //written back into `result` when the buffers go out of scope
  std::vector<int>result(N);
  {
    buffer<int,1> buf(v.data(), range<1>(N));
    buffer<int,1> buf_res(result.data(), range<1>(N));
    //oneDPL buffer iterators for both the input and the result vectors
    auto start_v = oneapi::dpl::begin(buf);
    auto end_v = oneapi::dpl::end(buf);
    auto start_res = oneapi::dpl::begin(buf_res);
    auto end_res = oneapi::dpl::end(buf_res);
    //zero-initialize the result vector on the device
    oneapi::dpl::fill(oneapi::dpl::execution::dpcpp_default,start_res, end_res, 0);
    //oneapi::dpl::maximum<> as the binary operation of the exclusive
    //scan. Invoked as oneapi::dpl::exclusive_scan (was std::exclusive_scan)
    //for consistency with the companion minimum_function sample and
    //because oneDPL documents its device-policy overloads under the
    //oneapi::dpl namespace.
    oneapi::dpl::exclusive_scan(oneapi::dpl::execution::dpcpp_default, start_v, end_v, start_res, int(0), oneapi::dpl::maximum<int>() );
  }
  for(int i = 0; i < result.size(); i++) std::cout << result[i] << "\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/counting_iterator.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <oneapi/dpl/numeric>
using namespace sycl;
using namespace oneapi::dpl::execution;
int main() {
oneapi::dpl::counting_iterator<int> count_a(0);
oneapi::dpl::counting_iterator<int> count_b = count_a + 100;
int init = count_a[0]; // OK: init == 0
//*count_b = 7; // ERROR: counting_iterator doesn't provide write operations
auto sum = oneapi::dpl::reduce(dpl::execution::dpcpp_default,
count_a, count_b, init); // sum is (0 + 0 + 1 + ... + 99) = 4950
std::cout << "The Sum is: " <<sum<<"\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/exclusive_scan.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <iostream>
using namespace sycl;
using namespace oneapi::dpl::execution;
// Demonstrates oneapi::dpl::exclusive_scan_by_segment: an exclusive
// prefix sum (seeded with 0) restarted at every change of key.
int main() {
  using T = int;
  // Number of (key, value) pairs processed.
  const int num_elements = 6;
  // NOTE(review): R is never used below.
  auto R = range(num_elements);
  //Initialize the input vector for Keys
  std::vector<int> input_keys{ 0,0,0,1,1,1 };
  //Initialize the input vector for Values
  std::vector<int> input_values{ 1,2,3,4,5,6 };
  //Output vectors where we get the results back
  std::vector<int> output_values(num_elements, 0);
  //Create buffers for the above vectors
  buffer buf_in(input_keys);
  buffer buf_seq(input_values);
  buffer buf_out(output_values);
  // create buffer iterators
  auto keys_begin = oneapi::dpl::begin(buf_in);
  auto keys_end = oneapi::dpl::end(buf_in);
  auto vals_begin = oneapi::dpl::begin(buf_seq);
  auto result_begin = oneapi::dpl::begin(buf_out);
  // use policy for algorithms execution
  //Create queue with default selector
  queue q;
  std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
  auto policy = make_device_policy(q);
  // Exclusive scan within each run of equal keys, starting each segment
  // from T(0): keys {0,0,0,1,1,1} with values {1,2,3,4,5,6} yield
  // {0,1,3,0,4,9}. iter_res points one past the last written result.
  auto iter_res = oneapi::dpl::exclusive_scan_by_segment(policy, keys_begin, keys_end, vals_begin, result_begin,T(0));
  auto count_res = std::distance(result_begin,iter_res);
  // 3.Checking results
  // NOTE(review): output_values is printed directly while buf_out is
  // still alive; the host_accessor below synchronizes with the device,
  // but confirm that write-back to the original vector is guaranteed
  // before the buffer is destroyed.
  host_accessor result_vals(buf_out,read_only);
  std::cout<< "Keys = [ ";
  std::copy(input_keys.begin(),input_keys.end(),std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  std::cout<< "Values = [ ";
  std::copy(input_values.begin(),input_values.end(),std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  std::cout<< "Output Values = [ ";
  std::copy(output_values.begin(),output_values.begin() + count_res,std::ostream_iterator<int>(std::cout," "));
  std::cout <<"]"<< "\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/reduce_segment.cpp |
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
using namespace sycl;
using namespace oneapi::dpl::execution;
int main() {
// Demonstrates oneapi::dpl::reduce_by_segment: sums runs of values that
// share the same (equal, consecutive) key, producing one key and one sum
// per segment.
const int num_elements = 6;
auto R = range(num_elements);
//Create queue with default selector
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
// Segment keys: equal consecutive keys form one segment ({0,0,0} and {1,1,1}).
std::vector<int> input_keys{ 0,0,0,1,1,1 };
// Values reduced (summed) per segment.
std::vector<int> input_values{ 1,2,3,4,5,6 };
// Output vectors that receive the distinct keys and per-segment sums.
std::vector<int> output_keys(num_elements, 0);
std::vector<int> output_values(num_elements, 0);
// Buffers over the host vectors.
buffer buf_in(input_keys);
buffer buf_seq(input_values);
buffer buf_out_keys(output_keys.data(),R);
buffer buf_out_vals(output_values.data(),R);
// oneDPL buffer iterators.
auto keys_begin = oneapi::dpl::begin(buf_in);
auto keys_end = oneapi::dpl::end(buf_in);
auto vals_begin = oneapi::dpl::begin(buf_seq);
auto result_key_begin = oneapi::dpl::begin(buf_out_keys);
auto result_vals_begin = oneapi::dpl::begin(buf_out_vals);
// Device execution policy bound to q; reused at the call site instead of
// constructing a second, identical policy.
auto policy = make_device_policy(q);
// reduce_by_segment returns a pair of end iterators for the output key and
// value sequences. Default comparison (==) and reduction (+) are used.
auto pair_iters = oneapi::dpl::reduce_by_segment(policy, keys_begin, keys_end, vals_begin, result_key_begin, result_vals_begin);
auto iter_keys = std::get<0>(pair_iters);
auto iter_vals = std::get<1>(pair_iters);
// Count of valid results in each output, via std::distance.
// (One declaration per line; both initialized at the point of use.)
int count_keys = std::distance(result_key_begin, iter_keys);
int count_vals = std::distance(result_vals_begin, iter_vals);
// Host accessors synchronize the device results back to the host vectors.
host_accessor result_keys(buf_out_keys,read_only);
host_accessor result_vals(buf_out_vals,read_only);
std::cout<< "Keys = [ ";
std::copy(input_keys.begin(),input_keys.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Values = [ ";
std::copy(input_values.begin(),input_values.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Output Keys = [ ";
std::copy(output_keys.begin(),output_keys.begin() + count_keys,std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Output Values = [ ";
std::copy(output_values.begin(),output_values.begin() + count_vals,std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/src/permutation_iterator.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/iterator>
using namespace sycl;
using namespace std;
struct multiply_index_by_two {
// Index-mapping functor for oneapi::dpl::make_permutation_iterator:
// maps logical position i to source position 2*i, i.e. selects the
// elements at even indices of the underlying range.
// NOTE(review): exposed via operator[] — confirm this matches the
// index-map interface expected by the oneDPL version in use.
template <typename Index>
Index operator[](const Index& i) const
{
return i * 2;
}
};
int main() {
//queue q;
// Destination vector; only the first `num_elements` entries are written
// by the copy below, the rest stay 0.
const int num_elelemts = 100;
std::vector<float> result(num_elelemts, 0);
// Counting iterators define the integer input range [0, 20).
oneapi::dpl::counting_iterator<int> first(0);
oneapi::dpl::counting_iterator<int> last(20);
// first and last are iterators that define a contiguous range of input elements
// compute the number of elements in the range between the first and last that are accessed
// by the permutation iterator
size_t num_elements = std::distance(first, last) / 2 + std::distance(first, last) % 2;
using namespace oneapi;
// The permutation iterator visits indices 0,2,4,... of the counting range.
auto permutation_first = oneapi::dpl::make_permutation_iterator(first, multiply_index_by_two());
auto permutation_last = permutation_first + num_elements;
// Copy the permuted (even-index) elements into `result` using the default device policy.
auto it = ::std::copy(oneapi::dpl::execution::dpcpp_default, permutation_first, permutation_last, result.begin());
// Number of elements actually copied.
auto count = ::std::distance(result.begin(),it);
for(int i = 0; i < count; i++) ::std::cout << result[i] << " ";
// for (auto it = result.begin(); it < result.end(); it++)
// ::std::cout << (*it) <<" ";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/inclusive_scan.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <iostream>
using namespace sycl;
using namespace oneapi::dpl::execution;
int main() {
// Demonstrates oneapi::dpl::inclusive_scan_by_segment: a running
// (inclusive) sum of the values that restarts whenever the key changes.
const int num_elements = 6;
// NOTE(review): R is unused in this sample.
auto R = range(num_elements);
//Initialize the input vector for Keys
std::vector<int> input_keys{ 0,0,0,1,1,1 };
//Initialize the input vector for Values
std::vector<int> input_values{ 1,2,3,4,5,6 };
//Output vectors where we get the results back
std::vector<int> output_values(num_elements, 0);
//Create buffers for the above vectors
buffer buf_in(input_keys);
buffer buf_seq(input_values);
buffer buf_out(output_values);
// create buffer iterators
auto keys_begin = oneapi::dpl::begin(buf_in);
auto keys_end = oneapi::dpl::end(buf_in);
auto vals_begin = oneapi::dpl::begin(buf_seq);
auto result_begin = oneapi::dpl::begin(buf_out);
// use policy for algorithms execution
//Create queue with default selector
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
auto policy = make_device_policy(q);
// Returns an iterator one past the last element written to the output.
auto iter_res = oneapi::dpl::inclusive_scan_by_segment(policy, keys_begin, keys_end, vals_begin, result_begin);
// Number of results produced (one per input element).
auto count_res = std::distance(result_begin,iter_res);
// 3.Checking results; the host_accessor synchronizes device results to host.
host_accessor result_vals(buf_out,read_only);
std::cout<< "Keys = [ ";
std::copy(input_keys.begin(),input_keys.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Values = [ ";
std::copy(input_values.begin(),input_values.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Output Values = [ ";
std::copy(output_values.begin(),output_values.begin() + count_res,std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/dpl_usm_alloc.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
using namespace sycl;
using namespace oneapi::dpl::execution;
const int N = 4;
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# USM allocator
usm_allocator<int, usm::alloc::shared> alloc(q);
std::vector<int, decltype(alloc)> v(N, alloc);
//# Parallel STL algorithm with USM allocator
oneapi::dpl::fill(make_device_policy(q), v.begin(), v.end(), 20);
q.wait();
for (int i = 0; i < v.size(); i++) std::cout << v[i] << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/minimum_function.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <oneapi/dpl/numeric>
using namespace sycl;
using namespace oneapi::dpl::execution;
int main() {
// Demonstrates oneapi::dpl::minimum<int> as the binary operation of an
// exclusive scan: each output is the running minimum of the preceding
// inputs, seeded with the initial value 0.
queue q;
constexpr int N = 8;
//Input vector
std::vector<int> v{3,-1,-4,1,-5,-9,2,6};
//create a separate scope for buffer destruction
// (the buffer destructors write the device results back into `result`)
std::vector<int>result(N);
{
buffer buf(v);
buffer buf_res(result);
//oneDPL buffer iterators for both the input and the result vectors
auto start_v = oneapi::dpl::begin(buf);
auto end_v = oneapi::dpl::end(buf);
auto start_res = oneapi::dpl::begin(buf_res);
auto end_res = oneapi::dpl::end(buf_res);
//use std::fill to initialize the result vector
oneapi::dpl::fill(oneapi::dpl::execution::dpcpp_default,start_res, end_res, 0);
//usage of dpl::minimum<> function call within the std::exclusive_scan function
oneapi::dpl::exclusive_scan(oneapi::dpl::execution::dpcpp_default, start_v, end_v, start_res, int(0), oneapi::dpl::minimum<int>() );
}
// NOTE(review): q is created above but both calls use dpcpp_default, not q.
for(int i = 0; i < result.size(); i++) std::cout << result[i] << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/lower_bound.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <iostream>
using namespace sycl;
using namespace oneapi::dpl::execution;
int main() {
// Demonstrates oneapi::dpl::lower_bound: for each pattern value, finds the
// index of the first element in the sorted input that is not less than it.
const int num_elements = 5;
// NOTE(review): R is unused in this sample.
auto R = range(num_elements);
//Create queue with default selector
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//Initialize the input vector for search
std::vector<int> input_seq{0, 2, 2, 2, 3, 3, 3, 3, 6, 6};
//Initialize the input vector for search pattern
std::vector<int> input_pattern{0, 2, 4, 7, 6};
//Output vector where we get the results back
std::vector<int> out_values(num_elements,0);
buffer buf_in(input_seq);
buffer buf_seq(input_pattern);
buffer buf_out(out_values);
// create buffer iterators
auto keys_begin = oneapi::dpl::begin(buf_in);
auto keys_end = oneapi::dpl::end(buf_in);
auto vals_begin = oneapi::dpl::begin(buf_seq);
auto vals_end = oneapi::dpl::end(buf_seq);
auto result_begin = oneapi::dpl::begin(buf_out);
// use policy for algorithms execution
auto policy = make_device_policy(q);
//Calling the onedpl lower_bound algorithm. We pass in the policy, the buffer iterators for the input vectors and the output.
// Default comparator is the operator < used here.
oneapi::dpl::lower_bound(policy,keys_begin,keys_end,vals_begin,vals_end,result_begin);
// 3.Checking results by creating the host accessors
host_accessor result_vals(buf_out,read_only);
std::cout<< "Input Sequence = [ ";
std::copy(input_seq.begin(),input_seq.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Search Sequence = [ ";
std::copy(input_pattern.begin(),input_pattern.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Search Results = [ ";
std::copy(out_values.begin(),out_values.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/dpl_simple.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
using namespace sycl;
constexpr int N = 4;
int main() {
// Queue on the default device; report which device was selected.
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";

std::vector<int> data(N);
//# Fill the whole vector with 20 on the device via Parallel STL.
oneapi::dpl::fill(oneapi::dpl::execution::make_device_policy(q), data.begin(), data.end(), 20);
for (const int x : data) std::cout << x << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/ranges.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include<oneapi/dpl/execution>
#include<oneapi/dpl/algorithm>
#include<oneapi/dpl/ranges>
#include<iostream>
#include<vector>
using namespace sycl;
using namespace oneapi::dpl::experimental::ranges;
int main()
{
// Demonstrates oneDPL experimental ranges: copies a reversed iota view
// (19,18,...,0) into a writable view over buffer A, so v ends up reversed.
std::vector<int> v(20);
{
// Scope ensures the buffer is destroyed (and data written back to v)
// before v is read below.
buffer A(v);
auto view = iota_view(0, 20);
auto rev_view = views::reverse(view);
// all_view presents buffer A as a writable range for the device algorithm.
auto range_res = all_view<int, sycl::access::mode::write>(A);
copy(oneapi::dpl::execution::dpcpp_default, rev_view, range_res);
}
for (auto x : v)
std::cout << x << " ";
std::cout << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/discard_iterator.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <iostream>
#include <tuple>
using namespace sycl;
using namespace oneapi::dpl::execution;
using std::get;
int main() {
// Demonstrates oneapi::dpl::discard_iterator: copy_if over zipped
// (value, stencil) pairs keeps values whose stencil equals 1 and throws
// away the stencil half of each selected pair.
const int num_elements = 10;
//Initialize the input vector for search
std::vector<int> input_seq{2, 4, 12, 24, 34, 48, 143, 63, 76, 69};
//Initialize the stencil values
std::vector<int> input_pattern{1, 2, 4, 1, 6, 1, 2, 1, 7, 1};
//Output vector where we get the results back
std::vector<int> out_values(num_elements,0);
buffer buf_in(input_seq);
buffer buf_seq(input_pattern);
buffer buf_out(out_values);
// create buffer iterators
auto keys_begin = oneapi::dpl::begin(buf_in);
auto keys_end = oneapi::dpl::end(buf_in);
auto vals_begin = oneapi::dpl::begin(buf_seq);
auto vals_end = oneapi::dpl::end(buf_seq);
auto result_begin = oneapi::dpl::begin(buf_out);
// use policy for algorithms execution
// NOTE(review): `policy` is declared but the copy_if call below passes
// dpl::execution::dpcpp_default directly.
auto policy = oneapi::dpl::execution::dpcpp_default;
auto zipped_first = oneapi::dpl::make_zip_iterator(keys_begin, vals_begin);
// Selected values go to result_begin; the stencil component of each kept
// pair is written to discard_iterator, i.e. dropped.
auto iter_res = oneapi::dpl::copy_if(dpl::execution::dpcpp_default,zipped_first, zipped_first + num_elements,
dpl::make_zip_iterator(result_begin, dpl::discard_iterator()),
[](auto t){return get<1>(t) == 1;});
// 3.Checking results by creating the host accessors
host_accessor result_vals(buf_out,read_only);
std::cout<< "Input Sequence = [ ";
std::copy(input_seq.begin(),input_seq.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Sequence to search = [ ";
std::copy(input_pattern.begin(),input_pattern.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Results with stencil value of 1 = [ ";
std::copy(out_values.begin(),out_values.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/binary_search.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <iostream>
using namespace sycl;
using namespace oneapi::dpl::execution;
int main() {
// Demonstrates oneapi::dpl::binary_search: for each pattern value, writes
// 1 to the output if it is present in the sorted input, else 0.
const int num_elements = 5;
//Create queue with default selector and report the device.
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
// Sorted sequence to be searched.
std::vector<int> input_seq{0, 2, 2, 2, 3, 3, 3, 3, 6, 6};
// Values to look up in the sorted sequence.
std::vector<int> input_pattern{0, 2, 4, 7, 6};
// Per-pattern result: 1 if found, 0 otherwise.
std::vector<int> output_values(num_elements,0);
// Buffers over the host vectors.
buffer buf_in(input_seq);
buffer buf_seq(input_pattern);
buffer buf_out(output_values);
// oneDPL buffer iterators.
auto keys_begin = oneapi::dpl::begin(buf_in);
auto keys_end = oneapi::dpl::end(buf_in);
auto vals_begin = oneapi::dpl::begin(buf_seq);
auto vals_end = oneapi::dpl::end(buf_seq);
auto result_begin = oneapi::dpl::begin(buf_out);
// Device execution policy for the algorithm.
auto policy = make_device_policy(q);
// oneDPL binary_search with the default operator< comparator.
// The returned end iterator is not needed, so it is not captured.
oneapi::dpl::binary_search(policy,keys_begin,keys_end,vals_begin,vals_end,result_begin);
// Host accessor synchronizes the device results back before printing.
host_accessor result_vals(buf_out,read_only);
std::cout<< "Input sequence = [";
std::copy(input_seq.begin(),input_seq.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Search sequence = [";
std::copy(input_pattern.begin(),input_pattern.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Search results = [";
std::copy(output_values.begin(),output_values.end(),std::ostream_iterator<bool>(std::cout," "));
std::cout <<"]"<< "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/dpl_usm_pointer.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
using namespace sycl;
using namespace oneapi::dpl::execution;
const int N = 4;
int main() {
// Queue on the default device; report which device was selected.
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";

//# Shared USM allocation, reachable from both host and device.
int* data = malloc_shared<int>(N, q);

//# Parallel STL fill over the raw USM pointer range.
oneapi::dpl::fill(make_device_policy(q), data, data + N, 20);
q.wait();

for (int idx = 0; idx < N; ++idx) std::cout << data[idx] << "\n";
free(data, q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/upper_bound.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <iostream>
using namespace sycl;
using namespace oneapi::dpl::execution;
int main() {
// Demonstrates oneapi::dpl::upper_bound: for each pattern value, writes
// the index of the first element in the sorted input greater than it.
const int num_elements = 5;
//Create queue with default selector
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
// Sorted input sequence to search within.
std::vector<int> input_seq{0, 2, 2, 2, 3, 3, 3, 3, 6, 6};
// Values whose upper bounds are requested.
std::vector<int> input_pattern{0, 2, 4, 7, 6};
// One result index per pattern value.
std::vector<int> out_values(num_elements,0);
buffer buf_in(input_seq);
buffer buf_seq(input_pattern);
buffer buf_out(out_values);
// create buffer iterators
auto keys_begin = oneapi::dpl::begin(buf_in);
auto keys_end = oneapi::dpl::end(buf_in);
auto vals_begin = oneapi::dpl::begin(buf_seq);
auto vals_end = oneapi::dpl::end(buf_seq);
auto result_begin = oneapi::dpl::begin(buf_out);
// Device policy bound to q — reused in the call below instead of
// constructing a second, identical policy at the call site.
auto policy = make_device_policy(q);
// oneDPL upper_bound with the default operator< comparator.
oneapi::dpl::upper_bound(policy,keys_begin,keys_end,vals_begin,vals_end,result_begin);
// Host accessor synchronizes the device results back before printing.
host_accessor result_vals(buf_out,read_only);
std::cout<< "Input Sequence = [ ";
std::copy(input_seq.begin(),input_seq.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Search Sequence = [ ";
std::copy(input_pattern.begin(),input_pattern.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Search Results = [ ";
std::copy(out_values.begin(),out_values.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/dpl_sortdouble.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
using namespace sycl;
using namespace oneapi::dpl::execution;
int main() {
// Queue on the default device; report which device was selected.
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";

std::vector<int> values{2,3,1,4};
// Double every element, then sort ascending — both executed on the device.
oneapi::dpl::for_each(make_device_policy(q), values.begin(), values.end(), [](int &x){ x *= 2; });
oneapi::dpl::sort(make_device_policy(q), values.begin(), values.end());
for (const int x : values) std::cout << x << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/transform_iterator.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <oneapi/dpl/numeric>
using namespace sycl;
using namespace oneapi::dpl::execution;
int main() {
// counting_iterator pair defines the integer input range [0, 100).
dpl::counting_iterator<int> first(0);
dpl::counting_iterator<int> last(100);
// Transformation applied lazily on dereference: doubles each element.
auto func = [](const auto &x){ return x * 2; };
auto transform_first = dpl::make_transform_iterator(first, func);
auto transform_last = transform_first + (last - first);
auto sum = dpl::reduce(dpl::execution::dpcpp_default,
transform_first, transform_last); // sum is (0 + 2 + ... + 198) = 9900
std::cout <<"Reduce output using Transform Iterator: "<<sum << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/dpl_buffer.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
using namespace sycl;
using namespace oneapi::dpl::execution;
int main(){
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
std::vector<int> v{2,3,1,4};
//# Create a buffer and use buffer iterators in Parallel STL algorithms
{
buffer buf(v);
auto buf_begin = oneapi::dpl::begin(buf);
auto buf_end = oneapi::dpl::end(buf);
oneapi::dpl::for_each(make_device_policy(q), buf_begin, buf_end, [](int &a){ a *= 3; });
oneapi::dpl::sort(make_device_policy(q), buf_begin, buf_end);
}
for(int i = 0; i < v.size(); i++) std::cout << v[i] << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/zip_iterator.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
using namespace sycl;
using namespace oneapi::dpl::execution;
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
constexpr int num_elements = 16;
// input_v3[i] is computed below as input_v1[i] * input_v2[i] (all 10s).
std::vector<int> input_v1(num_elements, 2), input_v2(num_elements, 5), input_v3(num_elements, 0);
//Zip Iterator zips up the iterators of individual containers of interest.
auto start = oneapi::dpl::make_zip_iterator(input_v1.begin(), input_v2.begin(), input_v3.begin());
auto end = oneapi::dpl::make_zip_iterator(input_v1.end(), input_v2.end(), input_v3.end());
//create device policy
auto exec_policy = make_device_policy(q);
oneapi::dpl::for_each(exec_policy, start, end, [](auto t) {
//The zip iterator is used for expressing bounds in PSTL algorithms.
// t is a tuple of references: get<0> -> v1, get<1> -> v2, get<2> -> v3.
using std::get;
get<2>(t) = get<1>(t) * get<0>(t);
});
for (auto it = input_v3.begin(); it < input_v3.end(); it++)
std::cout << (*it) <<" ";
std::cout << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/maximum_function.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <oneapi/dpl/numeric>
using namespace sycl;
using namespace oneapi::dpl::execution;
int main() {
// Demonstrates oneapi::dpl::maximum<int> as the binary operation of an
// exclusive scan: each output is the running maximum of the preceding
// inputs, seeded with the initial value 0.
queue q;
constexpr int N = 8;
std::vector<int> v{-3,1,4,-1,5,9,-2,6};
//create a separate scope for buffer destruction
// (the buffer destructors write the device results back into `result`)
std::vector<int>result(N);
{
buffer<int,1> buf(v.data(), range<1>(N));
buffer<int,1> buf_res(result.data(), range<1>(N));
//oneDPL buffer iterators for both the input and the result vectors
auto start_v = oneapi::dpl::begin(buf);
auto end_v = oneapi::dpl::end(buf);
auto start_res = oneapi::dpl::begin(buf_res);
auto end_res = oneapi::dpl::end(buf_res);
//use std::fill to initialize the result vector
oneapi::dpl::fill(oneapi::dpl::execution::dpcpp_default,start_res, end_res, 0);
//usage of onedpl::maximum<> function call within the std::exclusive_scan function
// NOTE(review): the sibling minimum_function sample calls
// oneapi::dpl::exclusive_scan here; the std:: qualification relies on
// oneDPL's Parallel STL overloads being visible — confirm it resolves.
std::exclusive_scan(oneapi::dpl::execution::dpcpp_default, start_v, end_v, start_res, int(0), oneapi::dpl::maximum<int>() );
}
// NOTE(review): q is created above but both calls use dpcpp_default, not q.
for(int i = 0; i < result.size(); i++) std::cout << result[i] << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/counting_iterator.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <oneapi/dpl/numeric>
using namespace sycl;
using namespace oneapi::dpl::execution;
int main() {
// counting_iterator is a read-only iterator that produces consecutive
// integers on dereference — no backing storage is allocated.
oneapi::dpl::counting_iterator<int> count_a(0);
oneapi::dpl::counting_iterator<int> count_b = count_a + 100;
int init = count_a[0]; // OK: init == 0
//*count_b = 7; // ERROR: counting_iterator doesn't provide write operations
// Reduce the range [0, 100) on the default device policy, seeded with init.
auto sum = oneapi::dpl::reduce(dpl::execution::dpcpp_default,
count_a, count_b, init); // sum is (0 + 0 + 1 + ... + 99) = 4950
std::cout << "The Sum is: " <<sum<<"\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/exclusive_scan.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <iostream>
using namespace sycl;
using namespace oneapi::dpl::execution;
int main() {
// Demonstrates oneapi::dpl::exclusive_scan_by_segment: a prefix sum that
// excludes the current element and restarts at T(0) whenever the key changes.
using T = int;
const int num_elements = 6;
// NOTE(review): R is unused in this sample.
auto R = range(num_elements);
//Initialize the input vector for Keys
std::vector<int> input_keys{ 0,0,0,1,1,1 };
//Initialize the input vector for Values
std::vector<int> input_values{ 1,2,3,4,5,6 };
//Output vectors where we get the results back
std::vector<int> output_values(num_elements, 0);
//Create buffers for the above vectors
buffer buf_in(input_keys);
buffer buf_seq(input_values);
//buffer buf_out(output_values);
buffer buf_out(output_values);
// create buffer iterators
auto keys_begin = oneapi::dpl::begin(buf_in);
auto keys_end = oneapi::dpl::end(buf_in);
auto vals_begin = oneapi::dpl::begin(buf_seq);
auto result_begin = oneapi::dpl::begin(buf_out);
// use policy for algorithms execution
//Create queue with default selector
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
auto policy = make_device_policy(q);
// T(0) is the initial value prepended to each segment's scan.
auto iter_res = oneapi::dpl::exclusive_scan_by_segment(policy, keys_begin, keys_end, vals_begin, result_begin,T(0));
// Number of results written (one per input element).
auto count_res = std::distance(result_begin,iter_res);
// 3.Checking results; the host_accessor synchronizes device results to host.
host_accessor result_vals(buf_out,read_only);
std::cout<< "Keys = [ ";
std::copy(input_keys.begin(),input_keys.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Values = [ ";
std::copy(input_values.begin(),input_values.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Output Values = [ ";
std::copy(output_values.begin(),output_values.begin() + count_res,std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/reduce_segment.cpp |
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
using namespace sycl;
using namespace oneapi::dpl::execution;
int main() {
// Demonstrates oneapi::dpl::reduce_by_segment: sums runs of values that
// share the same (equal, consecutive) key, producing one key and one sum
// per segment.
const int num_elements = 6;
auto R = range(num_elements);
//Create queue with default selector
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
// Segment keys: equal consecutive keys form one segment ({0,0,0} and {1,1,1}).
std::vector<int> input_keys{ 0,0,0,1,1,1 };
// Values reduced (summed) per segment.
std::vector<int> input_values{ 1,2,3,4,5,6 };
// Output vectors that receive the distinct keys and per-segment sums.
std::vector<int> output_keys(num_elements, 0);
std::vector<int> output_values(num_elements, 0);
// Buffers over the host vectors.
buffer buf_in(input_keys);
buffer buf_seq(input_values);
buffer buf_out_keys(output_keys.data(),R);
buffer buf_out_vals(output_values.data(),R);
// oneDPL buffer iterators.
auto keys_begin = oneapi::dpl::begin(buf_in);
auto keys_end = oneapi::dpl::end(buf_in);
auto vals_begin = oneapi::dpl::begin(buf_seq);
auto result_key_begin = oneapi::dpl::begin(buf_out_keys);
auto result_vals_begin = oneapi::dpl::begin(buf_out_vals);
// Device execution policy bound to q; reused at the call site instead of
// constructing a second, identical policy.
auto policy = make_device_policy(q);
// reduce_by_segment returns a pair of end iterators for the output key and
// value sequences. Default comparison (==) and reduction (+) are used.
auto pair_iters = oneapi::dpl::reduce_by_segment(policy, keys_begin, keys_end, vals_begin, result_key_begin, result_vals_begin);
auto iter_keys = std::get<0>(pair_iters);
auto iter_vals = std::get<1>(pair_iters);
// Count of valid results in each output, via std::distance.
// (One declaration per line; both initialized at the point of use.)
int count_keys = std::distance(result_key_begin, iter_keys);
int count_vals = std::distance(result_vals_begin, iter_vals);
// Host accessors synchronize the device results back to the host vectors.
host_accessor result_keys(buf_out_keys,read_only);
host_accessor result_vals(buf_out_vals,read_only);
std::cout<< "Keys = [ ";
std::copy(input_keys.begin(),input_keys.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Values = [ ";
std::copy(input_values.begin(),input_values.end(),std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Output Keys = [ ";
std::copy(output_keys.begin(),output_keys.begin() + count_keys,std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
std::cout<< "Output Values = [ ";
std::copy(output_values.begin(),output_values.begin() + count_vals,std::ostream_iterator<int>(std::cout," "));
std::cout <<"]"<< "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/lab/permutation_iterator.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/iterator>
using namespace sycl;
using namespace std;
// Index map for oneDPL's permutation_iterator: subscripting with i yields
// the source index 2*i, i.e. the permutation visits every other element
// of the base range.
struct multiply_index_by_two {
  template <typename Index>
  Index operator[](const Index& idx) const {
    return idx * 2;
  }
};
int main() {
  // Copy every other element of the counting range [0, 20) into `result`
  // through a permutation iterator whose index map doubles each position.
  const int result_capacity = 100;
  std::vector<float> result(result_capacity, 0);

  oneapi::dpl::counting_iterator<int> first(0);
  oneapi::dpl::counting_iterator<int> last(20);

  // first and last define a contiguous range of input elements; the
  // permutation iterator touches ceil(range_size / 2) of them.
  size_t num_elements = std::distance(first, last) / 2 + std::distance(first, last) % 2;

  using namespace oneapi;
  auto permutation_first = oneapi::dpl::make_permutation_iterator(first, multiply_index_by_two());
  auto permutation_last = permutation_first + num_elements;

  // Run the copy on the default device policy; `it` marks one past the
  // last element written into `result`.
  auto it = ::std::copy(oneapi::dpl::execution::dpcpp_default, permutation_first, permutation_last, result.begin());
  auto count = ::std::distance(result.begin(), it);

  // Print only the elements actually written by the copy.
  for (auto out = result.begin(); out != result.begin() + count; ++out)
    ::std::cout << *out << " ";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/stable_sort_by_key/src/main.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <iostream>
#include <sycl/sycl.hpp>
using namespace sycl;
using namespace oneapi::dpl::execution;
using namespace std;
int main() {
  // Stable sort-by-key demo: sort (key, value) pairs by key only, then
  // verify keys are ascending and equal-key values kept their input order.
  const int n = 1000000;
  buffer<int> keys_buf{n};  // buffer with keys
  buffer<int> vals_buf{n};  // buffer with values

  // create objects to iterate over buffers
  auto keys_begin = oneapi::dpl::begin(keys_buf);
  auto vals_begin = oneapi::dpl::begin(vals_buf);
  auto counting_begin = oneapi::dpl::counting_iterator<int>{0};

  // use default policy for algorithms execution
  auto policy = oneapi::dpl::execution::dpcpp_default;

  // 1. Initialization of buffers
  // let keys_buf contain {n, n, n-2, n-2, ..., 4, 4, 2, 2}
  transform(policy, counting_begin, counting_begin + n, keys_begin,
            [n](int i) { return n - (i / 2) * 2; });
  // fill vals_buf with the analogue of std::iota using counting_iterator
  copy(policy, counting_begin, counting_begin + n, vals_begin);

  // 2. Sorting: zip keys and values so they move together, compare by key
  // only; stability preserves the relative order of equal-key values.
  auto zipped_begin = oneapi::dpl::make_zip_iterator(keys_begin, vals_begin);
  stable_sort(
      policy, zipped_begin, zipped_begin + n,
      // Generic lambda is needed because type of lhs and rhs is unspecified.
      [](auto lhs, auto rhs) { return get<0>(lhs) < get<0>(rhs); });

  // 3.Checking results (host accessors synchronize device data back)
  auto host_keys = keys_buf.get_access<access::mode::read>();
  auto host_vals = vals_buf.get_access<access::mode::read>();

  // expected output:
  // keys: {2, 2, 4, 4, ..., n - 2, n - 2, n, n}
  // vals: {n - 2, n - 1, n - 4, n - 3, ..., 2, 3, 0, 1}
  for (int i = 0; i < n; ++i) {
    // The sorted key at position i is (i / 2) * 2 + 2. The previous check
    // omitted the "+ 2" and joined the two comparisons with && instead of
    // ||, so a mismatch could never actually be reported.
    if (host_keys[i] != (i / 2) * 2 + 2 ||
        host_vals[i] != n - (i / 2) * 2 - (i % 2 == 0 ? 2 : 1)) {
      cout << "fail: i = " << i << ", host_keys[i] = " << host_keys[i]
           << ", host_vals[i] = " << host_vals[i] << "\n";
      return 1;
    }
  }
  cout << "success\nRun on "
       << policy.queue().get_device().template get_info<info::device::name>()
       << "\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/gamma-correction/src/main.cpp | //==============================================================
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <iomanip>
#include <iostream>
#include <sycl/sycl.hpp>
#include "utils.hpp"
using namespace sycl;
using namespace std;
int main() {
// Gamma-correction lab: render a fractal image, apply gamma = 2 on the host
// as the reference result, then (student exercise, STEPS 1-3 below) apply
// the same correction on the device via oneDPL and compare the two images.
// Until the steps are implemented, image2 is unchanged and the comparison
// prints "fail".
// Image size is width x height
int width = 1440;
int height = 960;
Img<ImgFormat::BMP> image{width, height};
ImgFractal fractal{width, height};
// Lambda to process image with gamma = 2: v is the pixel's luma scaled to
// [0,1]; output is 255 * v^2 on all four channels.
// NOTE(review): gamma_pixel is uint8_t, so the `> 255` clamp below can
// never fire — confirm whether a pre-cast clamp was intended.
auto gamma_f = [](ImgPixel &pixel) {
auto v = (0.3f * pixel.r + 0.59f * pixel.g + 0.11f * pixel.b) / 255.0f;
auto gamma_pixel = static_cast<uint8_t>(255 * v * v);
if (gamma_pixel > 255) gamma_pixel = 255;
pixel.set(gamma_pixel, gamma_pixel, gamma_pixel, gamma_pixel);
};
// fill image with created fractal; `index` (captured by reference) walks
// the pixels in row-major order, giving each lambda call its (x, y).
int index = 0;
image.fill([&index, width, &fractal](ImgPixel &pixel) {
int x = index % width;
int y = index / width;
auto fractal_pixel = fractal(x, y);
if (fractal_pixel < 0) fractal_pixel = 0;
if (fractal_pixel > 255) fractal_pixel = 255;
pixel.set(fractal_pixel, fractal_pixel, fractal_pixel, fractal_pixel);
++index;
});
string original_image = "fractal_original.png";
string processed_image = "fractal_gamma.png";
// Untouched copy that the device-side pass (STEPS 1-3) should process.
Img<ImgFormat::BMP> image2 = image;
image.write(original_image);
// call standard serial function for correctness check
image.fill(gamma_f);
// use default policy for algorithms execution
auto policy = oneapi::dpl::execution::dpcpp_default;
// We need to have the scope to have data in image2 after buffer's destruction
{
//# STEP 1: Create a buffer for the image2
//# YOUR CODE GOES HERE
//# STEP 2: Create buffer iterators for the buffer that was created
//# YOUR CODE GOES HERE
//# STEP 3: Call std::for_each with buffer iterators and the gamma correction algorithm gamma_f
//# YOUR CODE GOES HERE
}
image2.write(processed_image);
// check correctness: host-processed image must equal device-processed copy
if (check(image.begin(), image.end(), image2.begin())) {
cout << "success\n";
} else {
cout << "fail\n";
return 1;
}
cout << "Run on "
<< policy.queue().get_device().template get_info<info::device::name>()
<< "\n";
cout << "Original image is in " << original_image << "\n";
cout << "Image after applying gamma correction on the device is in "
<< processed_image << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/gamma-correction/src/utils.hpp | //==============================================================
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#ifndef _GAMMA_UTILS_HPP
#define _GAMMA_UTILS_HPP
#include "utils/Img.hpp"
#include "utils/ImgAlgorithm.hpp"
#include "utils/ImgFormat.hpp"
#include "utils/ImgPixel.hpp"
#include "utils/Other.hpp"
#endif // _GAMMA_UTILS_HPP
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/gamma-correction/src/main_solution.cpp | //==============================================================
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include <iomanip>
#include <iostream>
#include <sycl/sycl.hpp>
#include "utils.hpp"
using namespace sycl;
using namespace std;
int main() {
  // Gamma-correction sample (solution): render a fractal, apply gamma = 2
  // on the host as the reference, apply the same correction on the device
  // through oneDPL, then compare the two images pixel for pixel.

  // Image size is width x height
  int width = 1440;
  int height = 960;
  Img<ImgFormat::BMP> image{width, height};
  ImgFractal fractal{width, height};

  // Lambda to process image with gamma = 2
  auto gamma_f = [](ImgPixel &pixel) {
    // Luma in [0,1] from the 0.3/0.59/0.11 channel weights.
    auto v = (0.3f * pixel.r + 0.59f * pixel.g + 0.11f * pixel.b) / 255.0f;
    // Clamp in floating point BEFORE the narrowing cast: the previous code
    // cast to uint8_t first and then compared the result against 255, which
    // is always false for an 8-bit value (dead code) and too late to guard
    // an out-of-range conversion.
    float corrected = 255 * v * v;
    if (corrected > 255) corrected = 255;
    auto gamma_pixel = static_cast<uint8_t>(corrected);
    pixel.set(gamma_pixel, gamma_pixel, gamma_pixel, gamma_pixel);
  };

  // fill image with created fractal; `index` (captured by reference) walks
  // the pixels in row-major order, giving each call its (x, y).
  int index = 0;
  image.fill([&index, width, &fractal](ImgPixel &pixel) {
    int x = index % width;
    int y = index / width;
    auto fractal_pixel = fractal(x, y);
    if (fractal_pixel < 0) fractal_pixel = 0;
    if (fractal_pixel > 255) fractal_pixel = 255;
    pixel.set(fractal_pixel, fractal_pixel, fractal_pixel, fractal_pixel);
    ++index;
  });

  string original_image = "fractal_original.png";
  string processed_image = "fractal_gamma.png";

  // Untouched copy for the device-side pass.
  Img<ImgFormat::BMP> image2 = image;
  image.write(original_image);

  // call standard serial function for correctness check
  image.fill(gamma_f);

  // use default policy for algorithms execution
  auto policy = oneapi::dpl::execution::dpcpp_default;

  // The scope guarantees the buffer is destroyed — and its data written
  // back into image2 — before image2 is read below.
  {
    //# STEP 1: Create a buffer for the image2
    buffer<ImgPixel> b(image2.data(), image2.width() * image2.height());
    //# STEP 2: Create buffer iterators for the buffer that was created
    auto b_begin = oneapi::dpl::begin(b);
    auto b_end = oneapi::dpl::end(b);
    //# STEP 3: Call std::for_each with buffer iterators and call the gamma correction algorithm
    std::for_each(policy, b_begin, b_end, gamma_f);
  }
  image2.write(processed_image);

  // check correctness: host result must equal device result
  if (check(image.begin(), image.end(), image2.begin())) {
    cout << "success\n";
  } else {
    cout << "fail\n";
    return 1;
  }
  cout << "Run on "
       << policy.queue().get_device().template get_info<info::device::name>()
       << "\n";
  cout << "Original image is in " << original_image << "\n";
  cout << "Image after applying gamma correction on the device is in "
       << processed_image << "\n";
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/gamma-correction/src/utils/ImgFormat.hpp | //==============================================================
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#ifndef _GAMMA_UTILS_IMGFORMAT_HPP
#define _GAMMA_UTILS_IMGFORMAT_HPP
#include "ImgPixel.hpp"
#include <fstream>
using namespace std;
namespace ImgFormat {
// struct to store an image in BMP format (32-bit uncompressed pixel map)
struct BMP {
 private:
  using FileHeader = struct {
    // not from specification
    // was added for alignemt
    // store size of rest of the fields
    uint16_t sizeRest;  // file header size in bytes

    uint16_t type;
    uint32_t size;      // file size in bytes
    uint32_t reserved;
    uint32_t offBits;   // cumulative header size in bytes
  };

  using InfoHeader = struct {
    // from specification
    // store size of rest of the fields
    uint32_t size;          // info header size in bytes

    int32_t width;          // image width in pixels
    int32_t height;         // image height in pixels
    uint16_t planes;
    uint16_t bitCount;      // color depth
    uint32_t compression;   // compression
    uint32_t sizeImage;     // image map size in bytes
    int32_t xPelsPerMeter;  // pixel per metre (x axis)
    int32_t yPelsPerMeter;  // pixel per metre (y axis)
    uint32_t clrUsed;       // color pallete (0 is default)
    uint32_t clrImportant;
  };

  FileHeader _fileHeader;
  InfoHeader _infoHeader;

 public:
  BMP(int32_t width, int32_t height) noexcept { reset(width, height); }

  // (Re)build both headers for a width x height image.
  void reset(int32_t width, int32_t height) noexcept {
    // BMP rows are padded to 4-byte boundaries; with a 4-byte ImgPixel the
    // rows are already aligned, so padSize evaluates to 0.
    uint32_t padSize = (4 - (width * sizeof(ImgPixel)) % 4) % 4;
    uint32_t mapSize = width * height * sizeof(ImgPixel) + height * padSize;

    // Assign the fixed header fields BEFORE any field is read: the previous
    // code computed the total file size from _fileHeader.sizeRest and
    // _infoHeader.size while both were still uninitialized on the first
    // reset() call from the constructor, producing a garbage file size.
    _fileHeader.sizeRest = 14;  // file header size in bytes
    _fileHeader.type = 0x4d42;  // 'BM' magic
    _fileHeader.reserved = 0;
    _fileHeader.offBits = 54;   // sizeRest + size -> 14 + 40 -> 54

    _infoHeader.size = 40;            // info header size in bytes
    _infoHeader.width = width;        // image width in pixels
    _infoHeader.height = height;      // image height in pixels
    _infoHeader.planes = 1;
    _infoHeader.bitCount = 32;        // color depth
    _infoHeader.compression = 0;      // compression (none)
    _infoHeader.sizeImage = mapSize;  // image map size in bytes
    _infoHeader.xPelsPerMeter = 0;    // pixel per metre (x axis)
    _infoHeader.yPelsPerMeter = 0;    // pixel per metre (y axis)
    _infoHeader.clrUsed = 0;          // color pallete (0 is default)
    _infoHeader.clrImportant = 0;

    // Total file size = pixel map + both headers (now initialized).
    _fileHeader.size = mapSize + _fileHeader.sizeRest + _infoHeader.size;
  }

  // Write both headers followed by the raw pixel map of `image`.
  template <template <class> class Image, typename Format>
  void write(ofstream& ostream, Image<Format> const& image) const {
    // Start at `type`, skipping the non-spec sizeRest field itself; the
    // following sizeRest (14) bytes are exactly type+size+reserved+offBits.
    ostream.write(reinterpret_cast<char const*>(&_fileHeader.type),
                  _fileHeader.sizeRest);
    ostream.write(reinterpret_cast<char const*>(&_infoHeader),
                  _infoHeader.size);
    ostream.write(reinterpret_cast<char const*>(image.data()),
                  image.width() * image.height() * sizeof(image.data()[0]));
  }

  FileHeader const& fileHeader() const noexcept { return _fileHeader; }
  InfoHeader const& infoHeader() const noexcept { return _infoHeader; }
};
}  // namespace ImgFormat
#endif // _GAMMA_UTILS_IMGFORMAT_HPP
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/gamma-correction/src/utils/ImgPixel.hpp | //==============================================================
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#ifndef _GAMMA_UTILS_IMGPIXEL_HPP
#define _GAMMA_UTILS_IMGPIXEL_HPP
#include <cstdint>
#include <ostream>
using namespace std;
// struct to store a pixel of image
// struct to store a pixel of image; byte order b, g, r, a matches the
// 32-bit BMP pixel layout used elsewhere in this sample.
struct ImgPixel {
  uint8_t b;
  uint8_t g;
  uint8_t r;
  uint8_t a;

  // All four channels must match for two pixels to compare equal.
  bool operator==(ImgPixel const& other) const {
    return (b == other.b) && (g == other.g) && (r == other.r) && (a == other.a);
  }

  bool operator!=(ImgPixel const& other) const { return !(*this == other); }

  // Assign all four channels at once (note the blue-first argument order).
  void set(uint8_t blue, uint8_t green, uint8_t red, uint8_t alpha) {
    b = blue;
    g = green;
    r = red;
    a = alpha;
  }
};

// Stream a pixel as "(r, g, b, a)". Declared `inline` because this
// definition lives in a header: a non-inline function definition included
// from more than one translation unit violates the ODR and fails to link.
inline std::ostream& operator<<(std::ostream& output, ImgPixel const& pixel) {
  return output << "(" << unsigned(pixel.r) << ", " << unsigned(pixel.g) << ", "
                << unsigned(pixel.b) << ", " << unsigned(pixel.a) << ")";
}
#endif // _GAMMA_UTILS_IMGPIXEL_HPP
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/gamma-correction/src/utils/Img.hpp | //==============================================================
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#ifndef _GAMMA_UTILS_IMG_HPP
#define _GAMMA_UTILS_IMG_HPP
#include "ImgPixel.hpp"
#include <algorithm>
#include <fstream>
#include <iostream>
#include <vector>
using namespace std;
// Image class definition
// Owning, row-major pixel container parameterized by an on-disk format
// policy (e.g. ImgFormat::BMP) that knows how to build headers and write
// the pixel map to a stream.
template <typename Format>
class Img {
private:
Format _format;
int32_t _width;
int32_t _height;
// Pixel storage, row-major: pixel (row, col) lives at [row * _width + col].
vector<ImgPixel> _pixels;
using Iterator = vector<ImgPixel>::iterator;
using ConstIterator = vector<ImgPixel>::const_iterator;
public:
/////////////////////
// SPECIAL METHODS //
/////////////////////
// Construct / re-size to width x height; pixel contents are value-initialized
// (zeroed) on construction and preserved-then-resized on reset.
Img(int32_t width, int32_t height);
void reset(int32_t width, int32_t height);
///////////////
// ITERATORS //
///////////////
// Standard iterator pairs over the pixel storage in row-major order.
Iterator begin() noexcept;
Iterator end() noexcept;
ConstIterator begin() const noexcept;
ConstIterator end() const noexcept;
ConstIterator cbegin() const noexcept;
ConstIterator cend() const noexcept;
/////////////
// GETTERS //
/////////////
int32_t width() const noexcept;
int32_t height() const noexcept;
ImgPixel const* data() const noexcept;
ImgPixel* data() noexcept;
///////////////////
// FUNCTIONALITY //
///////////////////
// Serialize via the Format policy to the named file (binary).
void write(string const& filename) const;
// Apply a functor to every pixel / fill all pixels / set a single pixel.
template <typename Functor>
void fill(Functor f);
void fill(ImgPixel pixel);
void fill(ImgPixel pixel, int32_t row, int32_t col);
};
///////////////////////////////////////////////
// IMG CLASS IMPLEMENTATION: SPECIAL METHODS //
///////////////////////////////////////////////
// Construct a width x height image: the format policy builds its headers
// and the pixel store is sized (value-initialized) to match.
template <typename Format>
Img<Format>::Img(int32_t width, int32_t height)
    : _format(width, height),
      _width(width),
      _height(height),
      _pixels(width * height) {}
// Re-size the image in place: record the new geometry, size the pixel
// store to match, and let the format policy rebuild its headers.
template <typename Format>
void Img<Format>::reset(int32_t width, int32_t height) {
  _width = width;
  _height = height;
  _pixels.resize(width * height);
  _format.reset(width, height);
}
/////////////////////////////////////////
// IMG CLASS IMPLEMENTATION: ITERATORS //
/////////////////////////////////////////
// Mutable iterators over the pixel storage (row-major order).
template <typename Format>
typename Img<Format>::Iterator Img<Format>::begin() noexcept {
return _pixels.begin();
}
template <typename Format>
typename Img<Format>::Iterator Img<Format>::end() noexcept {
return _pixels.end();
}
// Const overloads used when the image itself is const.
template <typename Format>
typename Img<Format>::ConstIterator Img<Format>::begin() const noexcept {
return _pixels.begin();
}
template <typename Format>
typename Img<Format>::ConstIterator Img<Format>::end() const noexcept {
return _pixels.end();
}
// Explicitly-const variants; vector's const begin()/end() are returned, so
// these are equivalent to the const overloads above.
template <typename Format>
typename Img<Format>::ConstIterator Img<Format>::cbegin() const noexcept {
return _pixels.begin();
}
template <typename Format>
typename Img<Format>::ConstIterator Img<Format>::cend() const noexcept {
return _pixels.end();
}
///////////////////////////////////////
// IMG CLASS IMPLEMENTATION: GETTERS //
///////////////////////////////////////
// Image width in pixels.
template <typename Format>
int32_t Img<Format>::width() const noexcept {
return _width;
}
// Image height in pixels.
template <typename Format>
int32_t Img<Format>::height() const noexcept {
return _height;
}
// Raw pointer to the contiguous row-major pixel storage (const and
// mutable overloads); used by the format policy's write().
template <typename Format>
ImgPixel const* Img<Format>::data() const noexcept {
return _pixels.data();
}
template <typename Format>
ImgPixel* Img<Format>::data() noexcept {
return _pixels.data();
}
/////////////////////////////////////////////
// IMG CLASS IMPLEMENTATION: FUNCTIONALITY //
/////////////////////////////////////////////
// Serialize the image to `filename` (binary); refuses an empty image and
// delegates the actual encoding to the format policy.
template <typename Format>
void Img<Format>::write(string const& filename) const {
  if (_pixels.empty()) {
    cerr << "Img::write:: image is empty\n";
    return;
  }
  ofstream out(filename, ios::binary);
  _format.write(out, *this);
}
// Apply `f` to every pixel in row-major order (no-op, with a diagnostic,
// on an empty image).
template <typename Format>
template <typename Functor>
void Img<Format>::fill(Functor f) {
  if (_pixels.empty()) {
    cerr << "Img::fill(Functor): image is empty\n";
    return;
  }
  for (auto it = _pixels.begin(); it != _pixels.end(); ++it) f(*it);
}
// Set every pixel of the image to `pixel`.
template <typename Format>
void Img<Format>::fill(ImgPixel pixel) {
  if (_pixels.empty()) {
    cerr << "Img::fill(ImgPixel): image is empty\n";
    return;
  }
  // Must be std::fill: an unqualified fill(...) inside a member function
  // only finds the Img::fill member overloads (none of which accept an
  // iterator range), so the previous call failed to compile as soon as
  // this member was instantiated.
  std::fill(_pixels.begin(), _pixels.end(), pixel);
}
// Set the single pixel at (row, col), validating the image and both
// coordinates first.
template <typename Format>
void Img<Format>::fill(ImgPixel pixel, int row, int col) {
  if (_pixels.empty()) {
    cerr << "Img::fill(ImgPixel): image is empty\n";
    return;
  }
  const bool row_ok = row >= 0 && row < _height;
  const bool col_ok = col >= 0 && col < _width;
  if (!row_ok || !col_ok) {
    cerr << "Img::fill(ImgPixel, int, int): out of range\n";
    return;
  }
  _pixels.at(row * _width + col) = pixel;
}
#endif // _GAMMA_UTILS_IMG_HPP
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/gamma-correction/src/utils/ImgAlgorithm.hpp | //==============================================================
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#ifndef _GAMMA_UTILS_IMGALGORITHM_HPP
#define _GAMMA_UTILS_IMGALGORITHM_HPP
#include <cmath>
#include <cstdint>
using namespace std;
// struct to store fractal that image will fill from
// Mandelbrot-style escape-time fractal used as the image source:
// operator()(x, y) returns a smooth brightness value for pixel (x, y).
class ImgFractal {
 private:
  const int32_t _width;
  const int32_t _height;

  double _cx = -0.7436;           // center of the zoom window (real part)
  double _cy = 0.1319;            // center of the zoom window (imag part)
  double _magn = 2000000.0;       // magnification
  int _maxIterations = 1000;      // escape-iteration cap

 public:
  ImgFractal(int32_t width, int32_t height) : _width(width), _height(height) {}

  double operator()(int32_t x, int32_t y) const {
    // Map the pixel onto the complex plane around (_cx, _cy) at
    // magnification _magn.
    double re0 = (double(x) - double(_width) / 2) * (1 / _magn) + _cx;
    double im0 = (double(y) - double(_height) / 2) * (1 / _magn) + _cy;

    double brightness = 0;
    double zr = 0;
    double zi = 0;
    double tmp = 0;

    // Iterate z -> z^2 + c until |z|^2 exceeds 4 or the iteration cap is
    // hit, accumulating exp(-|z|) each step for smooth shading.
    for (int i = 0; zr * zr + zi * zi <= 4 && i < _maxIterations; ++i) {
      tmp = zr * zr - zi * zi + re0;
      zi = 2 * zr * zi + im0;
      zr = tmp;
      brightness += exp(-sqrt(zr * zr + zi * zi));
    }
    return brightness;
  }
};
#endif // _GAMMA_UTILS_IMGALGORITHM_HPP
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/07_oneDPL_Library/gamma-correction/src/utils/Other.hpp | //==============================================================
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#ifndef _GAMMA_UTILS_OTHER_HPP
#define _GAMMA_UTILS_OTHER_HPP
#include <chrono>
// function for time measuring: seconds since the steady_clock epoch,
// truncated to millisecond precision.
inline double get_time_in_sec() {
  namespace ch = std::chrono;
  const auto now_ms =
      ch::duration_cast<ch::milliseconds>(ch::steady_clock::now().time_since_epoch());
  return now_ms.count() * 1.e-3;
}
// function to check correctness: true iff [begin1, end1) and the range
// starting at begin2 (which must be at least as long) are element-wise equal.
template <typename It>
bool check(It begin1, It end1, It begin2) {
  while (begin1 != end1) {
    if (*begin1 != *begin2) return false;
    ++begin1;
    ++begin2;
  }
  return true;
}
#endif // _GAMMA_UTILS_OTHER_HPP
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/12_SYCL_Local_Memory_And_Atomics/src/localmem_info.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace cl::sycl;
int main() {
  // Query and print the default device's local-memory properties.
  queue q;
  auto dev = q.get_device();

  //# Print the device info
  std::cout << "device name : " << dev.get_info<info::device::name>() << "\n";
  std::cout << "local_mem_size: " << dev.get_info<info::device::local_mem_size>() << "\n";

  //# Report which kind of local memory the device exposes
  switch (dev.get_info<info::device::local_mem_type>()) {
    case info::local_mem_type::local:
      std::cout << "local_mem_type: info::local_mem_type::local" << "\n";
      break;
    case info::local_mem_type::global:
      std::cout << "local_mem_type: info::local_mem_type::global" << "\n";
      break;
    case info::local_mem_type::none:
      std::cout << "local_mem_type: info::local_mem_type::none" << "\n";
      break;
  }
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/12_SYCL_Local_Memory_And_Atomics/src/atomics_lab.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
auto data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
auto min = malloc_shared<int>(1, q);
auto max = malloc_shared<int>(1, q);
min[0] = 0;
max[0] = 0;
//# Reduction Kernel using atomics
q.parallel_for(N, [=](auto i) {
//# STEP 1: create atomic reference for min and max
auto min_atomic = atomic_ref<int, memory_order::relaxed, memory_scope::device, access::address_space::global_space>(min[0]);
auto max_atomic = atomic_ref<int, memory_order::relaxed, memory_scope::device, access::address_space::global_space>(max[0]);
//# STEP 2: add atomic operation for min and max computation
min_atomic.fetch_min(data[i]);
max_atomic.fetch_max(data[i]);
}).wait();
auto mid = 0.0;
//# STEP 3: Compute mid-range using the min and max
mid = (min[0] + max[0]) / 2.0;
std::cout << "Minimum = " << min[0] << "\n";
std::cout << "Maximum = " << max[0] << "\n";
std::cout << "Mid-Range = " << mid << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/12_SYCL_Local_Memory_And_Atomics/src/matrixmul_16x16.cpp | //==============================================================
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iomanip>
using namespace sycl;
int main() {
// Naive ND-range matrix multiplication (one work-item per output element,
// all reads from global memory), verified against a serial host multiply.
size_t N = 16;
std::cout << "MATRIX_SIZE : " << N << "x" << N << std::endl;
//# Define vectors for matrices
std::vector<float> matrix_a(N*N);
std::vector<float> matrix_b(N*N);
std::vector<float> matrix_c(N*N);
std::vector<float> matrix_d(N*N);
//# Initialize matrices with values
float v1 = 2.f;
float v2 = 3.f;
for (int i=0; i<N; i++)
for (int j=0; j<N; j++){
matrix_a[i*N+j] = v1++;
matrix_b[i*N+j] = v2++;
matrix_c[i*N+j] = 0.f;
matrix_d[i*N+j] = 0.f;
}
//# Define queue with default device for offloading computation
queue q;
std::cout << "Offload Device : " << q.get_device().get_info<info::device::name>() << std::endl;
//# Create buffers for matrices
buffer a(matrix_a);
buffer b(matrix_b);
buffer c(matrix_c);
//# Submit command groups to execute on device
q.submit([&](handler &h){
//# Create accessors to copy buffers to the device
accessor A(a, h, read_only);
accessor B(b, h, read_only);
accessor C(c, h, write_only);
//# Define size for ND-range and work-group size
//# (global == work-group size here, so the whole matrix is one group)
range<2> global_size(N,N);
range<2> work_group_size(N,N);
//# Parallel Compute Matrix Multiplication
h.parallel_for(nd_range<2>{global_size, work_group_size}, [=](nd_item<2> item){
const int i = item.get_global_id(0);
const int j = item.get_global_id(1);
//# matrix multiplication: dot product of row i of A and column j of B,
//# read directly from global memory (compare with the local-memory variant)
float temp = 0.f;
for (int k = 0; k < N; k++) {
temp += A[i*N+k] * B[k*N+j];
}
C[i*N+j] = temp;
});
});
//# Host accessor blocks until the kernel finishes and makes matrix_c readable
host_accessor ha(c, read_only);
//# Print Output and Verification
//# NOTE(review): exact float != comparison — works here because host and
//# device accumulate in the same k order; confirm if the kernel changes.
auto FAIL = 0;
for (int i=0; i<N; i++){
for (int j=0; j<N; j++){
for(int k=0; k<N; k++){
matrix_d[i*N+j] += matrix_a[i*N+k] * matrix_b[k*N+j];
}
if(matrix_d[i*N+j] != matrix_c[i*N+j]) FAIL = 1;
std::cout << std::setw(6) << matrix_c[i*N+j] << " ";
}
std::cout << "\n";
}
if(FAIL == 1) std::cout << "FAIL\n"; else std::cout << "PASS\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/12_SYCL_Local_Memory_And_Atomics/src/reduction_atomics_usm.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
auto data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
auto sum = malloc_shared<int>(1, q);
sum[0] = 0;
//# Reduction Kernel using atomics
q.parallel_for(N, [=](auto i) {
auto sum_atomic = atomic_ref<int, memory_order::relaxed, memory_scope::device, access::address_space::global_space>(sum[0]);
sum_atomic += data[i];
}).wait();
std::cout << "Sum = " << sum[0] << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/12_SYCL_Local_Memory_And_Atomics/src/reduction_atomics_buffer.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
int main() {
// Sum reduction over 0..N-1 using a device-scope atomic, with the data and
// the scalar result exchanged through SYCL buffers instead of USM.
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
std::vector<int> data(N);
for (int i = 0; i < N; i++) data[i] = i;
int sum = 0;
// This scope is essential: destroying buf_sum at the closing brace writes
// the device result back into `sum` before it is printed below.
{
//# create buffers for data and sum
buffer buf_data(data);
buffer buf_sum(&sum, range(1));
//# Reduction Kernel using atomics
q.submit([&](auto &h) {
accessor data_acc(buf_data, h, sycl::read_only);
// sum_acc is read-write: every work-item atomically accumulates into it
accessor sum_acc(buf_sum, h);
h.parallel_for(N, [=](auto i) {
auto sum_atomic = atomic_ref<int, memory_order::relaxed, memory_scope::device, access::address_space::global_space>(sum_acc[0]);
sum_atomic += data_acc[i];
});
});
}
std::cout << "Sum = " << sum << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/12_SYCL_Local_Memory_And_Atomics/src/matrixmul_16x16_localmem.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iomanip>
using namespace sycl;
int main() {
// ND-range matrix multiplication staged through work-group local memory,
// verified against a serial host multiply.
size_t N = 16;
std::cout << "MATRIX_SIZE : " << N << "x" << N << std::endl;
//# Define vectors for matrices
std::vector<float> matrix_a(N*N);
std::vector<float> matrix_b(N*N);
std::vector<float> matrix_c(N*N);
std::vector<float> matrix_d(N*N);
//# Initialize matrices with values
float v1 = 2.f;
float v2 = 3.f;
for (int i=0; i<N; i++)
for (int j=0; j<N; j++){
matrix_a[i*N+j] = v1++;
matrix_b[i*N+j] = v2++;
matrix_c[i*N+j] = 0.f;
matrix_d[i*N+j] = 0.f;
}
//# Define queue with default device for offloading computation
queue q;
std::cout << "Offload Device : " << q.get_device().get_info<info::device::name>() << std::endl;
//# Create buffers for matrices
buffer a(matrix_a);
buffer b(matrix_b);
buffer c(matrix_c);
//# Submit command groups to execute on device
q.submit([&](handler &h){
//# Create accessors to copy buffers to the device
accessor A(a, h, read_only);
accessor B(b, h, read_only);
accessor C(c, h, write_only);
//# Define size for ND-range and work-group size
//# (global == work-group size, so the single group covers the whole matrix
//# and the local tiles below hold A and B in their entirety)
range<2> global_size(N,N);
range<2> work_group_size(N,N);
//# Create local accessors
local_accessor<float, 2> A_local(range<2>(N, N), h);
local_accessor<float, 2> B_local(range<2>(N, N), h);
//# Parallel Compute Matrix Multiplication
h.parallel_for(nd_range<2>{global_size, work_group_size}, [=](nd_item<2> item){
const int i = item.get_global_id(0);
const int j = item.get_global_id(1);
const int x = item.get_local_id(0);
const int y = item.get_local_id(1);
//# copy from global to local memory (one element per work-item)
A_local[x][y] = A[i * N + j];
B_local[x][y] = B[i * N + j];
//# barrier to synchronize local memory copy across all work items:
//# no element may be read below until every work-item has stored its own
group_barrier(item.get_group());
//# matrix multiplication computation from local memory
float temp = 0.f;
for (int k = 0; k < N; k++) {
temp += A_local[x][k] * B_local[k][y];
}
C[i*N+j] = temp;
});
});
//# Host accessor blocks until the kernel finishes and makes matrix_c readable
host_accessor ha(c, read_only);
//# Print Output and Verification
//# NOTE(review): exact float != comparison — works here because host and
//# device accumulate in the same k order; confirm if the kernel changes.
auto FAIL = 0;
for (int i=0; i<N; i++){
for (int j=0; j<N; j++){
for(int k=0; k<N; k++){
matrix_d[i*N+j] += matrix_a[i*N+k] * matrix_b[k*N+j];
}
if(matrix_d[i*N+j] != matrix_c[i*N+j]) FAIL = 1;
std::cout << std::setw(6) << matrix_c[i*N+j] << " ";
}
std::cout << "\n";
}
if(FAIL == 1) std::cout << "FAIL\n"; else std::cout << "PASS\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/12_SYCL_Local_Memory_And_Atomics/lab/localmem_info.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
//# Queries the default device and reports its name, local-memory capacity,
//# and the kind of local memory it exposes (dedicated, emulated in global, or none).
int main() {
  queue q;
  auto dev = q.get_device();
  //# Print the device info
  std::cout << "device name : " << dev.get_info<info::device::name>() << "\n";
  std::cout << "local_mem_size: " << dev.get_info<info::device::local_mem_size>() << "\n";
  //# Map the local-memory kind enum to a readable label
  switch (dev.get_info<info::device::local_mem_type>()) {
    case info::local_mem_type::local:
      std::cout << "local_mem_type: info::local_mem_type::local" << "\n";
      break;
    case info::local_mem_type::global:
      std::cout << "local_mem_type: info::local_mem_type::global" << "\n";
      break;
    case info::local_mem_type::none:
      std::cout << "local_mem_type: info::local_mem_type::none" << "\n";
      break;
    default:
      break;
  }
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/12_SYCL_Local_Memory_And_Atomics/lab/atomics_lab.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
//# Lab exercise skeleton: compute the minimum, maximum, and mid-range of
//# data[0..N) on the device. The student fills in the STEP 1-3 placeholders
//# (atomic_ref creation, atomic min/max ops, and the host-side mid-range).
//# With the placeholders empty, the program prints 0 for all three values.
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# USM shared allocation: data is visible on both host and device
auto data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
//# Single-element shared allocations to receive the reduction results
auto min = malloc_shared<int>(1, q);
auto max = malloc_shared<int>(1, q);
min[0] = 0;
max[0] = 0;
//# Reduction Kernel using atomics
q.parallel_for(N, [=](auto i) {
//# STEP 1: create atomic reference for min and max
//# YOUR CODE GOES HERE
//# STEP 2: add atomic operation for min and max computation
//# YOUR CODE GOES HERE
}).wait();
auto mid = 0.0;
//# STEP 3: Compute mid-range using the min and max
//# YOUR CODE GOES HERE
std::cout << "Minimum = " << min[0] << "\n";
std::cout << "Maximum = " << max[0] << "\n";
std::cout << "Mid-Range = " << mid << "\n";
//# NOTE(review): data/min/max are never freed (sycl::free) — acceptable for a
//# short-lived lab sample, but a completed solution should release them.
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/12_SYCL_Local_Memory_And_Atomics/lab/matrixmul_16x16.cpp | //==============================================================
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iomanip>
using namespace sycl;
//# Naive N x N (16x16) matrix multiply executed on the device with
//# global-memory accessors only; the host recomputes the product serially
//# and prints PASS/FAIL after comparing every element.
int main() {
//# Matrix dimension; the work-group below is also N x N = 256 work-items,
//# so the whole problem runs as a single work-group.
size_t N = 16;
std::cout << "MATRIX_SIZE : " << N << "x" << N << std::endl;
//# Define vectors for matrices (a, b: inputs; c: device result; d: host reference)
std::vector<float> matrix_a(N*N);
std::vector<float> matrix_b(N*N);
std::vector<float> matrix_c(N*N);
std::vector<float> matrix_d(N*N);
//# Initialize matrices with values
float v1 = 2.f;
float v2 = 3.f;
for (int i=0; i<N; i++)
for (int j=0; j<N; j++){
matrix_a[i*N+j] = v1++;
matrix_b[i*N+j] = v2++;
matrix_c[i*N+j] = 0.f;
matrix_d[i*N+j] = 0.f;
}
//# Define queue with default device for offloading computation
queue q;
std::cout << "Offload Device : " << q.get_device().get_info<info::device::name>() << std::endl;
//# Create buffers for matrices (sized from the vectors)
buffer a(matrix_a);
buffer b(matrix_b);
buffer c(matrix_c);
//# Submit command groups to execute on device
q.submit([&](handler &h){
//# Create accessors to copy buffers to the device
accessor A(a, h, read_only);
accessor B(b, h, read_only);
accessor C(c, h, write_only);
//# Define size for ND-range and work-group size
range<2> global_size(N,N);
range<2> work_group_size(N,N);
//# Parallel Compute Matrix Multiplication
h.parallel_for(nd_range<2>{global_size, work_group_size}, [=](nd_item<2> item){
const int i = item.get_global_id(0);
const int j = item.get_global_id(1);
//# dot product of row i of A and column j of B, read directly from global memory
float temp = 0.f;
for (int k = 0; k < N; k++) {
temp += A[i*N+k] * B[k*N+j];
}
C[i*N+j] = temp;
});
});
//# Host accessor blocks until the kernel completes and syncs c back to matrix_c
host_accessor ha(c, read_only);
//# Print Output and Verification (exact float compare is safe: identical
//# multiply/add order on host and device for these small integral values)
auto FAIL = 0;
for (int i=0; i<N; i++){
for (int j=0; j<N; j++){
for(int k=0; k<N; k++){
matrix_d[i*N+j] += matrix_a[i*N+k] * matrix_b[k*N+j];
}
if(matrix_d[i*N+j] != matrix_c[i*N+j]) FAIL = 1;
std::cout << std::setw(6) << matrix_c[i*N+j] << " ";
}
std::cout << "\n";
}
if(FAIL == 1) std::cout << "FAIL\n"; else std::cout << "PASS\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/12_SYCL_Local_Memory_And_Atomics/lab/reduction_atomics_usm.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
//# Sums data[0..N) on the device: every work-item adds its element into a
//# single accumulator through a relaxed, device-scope atomic reference.
//# Fix: the USM allocations were previously leaked; they are now released.
int main() {
  queue q;
  std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";

  //# USM shared allocations: visible on both host and device
  auto data = malloc_shared<int>(N, q);
  for (int i = 0; i < N; i++) data[i] = i;
  auto sum = malloc_shared<int>(1, q);
  sum[0] = 0;

  //# Reduction Kernel using atomics: relaxed ordering is sufficient because
  //# only the final value (read after wait()) matters, not intermediate order.
  q.parallel_for(N, [=](auto i) {
    auto sum_atomic = atomic_ref<int, memory_order::relaxed, memory_scope::device, access::address_space::global_space>(sum[0]);
    sum_atomic += data[i];
  }).wait();

  std::cout << "Sum = " << sum[0] << "\n";

  //# Release the USM allocations
  free(data, q);
  free(sum, q);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/12_SYCL_Local_Memory_And_Atomics/lab/reduction_atomics_buffer.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
std::vector<int> data(N);
for (int i = 0; i < N; i++) data[i] = i;
int sum = 0;
{
//# create buffers for data and sum
buffer buf_data(data);
buffer buf_sum(&sum, range(1));
//# Reduction Kernel using atomics
q.submit([&](auto &h) {
accessor data_acc(buf_data, h, sycl::read_only);
accessor sum_acc(buf_sum, h);
h.parallel_for(N, [=](auto i) {
auto sum_atomic = atomic_ref<int, memory_order::relaxed, memory_scope::device, access::address_space::global_space>(sum_acc[0]);
sum_atomic += data_acc[i];
});
});
}
std::cout << "Sum = " << sum << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/12_SYCL_Local_Memory_And_Atomics/lab/matrixmul_16x16_localmem.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iomanip>
using namespace sycl;
//# Tiled 16x16 matrix multiply: each work-item first stages one element of A
//# and B into work-group local memory, a group barrier makes the tiles
//# visible to all work-items, then the dot product reads only local memory.
//# The host recomputes the product serially and prints PASS/FAIL.
int main() {
//# Matrix dimension; with work_group_size = N x N the whole matrix is one
//# work-group, so the local tiles hold the complete A and B.
size_t N = 16;
std::cout << "MATRIX_SIZE : " << N << "x" << N << std::endl;
//# Define vectors for matrices (a, b: inputs; c: device result; d: host reference)
std::vector<float> matrix_a(N*N);
std::vector<float> matrix_b(N*N);
std::vector<float> matrix_c(N*N);
std::vector<float> matrix_d(N*N);
//# Initialize matrices with values
float v1 = 2.f;
float v2 = 3.f;
for (int i=0; i<N; i++)
for (int j=0; j<N; j++){
matrix_a[i*N+j] = v1++;
matrix_b[i*N+j] = v2++;
matrix_c[i*N+j] = 0.f;
matrix_d[i*N+j] = 0.f;
}
//# Define queue with default device for offloading computation
queue q;
std::cout << "Offload Device : " << q.get_device().get_info<info::device::name>() << std::endl;
//# Create buffers for matrices
buffer a(matrix_a);
buffer b(matrix_b);
buffer c(matrix_c);
//# Submit command groups to execute on device
q.submit([&](handler &h){
//# Create accessors to copy buffers to the device
accessor A(a, h, read_only);
accessor B(b, h, read_only);
accessor C(c, h, write_only);
//# Define size for ND-range and work-group size
range<2> global_size(N,N);
range<2> work_group_size(N,N);
//# Create local accessors (one N x N tile of A and of B per work-group)
local_accessor<float, 2> A_local(range<2>(N, N), h);
local_accessor<float, 2> B_local(range<2>(N, N), h);
//# Parallel Compute Matrix Multiplication
h.parallel_for(nd_range<2>{global_size, work_group_size}, [=](nd_item<2> item){
const int i = item.get_global_id(0);
const int j = item.get_global_id(1);
const int x = item.get_local_id(0);
const int y = item.get_local_id(1);
//# copy from global to local memory (each work-item stages one element)
A_local[x][y] = A[i * N + j];
B_local[x][y] = B[i * N + j];
//# barrier to synchronize local memory copy across all work items;
//# without it, reads below could observe unwritten tile elements
group_barrier(item.get_group());
//# matrix multiplication computation from local memory
float temp = 0.f;
for (int k = 0; k < N; k++) {
temp += A_local[x][k] * B_local[k][y];
}
C[i*N+j] = temp;
});
});
//# Host accessor blocks until the kernel completes and syncs c back to matrix_c
host_accessor ha(c, read_only);
//# Print Output and Verification (exact float compare is safe here: same
//# multiply/add order on host and device)
auto FAIL = 0;
for (int i=0; i<N; i++){
for (int j=0; j<N; j++){
for(int k=0; k<N; k++){
matrix_d[i*N+j] += matrix_a[i*N+k] * matrix_b[k*N+j];
}
if(matrix_d[i*N+j] != matrix_c[i*N+j]) FAIL = 1;
std::cout << std::setw(6) << matrix_c[i*N+j] << " ";
}
std::cout << "\n";
}
if(FAIL == 1) std::cout << "FAIL\n"; else std::cout << "PASS\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/src/USM_implicit.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
int *host_array = malloc_host<int>(N, Q);
int *shared_array = malloc_shared<int>(N, Q);
for (int i = 0; i < N; i++) {
// Initialize hostArray on host
host_array[i] = i;
}
// Submit the queue
Q.submit([&](handler &h) {
h.parallel_for(N, [=](id<1> i) {
// access sharedArray and hostArray on device
shared_array[i] = host_array[i] + 1;
});
});
Q.wait();
for (int i = 0; i < N; i++) {
// access sharedArray on host
host_array[i] = shared_array[i];
}
free(shared_array, Q);
free(host_array, Q);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/src/event_graphs.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cassert>
using namespace sycl;
constexpr int N = 42;

//# Builds a two-node task graph with an explicit event dependency: the
//# summing single_task waits on the initialization kernel via depends_on(e).
//# Fixes: the include/using/constexpr block was duplicated (the repeated
//# `constexpr int N` is a redefinition error), and `data` was never freed.
int main() {
  queue Q;
  int *data = malloc_shared<int>(N, Q);
  //# First kernel: initialize every element to 1
  auto e = Q.parallel_for(N, [=](id<1> i) { data[i] = 1; });
  Q.submit([&](handler &h) {
    h.depends_on(e); //# serialize after the init kernel
    h.single_task([=]() {
      for (int i = 1; i < N; i++)
        data[0] += data[i];
    });
  });
  Q.wait();
  assert(data[0] == N);
  //# Release the USM allocation
  free(data, Q);
  return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/src/accessors_WAR_WAW.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: MIT
#include <sycl/sycl.hpp>
#include <array>
using namespace sycl;
constexpr int N = 42;
//# Demonstrates how the SYCL runtime serializes kernels on write-after-read
//# (WAR) and write-after-write (WAW) hazards inferred from buffer accessors:
//# rewriteA must wait for computeB's read of A, and rewriteB must wait for
//# computeB's write of B. Expected output: every pair prints "42 42".
int main() {
std::array<int,N> a, b;
for (int i = 0; i < N; i++) {
a[i] = b[i] = 0;
}
queue Q;
buffer A{a};
buffer B{b};
Q.submit([&](handler &h) {
accessor accA(A, h, read_only);
accessor accB(B, h, write_only);
h.parallel_for( // computeB: B = A + 1
N, [=](id<1> i) {
accB[i] = accA[i] + 1;
});
});
Q.submit([&](handler &h) {
// WAR of buffer A: must run after computeB has read A
accessor accA(A, h, write_only);
h.parallel_for( // rewriteA: A = 42
N, [=](id<1> i) {
accA[i] = 21 + 21;
});
});
Q.submit([&](handler &h) {
// WAW of buffer B: must run after computeB has written B
accessor accB(B, h, write_only);
h.parallel_for( // rewriteB: B = 42
N, [=](id<1> i) {
accB[i] = 30 + 12;
});
});
//# Host accessors block until all three kernels complete
host_accessor host_accA(A, read_only);
host_accessor host_accB(B, read_only);
for (int i = 0; i < N; i++) {
std::cout << host_accA[i] << " " << host_accB[i] << " ";
}
std::cout << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/src/y_pattern_events.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
int *data1 = malloc_shared<int>(N, Q);
int *data2 = malloc_shared<int>(N, Q);
auto e1 = Q.parallel_for(N, [=](id<1> i) { data1[i] = 1; });
auto e2 = Q.parallel_for(N, [=](id<1> i) { data2[i] = 2; });
auto e3 = Q.parallel_for(range{N}, {e1, e2},
[=](id<1> i) { data1[i] += data2[i]; });
Q.single_task(e3, [=]() {
for (int i = 1; i < N; i++)
data1[0] += data1[i];
data1[0] /= 3;
});
Q.wait();
assert(data1[0] == N);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/src/accessors_sample.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cassert>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
// Create 3 buffers of 42 ints
buffer<int> A{range{N}};
buffer<int> B{range{N}};
buffer<int> C{range{N}};
Q.submit([&](handler &h) {
// create device accessors
accessor aA{A, h, write_only, no_init};
accessor aB{B, h, write_only, no_init};
accessor aC{C, h, write_only, no_init};
h.parallel_for(N, [=](id<1> i) {
aA[i] = 1;
aB[i] = 40;
aC[i] = 0;
});
});
Q.submit([&](handler &h) {
// create device accessors
accessor aA{A, h, read_only};
accessor aB{B, h, read_only};
accessor aC{C, h, read_write};
h.parallel_for(N, [=](id<1> i) { aC[i] += aA[i] + aB[i]; });
});
host_accessor result{C, read_only};
for (int i = 0; i < N; i++) std::cout << result[i] << " ";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/src/linear_buffers_graphs.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
buffer<int> data{range{N}};
Q.submit([&](handler &h) {
accessor a{data, h};
h.parallel_for(N, [=](id<1> i) { a[i] = 1; });
});
Q.submit([&](handler &h) {
accessor a{data, h};
h.single_task([=]() {
for (int i = 1; i < N; i++)
a[0] += a[i];
});
});
host_accessor h_a{data};
assert(h_a[0] == N);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/src/y_pattern_inorder_queues.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q{property::queue::in_order()};
int *data1 = malloc_shared<int>(N, Q);
int *data2 = malloc_shared<int>(N, Q);
Q.parallel_for(N, [=](id<1> i) { data1[i] = 1; });
Q.parallel_for(N, [=](id<1> i) { data2[i] = 2; });
Q.parallel_for(N, [=](id<1> i) { data1[i] += data2[i]; });
Q.single_task([=]() {
for (int i = 1; i < N; i++)
data1[0] += data1[i];
data1[0] /= 3;
});
Q.wait();
assert(data1[0] == N);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/src/Linear_inorder_queues.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q{property::queue::in_order()};
int *data = malloc_shared<int>(N, Q);
Q.parallel_for(N, [=](id<1> i) { data[i] = 1; });
Q.single_task([=]() {
for (int i = 1; i < N; i++)
data[0] += data[i];
});
Q.wait();
assert(data[0] == N);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/src/task_scheduling.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 256;
int main() {
queue q;
//# 3 vectors initialized to values
std::vector<int> data1(N, 1);
std::vector<int> data2(N, 2);
std::vector<int> data3(N, 3);
//# STEP 1 : Create buffers for data1, data2 and data3
buffer data1_buf(data1);
buffer data2_buf(data2);
buffer data3_buf(data3);
//# STEP 2 : Create a kernel to update data1 += data3, set accessor permissions
q.submit([&](handler &h) {
accessor a{data1_buf, h};
accessor b{data3_buf, h, read_only};
h.parallel_for(N, [=](auto i) { a[i] += b[i]; });
});
//# STEP 3 : Create a kernel to update data2 *= 2, set accessor permissions
q.submit([&](handler &h) {
accessor a{data2_buf, h};
h.parallel_for(N, [=](auto i) { a[i] *= 2; });
});
//# STEP 4 : Create a kernel to update data3 = data1 + data2, set accessor permissions
q.submit([&](handler &h) {
accessor a{data3_buf, h, write_only};
accessor b{data1_buf, h, read_only};
accessor c{data2_buf, h, read_only};
h.parallel_for(N, [=](auto i) { a[i] = b[i] + c[i]; });
});
//# STEP 5 : Create a host accessor to copy back data3
host_accessor h_a{data3_buf};
std::cout << "Output = " << data3[0] << "\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/src/linear_event_graphs.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
int *data = malloc_shared<int>(N, Q);
auto e = Q.parallel_for(N, [=](id<1> i) { data[i] = 1; });
Q.submit([&](handler &h) {
h.depends_on(e);
h.single_task([=]() {
for (int i = 1; i < N; i++)
data[0] += data[i];
});
});
Q.wait();
assert(data[0] == N);
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/10_SYCL_Graphs_Scheduling_Data_management/src/USM_explicit.cpp | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include<array>
using namespace sycl;
constexpr int N = 42;
int main() {
queue Q;
std::array<int,N> host_array;
int *device_array = malloc_device<int>(N, Q);
for (int i = 0; i < N; i++)
host_array[i] = N;
// Submit the queue
Q.submit([&](handler &h) {
// copy hostArray to deviceArray
h.memcpy(device_array, &host_array[0], N * sizeof(int));
});
Q.wait();
Q.submit([&](handler &h) {
h.parallel_for(N, [=](id<1> i) { device_array[i]++; });
});
Q.wait();
Q.submit([&](handler &h) {
// copy deviceArray back to hostArray
h.memcpy(&host_array[0], device_array, N * sizeof(int));
});
Q.wait();
free(device_array, Q);
return 0;
}
| cpp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.