repo_name: stringclasses (10 values)
file_path: stringlengths (29 to 222)
content: stringlengths (24 to 926k)
extention: stringclasses (5 values)
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_matrix_mul_SYCLMigration/02_sycl_dpct_migrated/Common/helper_functions.h
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // These are helper functions for the SDK samples (string parsing, // timers, image helpers, etc) #ifndef COMMON_HELPER_FUNCTIONS_H_ #define COMMON_HELPER_FUNCTIONS_H_ #ifdef WIN32 #pragma warning(disable : 4996) #endif // includes, project #include <assert.h> #include <exception.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <algorithm> #include <fstream> #include <iostream> #include <string> #include <vector> // includes, timer, string parsing, image helpers #include <helper_image.h> // helper functions for image compare, dump, data comparisons #include <helper_string.h> // helper functions for string parsing #include <helper_timer.h> // helper functions for timers #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif #endif // COMMON_HELPER_FUNCTIONS_H_
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_matrix_mul_SYCLMigration/02_sycl_dpct_migrated/src/matrixMulCUBLAS.cpp
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide and uses the CUBLAS library to demonstrate * the best performance. * SOME PRECAUTIONS: * IF WE WANT TO CALCULATE ROW-MAJOR MATRIX MULTIPLY C = A * B, * WE JUST NEED CALL CUBLAS API IN A REVERSE ORDER: cublasSegemm(B, A)! * The reason is explained as follows: * CUBLAS library uses column-major storage, but C/C++ use row-major storage. * When passing the matrix pointer to CUBLAS, the memory layout alters from * row-major to column-major, which is equivalent to an implicit transpose. * In the case of row-major C/C++ matrix A, B, and a simple matrix multiplication * C = A * B, we can't use the input order like cublasSgemm(A, B) because of * implicit transpose. The actual result of cublasSegemm(A, B) is A(T) * B(T). * If col(A(T)) != row(B(T)), equal to row(A) != col(B), A(T) and B(T) are not * multipliable. Moreover, even if A(T) and B(T) are multipliable, the result C * is a column-based cublas matrix, which means C(T) in C/C++, we need extra * transpose code to convert it to a row-based C/C++ matrix. * To solve the problem, let's consider our desired result C, a row-major matrix. * In cublas format, it is C(T) actually (because of the implicit transpose). * C = A * B, so C(T) = (A * B) (T) = B(T) * A(T). Cublas matrice B(T) and A(T) * happen to be C/C++ matrice B and A (still because of the implicit transpose)! * We don't need extra transpose code, we only need alter the input order! * * CUBLAS provides high-performance matrix multiplication. * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. 
*/ // Utilities and system includes #include <sycl/sycl.hpp> #include <dpct/dpct.hpp> #include <assert.h> #include <helper_string.h> #include <oneapi/mkl.hpp> #include <dpct/blas_utils.hpp> // helper for shared functions common to CUDA Samples // CUDA runtime // CUDA and CUBLAS functions #include <helper_functions.h> #include <helper_cuda.h> #include <cmath> #include <chrono> #ifndef min #define min(a, b) ((a < b) ? a : b) #endif #ifndef max #define max(a, b) ((a > b) ? a : b) #endif dpct::device_ext &dev_ct1 = dpct::get_current_device(); sycl::queue &q_ct1 = dev_ct1.default_queue(); // Optional Command-line multiplier for matrix sizes typedef struct _matrixSize { unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC; } sMatrixSize; //////////////////////////////////////////////////////////////////////////////// //! Compute reference data set matrix multiply on CPU //! C = A * B //! @param C reference data, computed but preallocated //! @param A matrix A as provided to device //! @param B matrix B as provided to device //! @param hA height of matrix A //! @param wB width of matrix B //////////////////////////////////////////////////////////////////////////////// void matrixMulCPU(float *C, const float *A, const float *B, unsigned int hA, unsigned int wA, unsigned int wB) { for (unsigned int i = 0; i < hA; ++i) for (unsigned int j = 0; j < wB; ++j) { double sum = 0; for (unsigned int k = 0; k < wA; ++k) { double a = A[i * wA + k]; double b = B[k * wB + j]; sum += a * b; } C[i * wB + j] = (float)sum; } } // Allocates a matrix with random float entries. void randomInit(float *data, int size) { for (int i = 0; i < size; ++i) data[i] = rand() / (float)RAND_MAX; } void printDiff(float *data1, float *data2, int width, int height, int iListLength, float fListTol) { printf("Listing first %d Differences > %.6f...\n", iListLength, fListTol); int i, j, k; int error_count = 0; for (j = 0; j < height; j++) { if (error_count < iListLength) { printf("\n Row %d:\n", j); } for (i = 0; i < width; i++) { k = j * width + i; float fDiff = fabs(data1[k] - data2[k]); if (fDiff > fListTol) { if (error_count < iListLength) { printf(" Loc(%d,%d)\tCPU=%.5f\tGPU=%.5f\tDiff=%.6f\n", i, j, data1[k], data2[k], fDiff); } error_count++; } } } printf(" \n Total Errors = %d\n", error_count); } void initializeCUDA(int argc, char **argv, int &devID, int &iSizeMultiple, sMatrixSize &matrix_size) try { // By default, we use device 0, otherwise we override the device ID based on // what is provided at the command line int error; devID = 0; // devID = findCudaDevice(argc, (const char **)argv); if (checkCmdLineFlag(argc, (const char **)argv, "sizemult")) { iSizeMultiple = getCmdLineArgumentInt(argc, (const char **)argv, "sizemult"); } iSizeMultiple = min(iSizeMultiple, 10); iSizeMultiple = max(iSizeMultiple, 1); dpct::device_info deviceProp; dpct::dev_mgr::instance().get_device(devID).get_device_info(deviceProp); printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.get_name(), deviceProp.get_major_version(), deviceProp.get_minor_version()); int block_size = 32; matrix_size.uiWA = 3 * block_size * iSizeMultiple; matrix_size.uiHA = 4 * block_size * iSizeMultiple; matrix_size.uiWB = 2 * block_size * iSizeMultiple; matrix_size.uiHB = 3 * block_size * iSizeMultiple; matrix_size.uiWC = 2 * block_size * iSizeMultiple; matrix_size.uiHC = 4 * block_size * iSizeMultiple; printf("MatrixA(%u,%u), MatrixB(%u,%u), MatrixC(%u,%u)\n", matrix_size.uiHA, matrix_size.uiWA, matrix_size.uiHB, matrix_size.uiWB, matrix_size.uiHC, 
matrix_size.uiWC); if (matrix_size.uiWA != matrix_size.uiHB || matrix_size.uiHA != matrix_size.uiHC || matrix_size.uiWB != matrix_size.uiWC) { printf("ERROR: Matrix sizes do not match!\n"); exit(-1); } } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test matrix multiply using CUBLAS //////////////////////////////////////////////////////////////////////////////// int matrixMultiply(int argc, char **argv, int devID, sMatrixSize &matrix_size) { dpct::device_info deviceProp; dpct::dev_mgr::instance().get_device(devID).get_device_info(deviceProp); int block_size = 32; // set seed for rand() srand(2006); // allocate host memory for matrices A and B unsigned int size_A = matrix_size.uiWA * matrix_size.uiHA; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = matrix_size.uiWB * matrix_size.uiHB; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // set seed for rand() srand(2006); // initialize host memory randomInit(h_A, size_A); randomInit(h_B, size_B); // allocate device memory float *d_A, *d_B, *d_C; unsigned int size_C = matrix_size.uiWC * matrix_size.uiHC; unsigned int mem_size_C = sizeof(float) * size_C; // allocate host memory for the result float *h_C = (float *)malloc(mem_size_C); float *h_CUBLAS = (float *)malloc(mem_size_C); d_A = (float *)sycl::malloc_device(mem_size_A, dpct::get_default_queue()); d_B = (float *)sycl::malloc_device(mem_size_B, dpct::get_default_queue()); dpct::get_default_queue().memcpy(d_A, h_A, mem_size_A).wait(); dpct::get_default_queue().memcpy(d_B, h_B, mem_size_B).wait(); d_C = (float *)sycl::malloc_device(mem_size_C, dpct::get_default_queue()); // setup execution parameters sycl::range<3> threads(1, block_size, block_size); sycl::range<3> grid(1, matrix_size.uiHC / threads[1], matrix_size.uiWC / threads[2]); // create and start timer printf("Computing result using CUBLAS..."); // execute the kernel int nIter = 30; // CUBLAS version 2.0 { const float alpha = 1.0f; const float beta = 0.0f; sycl::queue *handle; dpct::event_ptr start, stop; std::chrono::time_point<std::chrono::steady_clock> start_ct1; std::chrono::time_point<std::chrono::steady_clock> stop_ct1; handle = &dpct::get_default_queue(); // Perform warmup operation with cublas oneapi::mkl::blas::column_major::gemm( *handle, oneapi::mkl::transpose::nontrans, oneapi::mkl::transpose::nontrans, matrix_size.uiWB, matrix_size.uiHA, matrix_size.uiWA, alpha, d_B, matrix_size.uiWB, d_A, matrix_size.uiWA, beta, d_C, matrix_size.uiWB); // Allocate CUDA events that we'll use for timing start = new sycl::event(); stop = new sycl::event(); // Record the start event start_ct1 = std::chrono::steady_clock::now(); for (int j = 0; j < nIter; j++) { // note cublas is column primary! 
// need to transpose the order oneapi::mkl::blas::column_major::gemm( *handle, oneapi::mkl::transpose::nontrans, oneapi::mkl::transpose::nontrans, matrix_size.uiWB, matrix_size.uiHA, matrix_size.uiWA, alpha, d_B, matrix_size.uiWB, d_A, matrix_size.uiWA, beta, d_C, matrix_size.uiWB); } printf("done.\n"); // Record the stop event stop_ct1 = std::chrono::steady_clock::now(); // Wait for the stop event to complete float msecTotal = 0.0f; msecTotal = std::chrono::duration<float, std::milli>( stop_ct1 - start_ct1) .count(); // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)matrix_size.uiHC * (double)matrix_size.uiWC * (double)matrix_size.uiHB; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf("Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul); // copy result from device to host dpct::get_default_queue().memcpy(h_CUBLAS, d_C, mem_size_C).wait(); // Destroy the handle handle = nullptr; } // compute reference solution printf("Computing result using host CPU..."); float *reference = (float *)malloc(mem_size_C); matrixMulCPU(reference, h_A, h_B, matrix_size.uiHA, matrix_size.uiWA, matrix_size.uiWB); printf("done.\n"); // check result (CUBLAS) bool resCUBLAS = sdkCompareL2fe(reference, h_CUBLAS, size_C, 1.0e-6f); if (resCUBLAS != true) { printDiff(reference, h_CUBLAS, matrix_size.uiWC, matrix_size.uiHC, 100, 1.0e-5f); } printf("Comparing CUBLAS Matrix Multiply with CPU results: %s\n", (true == resCUBLAS) ? "PASS" : "FAIL"); printf( "\nNOTE: The CUDA Samples are not meant for performance measurements. " "Results may vary when GPU Boost is enabled.\n"); // clean up memory free(h_A); free(h_B); free(h_C); free(reference); sycl::free(d_A, dpct::get_default_queue()); sycl::free(d_B, dpct::get_default_queue()); sycl::free(d_C, dpct::get_default_queue()); if (resCUBLAS == true) { return EXIT_SUCCESS; // return value = 1 } else { return EXIT_FAILURE; // return value = 0 } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { printf("[Matrix Multiply CUBLAS] - Starting...\n"); int devID = 0, sizeMult = 5; sMatrixSize matrix_size; initializeCUDA(argc, argv, devID, sizeMult, matrix_size); int matrix_result = matrixMultiply(argc, argv, devID, matrix_size); return matrix_result; }
cpp
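The comment block in matrixMulCUBLAS.cpp above explains why a row-major C = A * B is obtained by calling column-major gemm with the operand order reversed. Below is a minimal, self-contained sketch of that call pattern using the same oneapi::mkl::blas::column_major::gemm entry point the sample uses, with small hypothetical dimensions in place of the sample's block_size-derived sizes.

#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include <vector>
#include <cstdio>

// Row-major C(hA x wB) = A(hA x wA) * B(wA x wB) computed as gemm(B, A).
// Dimensions here are illustrative, not the sample's sizemult-derived ones.
int main() {
  const int hA = 4, wA = 3, wB = 2;
  sycl::queue q;

  std::vector<float> A(hA * wA, 1.0f), B(wA * wB, 2.0f), C(hA * wB, 0.0f);
  float *d_A = sycl::malloc_device<float>(A.size(), q);
  float *d_B = sycl::malloc_device<float>(B.size(), q);
  float *d_C = sycl::malloc_device<float>(C.size(), q);
  q.memcpy(d_A, A.data(), A.size() * sizeof(float)).wait();
  q.memcpy(d_B, B.data(), B.size() * sizeof(float)).wait();

  const float alpha = 1.0f, beta = 0.0f;
  // A row-major matrix reinterpreted as column-major is its transpose, so
  // this call really computes C^T = B^T * A^T. Passing (B, A) with
  // m = wB, n = hA, k = wA therefore leaves a row-major C in d_C with no
  // explicit transpose, exactly as the sample's comment argues.
  oneapi::mkl::blas::column_major::gemm(
      q, oneapi::mkl::transpose::nontrans, oneapi::mkl::transpose::nontrans,
      wB, hA, wA, alpha, d_B, wB, d_A, wA, beta, d_C, wB).wait();

  q.memcpy(C.data(), d_C, C.size() * sizeof(float)).wait();
  std::printf("C[0][0] = %.1f (expected %.1f)\n", C[0], 2.0f * wA);

  sycl::free(d_A, q);
  sycl::free(d_B, q);
  sycl::free(d_C, q);
  return 0;
}

With A filled with 1.0f and B with 2.0f, every element of C should come out as 2 * wA, which confirms that the reversed operand order alone takes care of the row-major/column-major mismatch.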
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/Common/helper_timer.h
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Helper Timing Functions #ifndef COMMON_HELPER_TIMER_H_ #define COMMON_HELPER_TIMER_H_ #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif // includes, system #include <vector> // includes, project #include <exception.h> // Definition of the StopWatch Interface, this is used if we don't want to use // the CUT functions But rather in a self contained class interface class StopWatchInterface { public: StopWatchInterface() {} virtual ~StopWatchInterface() {} public: //! Start time measurement virtual void start() = 0; //! Stop time measurement virtual void stop() = 0; //! Reset time counters to zero virtual void reset() = 0; //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned, otherwise the //! time between the last start() and stop call is returned virtual float getTime() = 0; //! Mean time to date based on the number of times the stopwatch has been //! _stopped_ (ie finished sessions) and the current total time virtual float getAverageTime() = 0; }; ////////////////////////////////////////////////////////////////// // Begin Stopwatch timer class definitions for all OS platforms // ////////////////////////////////////////////////////////////////// #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) // includes, system #define WINDOWS_LEAN_AND_MEAN #include <windows.h> #undef min #undef max //! Windows specific implementation of StopWatch class StopWatchWin : public StopWatchInterface { public: //! Constructor, default StopWatchWin() : start_time(), end_time(), diff_time(0.0f), total_time(0.0f), running(false), clock_sessions(0), freq(0), freq_set(false) { if (!freq_set) { // helper variable LARGE_INTEGER temp; // get the tick frequency from the OS QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER *>(&temp)); // convert to type in which it is needed freq = (static_cast<double>(temp.QuadPart)) / 1000.0; // rememeber query freq_set = true; } } // Destructor ~StopWatchWin() {} public: //! 
Start time measurement inline void start(); //! Stop time measurement inline void stop(); //! Reset time counters to zero inline void reset(); //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned, otherwise the //! time between the last start() and stop call is returned inline float getTime(); //! Mean time to date based on the number of times the stopwatch has been //! _stopped_ (ie finished sessions) and the current total time inline float getAverageTime(); private: // member variables //! Start of measurement LARGE_INTEGER start_time; //! End of measurement LARGE_INTEGER end_time; //! Time difference between the last start and stop float diff_time; //! TOTAL time difference between starts and stops float total_time; //! flag if the stop watch is running bool running; //! Number of times clock has been started //! and stopped to allow averaging int clock_sessions; //! tick frequency double freq; //! flag if the frequency has been set bool freq_set; }; // functions, inlined //////////////////////////////////////////////////////////////////////////////// //! Start time measurement //////////////////////////////////////////////////////////////////////////////// inline void StopWatchWin::start() { QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time)); running = true; } //////////////////////////////////////////////////////////////////////////////// //! Stop time measurement and increment add to the current diff_time summation //! variable. Also increment the number of times this clock has been run. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchWin::stop() { QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&end_time)); diff_time = static_cast<float>(((static_cast<double>(end_time.QuadPart) - static_cast<double>(start_time.QuadPart)) / freq)); total_time += diff_time; clock_sessions++; running = false; } //////////////////////////////////////////////////////////////////////////////// //! Reset the timer to 0. Does not change the timer running state but does //! recapture this point in time as the current start time if it is running. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchWin::reset() { diff_time = 0; total_time = 0; clock_sessions = 0; if (running) { QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time)); } } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned added to the //! current diff_time sum, otherwise the current summed time difference alone //! is returned. //////////////////////////////////////////////////////////////////////////////// inline float StopWatchWin::getTime() { // Return the TOTAL time to date float retval = total_time; if (running) { LARGE_INTEGER temp; QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&temp)); retval += static_cast<float>(((static_cast<double>(temp.QuadPart) - static_cast<double>(start_time.QuadPart)) / freq)); } return retval; } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. for a single run based on the total number of COMPLETED runs //! and the total time. 
//////////////////////////////////////////////////////////////////////////////// inline float StopWatchWin::getAverageTime() { return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f; } #else // Declarations for Stopwatch on Linux and Mac OSX // includes, system #include <sys/time.h> #include <ctime> //! Windows specific implementation of StopWatch class StopWatchLinux : public StopWatchInterface { public: //! Constructor, default StopWatchLinux() : start_time(), diff_time(0.0), total_time(0.0), running(false), clock_sessions(0) {} // Destructor virtual ~StopWatchLinux() {} public: //! Start time measurement inline void start(); //! Stop time measurement inline void stop(); //! Reset time counters to zero inline void reset(); //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned, otherwise the //! time between the last start() and stop call is returned inline float getTime(); //! Mean time to date based on the number of times the stopwatch has been //! _stopped_ (ie finished sessions) and the current total time inline float getAverageTime(); private: // helper functions //! Get difference between start time and current time inline float getDiffTime(); private: // member variables //! Start of measurement struct timeval start_time; //! Time difference between the last start and stop float diff_time; //! TOTAL time difference between starts and stops float total_time; //! flag if the stop watch is running bool running; //! Number of times clock has been started //! and stopped to allow averaging int clock_sessions; }; // functions, inlined //////////////////////////////////////////////////////////////////////////////// //! Start time measurement //////////////////////////////////////////////////////////////////////////////// inline void StopWatchLinux::start() { gettimeofday(&start_time, 0); running = true; } //////////////////////////////////////////////////////////////////////////////// //! Stop time measurement and increment add to the current diff_time summation //! variable. Also increment the number of times this clock has been run. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchLinux::stop() { diff_time = getDiffTime(); total_time += diff_time; running = false; clock_sessions++; } //////////////////////////////////////////////////////////////////////////////// //! Reset the timer to 0. Does not change the timer running state but does //! recapture this point in time as the current start time if it is running. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchLinux::reset() { diff_time = 0; total_time = 0; clock_sessions = 0; if (running) { gettimeofday(&start_time, 0); } } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned added to the //! current diff_time sum, otherwise the current summed time difference alone //! is returned. //////////////////////////////////////////////////////////////////////////////// inline float StopWatchLinux::getTime() { // Return the TOTAL time to date float retval = total_time; if (running) { retval += getDiffTime(); } return retval; } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. 
for a single run based on the total number of COMPLETED runs //! and the total time. //////////////////////////////////////////////////////////////////////////////// inline float StopWatchLinux::getAverageTime() { return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f; } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// inline float StopWatchLinux::getDiffTime() { struct timeval t_time; gettimeofday(&t_time, 0); // time difference in milli-seconds return static_cast<float>(1000.0 * (t_time.tv_sec - start_time.tv_sec) + (0.001 * (t_time.tv_usec - start_time.tv_usec))); } #endif // WIN32 //////////////////////////////////////////////////////////////////////////////// //! Timer functionality exported //////////////////////////////////////////////////////////////////////////////// //! Create a new timer //! @return true if a time has been created, otherwise false //! @param name of the new timer, 0 if the creation failed //////////////////////////////////////////////////////////////////////////////// inline bool sdkCreateTimer(StopWatchInterface **timer_interface) { // printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface); #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) *timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchWin()); #else *timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchLinux()); #endif return (*timer_interface != NULL) ? true : false; } //////////////////////////////////////////////////////////////////////////////// //! Delete a timer //! @return true if a time has been deleted, otherwise false //! @param name of the timer to delete //////////////////////////////////////////////////////////////////////////////// inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) { // printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { delete *timer_interface; *timer_interface = NULL; } return true; } //////////////////////////////////////////////////////////////////////////////// //! Start the time with name \a name //! @param name name of the timer to start //////////////////////////////////////////////////////////////////////////////// inline bool sdkStartTimer(StopWatchInterface **timer_interface) { // printf("sdkStartTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { (*timer_interface)->start(); } return true; } //////////////////////////////////////////////////////////////////////////////// //! Stop the time with name \a name. Does not reset. //! @param name name of the timer to stop //////////////////////////////////////////////////////////////////////////////// inline bool sdkStopTimer(StopWatchInterface **timer_interface) { // printf("sdkStopTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { (*timer_interface)->stop(); } return true; } //////////////////////////////////////////////////////////////////////////////// //! Resets the timer's counter. //! @param name name of the timer to reset. //////////////////////////////////////////////////////////////////////////////// inline bool sdkResetTimer(StopWatchInterface **timer_interface) { // printf("sdkResetTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { (*timer_interface)->reset(); } return true; } //////////////////////////////////////////////////////////////////////////////// //! 
Return the average time for timer execution as the total time //! for the timer dividied by the number of completed (stopped) runs the timer //! has made. //! Excludes the current running time if the timer is currently running. //! @param name name of the timer to return the time of //////////////////////////////////////////////////////////////////////////////// inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) { // printf("sdkGetAverageTimerValue called object %08x\n", (void // *)*timer_interface); if (*timer_interface) { return (*timer_interface)->getAverageTime(); } else { return 0.0f; } } //////////////////////////////////////////////////////////////////////////////// //! Total execution time for the timer over all runs since the last reset //! or timer creation. //! @param name name of the timer to obtain the value of. //////////////////////////////////////////////////////////////////////////////// inline float sdkGetTimerValue(StopWatchInterface **timer_interface) { // printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface); if (*timer_interface) { return (*timer_interface)->getTime(); } else { return 0.0f; } } #endif // COMMON_HELPER_TIMER_H_
h
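helper_timer.h is normally driven through the sdk*Timer free functions rather than the StopWatch classes directly; sdkCreateTimer picks the Windows or Linux implementation at compile time. A small usage sketch, assuming the sample's Common/ directory is on the include path and some workload stands in for the elided body:

#include <cstdio>
#include <helper_timer.h>  // header shown above, from the sample's Common/ directory

int main() {
  StopWatchInterface *timer = nullptr;
  sdkCreateTimer(&timer);        // allocates StopWatchWin or StopWatchLinux

  for (int run = 0; run < 3; ++run) {
    sdkStartTimer(&timer);
    // ... workload to be measured ...
    sdkStopTimer(&timer);        // each stop() counts as one completed session
  }

  std::printf("total   %.3f ms\n", sdkGetTimerValue(&timer));
  std::printf("average %.3f ms\n", sdkGetAverageTimerValue(&timer));

  sdkResetTimer(&timer);
  sdkDeleteTimer(&timer);
  return 0;
}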
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/Common/helper_string.h
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // These are helper functions for the SDK samples (string parsing, timers, etc) #ifndef COMMON_HELPER_STRING_H_ #define COMMON_HELPER_STRING_H_ #include <stdio.h> #include <stdlib.h> #include <fstream> #include <string> #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) #ifndef _CRT_SECURE_NO_DEPRECATE #define _CRT_SECURE_NO_DEPRECATE #endif #ifndef STRCASECMP #define STRCASECMP _stricmp #endif #ifndef STRNCASECMP #define STRNCASECMP _strnicmp #endif #ifndef STRCPY #define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath) #endif #ifndef FOPEN #define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode) #endif #ifndef FOPEN_FAIL #define FOPEN_FAIL(result) (result != 0) #endif #ifndef SSCANF #define SSCANF sscanf_s #endif #ifndef SPRINTF #define SPRINTF sprintf_s #endif #else // Linux Includes #include <string.h> #include <strings.h> #ifndef STRCASECMP #define STRCASECMP strcasecmp #endif #ifndef STRNCASECMP #define STRNCASECMP strncasecmp #endif #ifndef STRCPY #define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath) #endif #ifndef FOPEN #define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode)) #endif #ifndef FOPEN_FAIL #define FOPEN_FAIL(result) (result == NULL) #endif #ifndef SSCANF #define SSCANF sscanf #endif #ifndef SPRINTF #define SPRINTF sprintf #endif #endif #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif // CUDA Utility Helper Functions inline int stringRemoveDelimiter(char delimiter, const char *string) { int string_start = 0; while (string[string_start] == delimiter) { string_start++; } if (string_start >= static_cast<int>(strlen(string) - 1)) { return 0; } return string_start; } inline int getFileExtension(char *filename, char **extension) { int string_length = static_cast<int>(strlen(filename)); while (filename[string_length--] != '.') { if (string_length == 0) break; } if (string_length > 0) string_length += 2; if (string_length == 0) *extension = NULL; else *extension = &filename[string_length]; return 
string_length; } inline bool checkCmdLineFlag(const int argc, const char **argv, const char *string_ref) { bool bFound = false; if (argc >= 1) { for (int i = 1; i < argc; i++) { int string_start = stringRemoveDelimiter('-', argv[i]); const char *string_argv = &argv[i][string_start]; const char *equal_pos = strchr(string_argv, '='); int argv_length = static_cast<int>( equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv); int length = static_cast<int>(strlen(string_ref)); if (length == argv_length && !STRNCASECMP(string_argv, string_ref, length)) { bFound = true; continue; } } } return bFound; } // This function wraps the CUDA Driver API into a template function template <class T> inline bool getCmdLineArgumentValue(const int argc, const char **argv, const char *string_ref, T *value) { bool bFound = false; if (argc >= 1) { for (int i = 1; i < argc; i++) { int string_start = stringRemoveDelimiter('-', argv[i]); const char *string_argv = &argv[i][string_start]; int length = static_cast<int>(strlen(string_ref)); if (!STRNCASECMP(string_argv, string_ref, length)) { if (length + 1 <= static_cast<int>(strlen(string_argv))) { int auto_inc = (string_argv[length] == '=') ? 1 : 0; *value = (T)atoi(&string_argv[length + auto_inc]); } bFound = true; i = argc; } } } return bFound; } inline int getCmdLineArgumentInt(const int argc, const char **argv, const char *string_ref) { bool bFound = false; int value = -1; if (argc >= 1) { for (int i = 1; i < argc; i++) { int string_start = stringRemoveDelimiter('-', argv[i]); const char *string_argv = &argv[i][string_start]; int length = static_cast<int>(strlen(string_ref)); if (!STRNCASECMP(string_argv, string_ref, length)) { if (length + 1 <= static_cast<int>(strlen(string_argv))) { int auto_inc = (string_argv[length] == '=') ? 1 : 0; value = atoi(&string_argv[length + auto_inc]); } else { value = 0; } bFound = true; continue; } } } if (bFound) { return value; } else { return 0; } } inline float getCmdLineArgumentFloat(const int argc, const char **argv, const char *string_ref) { bool bFound = false; float value = -1; if (argc >= 1) { for (int i = 1; i < argc; i++) { int string_start = stringRemoveDelimiter('-', argv[i]); const char *string_argv = &argv[i][string_start]; int length = static_cast<int>(strlen(string_ref)); if (!STRNCASECMP(string_argv, string_ref, length)) { if (length + 1 <= static_cast<int>(strlen(string_argv))) { int auto_inc = (string_argv[length] == '=') ? 1 : 0; value = static_cast<float>(atof(&string_argv[length + auto_inc])); } else { value = 0.f; } bFound = true; continue; } } } if (bFound) { return value; } else { return 0; } } inline bool getCmdLineArgumentString(const int argc, const char **argv, const char *string_ref, char **string_retval) { bool bFound = false; if (argc >= 1) { for (int i = 1; i < argc; i++) { int string_start = stringRemoveDelimiter('-', argv[i]); char *string_argv = const_cast<char *>(&argv[i][string_start]); int length = static_cast<int>(strlen(string_ref)); if (!STRNCASECMP(string_argv, string_ref, length)) { *string_retval = &string_argv[length + 1]; bFound = true; continue; } } } if (!bFound) { *string_retval = NULL; } return bFound; } ////////////////////////////////////////////////////////////////////////////// //! Find the path for a file assuming that //! files are found in the searchPath. //! //! @return the path if succeeded, otherwise 0 //! @param filename name of the file //! 
@param executable_path optional absolute path of the executable ////////////////////////////////////////////////////////////////////////////// inline char *sdkFindFilePath(const char *filename, const char *executable_path) { // <executable_name> defines a variable that is replaced with the name of the // executable // Typical relative search paths to locate needed companion files (e.g. sample // input data, or JIT source files) The origin for the relative search may be // the .exe file, a .bat file launching an .exe, a browser .exe launching the // .exe or .bat, etc const char *searchPath[] = { "./", // same dir "./data/", // same dir "../../../../Samples/<executable_name>/", // up 4 in tree "../../../Samples/<executable_name>/", // up 3 in tree "../../Samples/<executable_name>/", // up 2 in tree "../../../../Samples/<executable_name>/data/", // up 4 in tree "../../../Samples/<executable_name>/data/", // up 3 in tree "../../Samples/<executable_name>/data/", // up 2 in tree "../../../../Samples/0_Introduction/<executable_name>/", // up 4 in tree "../../../Samples/0_Introduction/<executable_name>/", // up 3 in tree "../../Samples/0_Introduction/<executable_name>/", // up 2 in tree "../../../../Samples/1_Utilities/<executable_name>/", // up 4 in tree "../../../Samples/1_Utilities/<executable_name>/", // up 3 in tree "../../Samples/1_Utilities/<executable_name>/", // up 2 in tree "../../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 4 in tree "../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 3 in tree "../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 2 in tree "../../../../Samples/3_CUDA_Features/<executable_name>/", // up 4 in tree "../../../Samples/3_CUDA_Features/<executable_name>/", // up 3 in tree "../../Samples/3_CUDA_Features/<executable_name>/", // up 2 in tree "../../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 4 in tree "../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 3 in tree "../../Samples/4_CUDA_Libraries/<executable_name>/", // up 2 in tree "../../../../Samples/5_Domain_Specific/<executable_name>/", // up 4 in tree "../../../Samples/5_Domain_Specific/<executable_name>/", // up 3 in tree "../../Samples/5_Domain_Specific/<executable_name>/", // up 2 in tree "../../../../Samples/6_Performance/<executable_name>/", // up 4 in tree "../../../Samples/6_Performance/<executable_name>/", // up 3 in tree "../../Samples/6_Performance/<executable_name>/", // up 2 in tree "../../../../Samples/0_Introduction/<executable_name>/data/", // up 4 in tree "../../../Samples/0_Introduction/<executable_name>/data/", // up 3 in tree "../../Samples/0_Introduction/<executable_name>/data/", // up 2 in tree "../../../../Samples/1_Utilities/<executable_name>/data/", // up 4 in tree "../../../Samples/1_Utilities/<executable_name>/data/", // up 3 in tree "../../Samples/1_Utilities/<executable_name>/data/", // up 2 in tree "../../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 4 in tree "../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 3 in tree "../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 2 in tree "../../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 4 in tree "../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 3 in tree "../../Samples/3_CUDA_Features/<executable_name>/data/", // up 2 in tree "../../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 4 in tree 
"../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 3 in tree "../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 2 in tree "../../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 4 in tree "../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 3 in tree "../../Samples/5_Domain_Specific/<executable_name>/data/", // up 2 in tree "../../../../Samples/6_Performance/<executable_name>/data/", // up 4 in tree "../../../Samples/6_Performance/<executable_name>/data/", // up 3 in tree "../../Samples/6_Performance/<executable_name>/data/", // up 2 in tree "../../../../Common/data/", // up 4 in tree "../../../Common/data/", // up 3 in tree "../../Common/data/" // up 2 in tree }; // Extract the executable name std::string executable_name; if (executable_path != 0) { executable_name = std::string(executable_path); #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) // Windows path delimiter size_t delimiter_pos = executable_name.find_last_of('\\'); executable_name.erase(0, delimiter_pos + 1); if (executable_name.rfind(".exe") != std::string::npos) { // we strip .exe, only if the .exe is found executable_name.resize(executable_name.size() - 4); } #else // Linux & OSX path delimiter size_t delimiter_pos = executable_name.find_last_of('/'); executable_name.erase(0, delimiter_pos + 1); #endif } // Loop over all search paths and return the first hit for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) { std::string path(searchPath[i]); size_t executable_name_pos = path.find("<executable_name>"); // If there is executable_name variable in the searchPath // replace it with the value if (executable_name_pos != std::string::npos) { if (executable_path != 0) { path.replace(executable_name_pos, strlen("<executable_name>"), executable_name); } else { // Skip this path entry if no executable argument is given continue; } } #ifdef _DEBUG printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str()); #endif // Test if the file exists path.append(filename); FILE *fp; FOPEN(fp, path.c_str(), "rb"); if (fp != NULL) { fclose(fp); // File found // returning an allocated array here for backwards compatibility reasons char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1)); STRCPY(file_path, path.length() + 1, path.c_str()); return file_path; } if (fp) { fclose(fp); } } // File not found printf("\nerror: sdkFindFilePath: file <%s> not found!\n", filename); return 0; } #endif // COMMON_HELPER_STRING_H_
h
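The parsing helpers in helper_string.h match flags after stripping leading '-' characters and read values that follow an '='. A short sketch of typical use; the -input=<file> argument is purely illustrative and not part of the sample:

#include <cstdio>
#include <cstdlib>
#include <helper_string.h>  // header shown above, from the sample's Common/ directory

int main(int argc, char **argv) {
  // Matches both -sizemult=4 and --sizemult=4, as in matrixMulCUBLAS.cpp.
  if (checkCmdLineFlag(argc, (const char **)argv, "sizemult")) {
    int mult = getCmdLineArgumentInt(argc, (const char **)argv, "sizemult");
    std::printf("sizemult = %d\n", mult);
  }

  char *input = nullptr;  // points into argv, so it must not be freed
  if (getCmdLineArgumentString(argc, (const char **)argv, "input", &input)) {
    // sdkFindFilePath walks the relative search paths listed above and
    // returns a malloc'd copy of the first existing path, or 0 on failure.
    char *path = sdkFindFilePath(input, argv[0]);
    std::printf("resolved: %s\n", path ? path : "<not found>");
    if (path) std::free(path);
  }
  return 0;
}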
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/Common/exception.h
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* CUda UTility Library */ #ifndef COMMON_EXCEPTION_H_ #define COMMON_EXCEPTION_H_ // includes, system #include <stdlib.h> #include <exception> #include <iostream> #include <stdexcept> #include <string> //! Exception wrapper. //! @param Std_Exception Exception out of namespace std for easy typing. template <class Std_Exception> class Exception : public Std_Exception { public: //! @brief Static construction interface //! @return Alwayss throws ( Located_Exception<Exception>) //! @param file file in which the Exception occurs //! @param line line in which the Exception occurs //! @param detailed details on the code fragment causing the Exception static void throw_it(const char *file, const int line, const char *detailed = "-"); //! Static construction interface //! @return Alwayss throws ( Located_Exception<Exception>) //! @param file file in which the Exception occurs //! @param line line in which the Exception occurs //! @param detailed details on the code fragment causing the Exception static void throw_it(const char *file, const int line, const std::string &detailed); //! Destructor virtual ~Exception() throw(); private: //! Constructor, default (private) Exception(); //! Constructor, standard //! @param str string returned by what() explicit Exception(const std::string &str); }; //////////////////////////////////////////////////////////////////////////////// //! Exception handler function for arbitrary exceptions //! @param ex exception to handle //////////////////////////////////////////////////////////////////////////////// template <class Exception_Typ> inline void handleException(const Exception_Typ &ex) { std::cerr << ex.what() << std::endl; exit(EXIT_FAILURE); } //! Convenience macros //! Exception caused by dynamic program behavior, e.g. file does not exist #define RUNTIME_EXCEPTION(msg) \ Exception<std::runtime_error>::throw_it(__FILE__, __LINE__, msg) //! Logic exception in program, e.g. 
an assert failed #define LOGIC_EXCEPTION(msg) \ Exception<std::logic_error>::throw_it(__FILE__, __LINE__, msg) //! Out of range exception #define RANGE_EXCEPTION(msg) \ Exception<std::range_error>::throw_it(__FILE__, __LINE__, msg) //////////////////////////////////////////////////////////////////////////////// //! Implementation // includes, system #include <sstream> //////////////////////////////////////////////////////////////////////////////// //! Static construction interface. //! @param Exception causing code fragment (file and line) and detailed infos. //////////////////////////////////////////////////////////////////////////////// /*static*/ template <class Std_Exception> void Exception<Std_Exception>::throw_it(const char *file, const int line, const char *detailed) { std::stringstream s; // Quiet heavy-weight but exceptions are not for // performance / release versions s << "Exception in file '" << file << "' in line " << line << "\n" << "Detailed description: " << detailed << "\n"; throw Exception(s.str()); } //////////////////////////////////////////////////////////////////////////////// //! Static construction interface. //! @param Exception causing code fragment (file and line) and detailed infos. //////////////////////////////////////////////////////////////////////////////// /*static*/ template <class Std_Exception> void Exception<Std_Exception>::throw_it(const char *file, const int line, const std::string &msg) { throw_it(file, line, msg.c_str()); } //////////////////////////////////////////////////////////////////////////////// //! Constructor, default (private). //////////////////////////////////////////////////////////////////////////////// template <class Std_Exception> Exception<Std_Exception>::Exception() : Std_Exception("Unknown Exception.\n") {} //////////////////////////////////////////////////////////////////////////////// //! Constructor, standard (private). //! String returned by what(). //////////////////////////////////////////////////////////////////////////////// template <class Std_Exception> Exception<Std_Exception>::Exception(const std::string &s) : Std_Exception(s) {} //////////////////////////////////////////////////////////////////////////////// //! Destructor //////////////////////////////////////////////////////////////////////////////// template <class Std_Exception> Exception<Std_Exception>::~Exception() throw() {} // functions, exported #endif // COMMON_EXCEPTION_H_
h
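Because Exception<std::runtime_error> derives from std::runtime_error, code that raises errors through the RUNTIME_EXCEPTION macro can be caught with an ordinary catch clause and routed to handleException. A brief sketch, with loadConfig as a hypothetical caller added only for illustration:

#include <stdexcept>
#include <exception.h>  // header shown above, from the sample's Common/ directory

// Hypothetical helper that validates its argument with the convenience macro.
void loadConfig(const char *path) {
  if (path == nullptr) {
    RUNTIME_EXCEPTION("config path is null");  // throws with file and line info
  }
  // ... open and parse the file ...
}

int main() {
  try {
    loadConfig(nullptr);
  } catch (const std::runtime_error &ex) {
    handleException(ex);  // prints what() and exits with EXIT_FAILURE
  }
  return 0;
}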
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/Common/helper_cuda.h
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions for initialization and error checking #ifndef COMMON_HELPER_CUDA_H_ #define COMMON_HELPER_CUDA_H_ #pragma once #include <sycl/sycl.hpp> #include <dpct/dpct.hpp> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <helper_string.h> #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif // Note, it is required that your SDK sample to include the proper header // files, please refer the CUDA examples for examples of the needed CUDA // headers, which may change depending on which CUDA functions are used. // CUDA Runtime error messages #ifdef __DPCT_HPP__ static const char *_cudaGetErrorEnum(dpct::err0 error) { /* DPCT1009:5: SYCL uses exceptions to report errors and does not use the error codes. The original code was commented out and a warning string was inserted. You need to rewrite this code. */ return "cudaGetErrorName is not supported" /*cudaGetErrorName(error)*/; } #endif #ifdef CUDA_DRIVER_API // CUDA Driver API errors static const char *_cudaGetErrorEnum(CUresult error) { static char unknown[] = "<unknown>"; const char *ret = NULL; cuGetErrorName(error, &ret); return ret ? 
ret : unknown; } #endif #ifdef CUBLAS_API_H_ // cuBLAS API errors static const char *_cudaGetErrorEnum(cublasStatus_t error) { switch (error) { case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; } return "<unknown>"; } #endif #ifdef _CUFFT_H_ // cuFFT API errors static const char *_cudaGetErrorEnum(cufftResult error) { switch (error) { case CUFFT_SUCCESS: return "CUFFT_SUCCESS"; case CUFFT_INVALID_PLAN: return "CUFFT_INVALID_PLAN"; case CUFFT_ALLOC_FAILED: return "CUFFT_ALLOC_FAILED"; case CUFFT_INVALID_TYPE: return "CUFFT_INVALID_TYPE"; case CUFFT_INVALID_VALUE: return "CUFFT_INVALID_VALUE"; case CUFFT_INTERNAL_ERROR: return "CUFFT_INTERNAL_ERROR"; case CUFFT_EXEC_FAILED: return "CUFFT_EXEC_FAILED"; case CUFFT_SETUP_FAILED: return "CUFFT_SETUP_FAILED"; case CUFFT_INVALID_SIZE: return "CUFFT_INVALID_SIZE"; case CUFFT_UNALIGNED_DATA: return "CUFFT_UNALIGNED_DATA"; case CUFFT_INCOMPLETE_PARAMETER_LIST: return "CUFFT_INCOMPLETE_PARAMETER_LIST"; case CUFFT_INVALID_DEVICE: return "CUFFT_INVALID_DEVICE"; case CUFFT_PARSE_ERROR: return "CUFFT_PARSE_ERROR"; case CUFFT_NO_WORKSPACE: return "CUFFT_NO_WORKSPACE"; case CUFFT_NOT_IMPLEMENTED: return "CUFFT_NOT_IMPLEMENTED"; case CUFFT_LICENSE_ERROR: return "CUFFT_LICENSE_ERROR"; case CUFFT_NOT_SUPPORTED: return "CUFFT_NOT_SUPPORTED"; } return "<unknown>"; } #endif #ifdef CUSPARSEAPI // cuSPARSE API errors static const char *_cudaGetErrorEnum(cusparseStatus_t error) { switch (error) { case CUSPARSE_STATUS_SUCCESS: return "CUSPARSE_STATUS_SUCCESS"; case CUSPARSE_STATUS_NOT_INITIALIZED: return "CUSPARSE_STATUS_NOT_INITIALIZED"; case CUSPARSE_STATUS_ALLOC_FAILED: return "CUSPARSE_STATUS_ALLOC_FAILED"; case CUSPARSE_STATUS_INVALID_VALUE: return "CUSPARSE_STATUS_INVALID_VALUE"; case CUSPARSE_STATUS_ARCH_MISMATCH: return "CUSPARSE_STATUS_ARCH_MISMATCH"; case CUSPARSE_STATUS_MAPPING_ERROR: return "CUSPARSE_STATUS_MAPPING_ERROR"; case CUSPARSE_STATUS_EXECUTION_FAILED: return "CUSPARSE_STATUS_EXECUTION_FAILED"; case CUSPARSE_STATUS_INTERNAL_ERROR: return "CUSPARSE_STATUS_INTERNAL_ERROR"; case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; } return "<unknown>"; } #endif #ifdef CUSOLVER_COMMON_H_ // cuSOLVER API errors static const char *_cudaGetErrorEnum(cusolverStatus_t error) { switch (error) { case CUSOLVER_STATUS_SUCCESS: return "CUSOLVER_STATUS_SUCCESS"; case CUSOLVER_STATUS_NOT_INITIALIZED: return "CUSOLVER_STATUS_NOT_INITIALIZED"; case CUSOLVER_STATUS_ALLOC_FAILED: return "CUSOLVER_STATUS_ALLOC_FAILED"; case CUSOLVER_STATUS_INVALID_VALUE: return "CUSOLVER_STATUS_INVALID_VALUE"; case CUSOLVER_STATUS_ARCH_MISMATCH: return "CUSOLVER_STATUS_ARCH_MISMATCH"; case CUSOLVER_STATUS_MAPPING_ERROR: return "CUSOLVER_STATUS_MAPPING_ERROR"; case CUSOLVER_STATUS_EXECUTION_FAILED: return "CUSOLVER_STATUS_EXECUTION_FAILED"; case CUSOLVER_STATUS_INTERNAL_ERROR: return 
"CUSOLVER_STATUS_INTERNAL_ERROR"; case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; case CUSOLVER_STATUS_NOT_SUPPORTED: return "CUSOLVER_STATUS_NOT_SUPPORTED "; case CUSOLVER_STATUS_ZERO_PIVOT: return "CUSOLVER_STATUS_ZERO_PIVOT"; case CUSOLVER_STATUS_INVALID_LICENSE: return "CUSOLVER_STATUS_INVALID_LICENSE"; } return "<unknown>"; } #endif #ifdef CURAND_H_ // cuRAND API errors static const char *_cudaGetErrorEnum(int error) { switch (error) { case 0: return "CURAND_STATUS_SUCCESS"; case 100: return "CURAND_STATUS_VERSION_MISMATCH"; case 101: return "CURAND_STATUS_NOT_INITIALIZED"; case 102: return "CURAND_STATUS_ALLOCATION_FAILED"; case 103: return "CURAND_STATUS_TYPE_ERROR"; case 104: return "CURAND_STATUS_OUT_OF_RANGE"; case 105: return "CURAND_STATUS_LENGTH_NOT_MULTIPLE"; case 106: return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED"; case 201: return "CURAND_STATUS_LAUNCH_FAILURE"; case 202: return "CURAND_STATUS_PREEXISTING_FAILURE"; case 203: return "CURAND_STATUS_INITIALIZATION_FAILED"; case 204: return "CURAND_STATUS_ARCH_MISMATCH"; case 999: return "CURAND_STATUS_INTERNAL_ERROR"; } return "<unknown>"; } #endif #ifdef NVJPEGAPI // nvJPEG API errors static const char *_cudaGetErrorEnum(nvjpegStatus_t error) { switch (error) { case NVJPEG_STATUS_SUCCESS: return "NVJPEG_STATUS_SUCCESS"; case NVJPEG_STATUS_NOT_INITIALIZED: return "NVJPEG_STATUS_NOT_INITIALIZED"; case NVJPEG_STATUS_INVALID_PARAMETER: return "NVJPEG_STATUS_INVALID_PARAMETER"; case NVJPEG_STATUS_BAD_JPEG: return "NVJPEG_STATUS_BAD_JPEG"; case NVJPEG_STATUS_JPEG_NOT_SUPPORTED: return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED"; case NVJPEG_STATUS_ALLOCATOR_FAILURE: return "NVJPEG_STATUS_ALLOCATOR_FAILURE"; case NVJPEG_STATUS_EXECUTION_FAILED: return "NVJPEG_STATUS_EXECUTION_FAILED"; case NVJPEG_STATUS_ARCH_MISMATCH: return "NVJPEG_STATUS_ARCH_MISMATCH"; case NVJPEG_STATUS_INTERNAL_ERROR: return "NVJPEG_STATUS_INTERNAL_ERROR"; } return "<unknown>"; } #endif #ifdef NV_NPPIDEFS_H // NPP API errors static const char *_cudaGetErrorEnum(NppStatus error) { switch (error) { case NPP_NOT_SUPPORTED_MODE_ERROR: return "NPP_NOT_SUPPORTED_MODE_ERROR"; case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR: return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR"; case NPP_RESIZE_NO_OPERATION_ERROR: return "NPP_RESIZE_NO_OPERATION_ERROR"; case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY: return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY"; #if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000 case NPP_BAD_ARG_ERROR: return "NPP_BAD_ARGUMENT_ERROR"; case NPP_COEFF_ERROR: return "NPP_COEFFICIENT_ERROR"; case NPP_RECT_ERROR: return "NPP_RECTANGLE_ERROR"; case NPP_QUAD_ERROR: return "NPP_QUADRANGLE_ERROR"; case NPP_MEM_ALLOC_ERR: return "NPP_MEMORY_ALLOCATION_ERROR"; case NPP_HISTO_NUMBER_OF_LEVELS_ERROR: return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR"; case NPP_INVALID_INPUT: return "NPP_INVALID_INPUT"; case NPP_POINTER_ERROR: return "NPP_POINTER_ERROR"; case NPP_WARNING: return "NPP_WARNING"; case NPP_ODD_ROI_WARNING: return "NPP_ODD_ROI_WARNING"; #else // These are for CUDA 5.5 or higher case NPP_BAD_ARGUMENT_ERROR: return "NPP_BAD_ARGUMENT_ERROR"; case NPP_COEFFICIENT_ERROR: return "NPP_COEFFICIENT_ERROR"; case NPP_RECTANGLE_ERROR: return "NPP_RECTANGLE_ERROR"; case NPP_QUADRANGLE_ERROR: return "NPP_QUADRANGLE_ERROR"; case NPP_MEMORY_ALLOCATION_ERR: return "NPP_MEMORY_ALLOCATION_ERROR"; case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR: return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR"; case NPP_INVALID_HOST_POINTER_ERROR: return 
"NPP_INVALID_HOST_POINTER_ERROR"; case NPP_INVALID_DEVICE_POINTER_ERROR: return "NPP_INVALID_DEVICE_POINTER_ERROR"; #endif case NPP_LUT_NUMBER_OF_LEVELS_ERROR: return "NPP_LUT_NUMBER_OF_LEVELS_ERROR"; case NPP_TEXTURE_BIND_ERROR: return "NPP_TEXTURE_BIND_ERROR"; case NPP_WRONG_INTERSECTION_ROI_ERROR: return "NPP_WRONG_INTERSECTION_ROI_ERROR"; case NPP_NOT_EVEN_STEP_ERROR: return "NPP_NOT_EVEN_STEP_ERROR"; case NPP_INTERPOLATION_ERROR: return "NPP_INTERPOLATION_ERROR"; case NPP_RESIZE_FACTOR_ERROR: return "NPP_RESIZE_FACTOR_ERROR"; case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR: return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR"; #if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000 case NPP_MEMFREE_ERR: return "NPP_MEMFREE_ERR"; case NPP_MEMSET_ERR: return "NPP_MEMSET_ERR"; case NPP_MEMCPY_ERR: return "NPP_MEMCPY_ERROR"; case NPP_MIRROR_FLIP_ERR: return "NPP_MIRROR_FLIP_ERR"; #else case NPP_MEMFREE_ERROR: return "NPP_MEMFREE_ERROR"; case NPP_MEMSET_ERROR: return "NPP_MEMSET_ERROR"; case NPP_MEMCPY_ERROR: return "NPP_MEMCPY_ERROR"; case NPP_MIRROR_FLIP_ERROR: return "NPP_MIRROR_FLIP_ERROR"; #endif case NPP_ALIGNMENT_ERROR: return "NPP_ALIGNMENT_ERROR"; case NPP_STEP_ERROR: return "NPP_STEP_ERROR"; case NPP_SIZE_ERROR: return "NPP_SIZE_ERROR"; case NPP_NULL_POINTER_ERROR: return "NPP_NULL_POINTER_ERROR"; case NPP_CUDA_KERNEL_EXECUTION_ERROR: return "NPP_CUDA_KERNEL_EXECUTION_ERROR"; case NPP_NOT_IMPLEMENTED_ERROR: return "NPP_NOT_IMPLEMENTED_ERROR"; case NPP_ERROR: return "NPP_ERROR"; case NPP_SUCCESS: return "NPP_SUCCESS"; case NPP_WRONG_INTERSECTION_QUAD_WARNING: return "NPP_WRONG_INTERSECTION_QUAD_WARNING"; case NPP_MISALIGNED_DST_ROI_WARNING: return "NPP_MISALIGNED_DST_ROI_WARNING"; case NPP_AFFINE_QUAD_INCORRECT_WARNING: return "NPP_AFFINE_QUAD_INCORRECT_WARNING"; case NPP_DOUBLE_SIZE_WARNING: return "NPP_DOUBLE_SIZE_WARNING"; case NPP_WRONG_INTERSECTION_ROI_WARNING: return "NPP_WRONG_INTERSECTION_ROI_WARNING"; #if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000 /* These are 6.0 or higher */ case NPP_LUT_PALETTE_BITSIZE_ERROR: return "NPP_LUT_PALETTE_BITSIZE_ERROR"; case NPP_ZC_MODE_NOT_SUPPORTED_ERROR: return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR"; case NPP_QUALITY_INDEX_ERROR: return "NPP_QUALITY_INDEX_ERROR"; case NPP_CHANNEL_ORDER_ERROR: return "NPP_CHANNEL_ORDER_ERROR"; case NPP_ZERO_MASK_VALUE_ERROR: return "NPP_ZERO_MASK_VALUE_ERROR"; case NPP_NUMBER_OF_CHANNELS_ERROR: return "NPP_NUMBER_OF_CHANNELS_ERROR"; case NPP_COI_ERROR: return "NPP_COI_ERROR"; case NPP_DIVISOR_ERROR: return "NPP_DIVISOR_ERROR"; case NPP_CHANNEL_ERROR: return "NPP_CHANNEL_ERROR"; case NPP_STRIDE_ERROR: return "NPP_STRIDE_ERROR"; case NPP_ANCHOR_ERROR: return "NPP_ANCHOR_ERROR"; case NPP_MASK_SIZE_ERROR: return "NPP_MASK_SIZE_ERROR"; case NPP_MOMENT_00_ZERO_ERROR: return "NPP_MOMENT_00_ZERO_ERROR"; case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR: return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR"; case NPP_THRESHOLD_ERROR: return "NPP_THRESHOLD_ERROR"; case NPP_CONTEXT_MATCH_ERROR: return "NPP_CONTEXT_MATCH_ERROR"; case NPP_FFT_FLAG_ERROR: return "NPP_FFT_FLAG_ERROR"; case NPP_FFT_ORDER_ERROR: return "NPP_FFT_ORDER_ERROR"; case NPP_SCALE_RANGE_ERROR: return "NPP_SCALE_RANGE_ERROR"; case NPP_DATA_TYPE_ERROR: return "NPP_DATA_TYPE_ERROR"; case NPP_OUT_OFF_RANGE_ERROR: return "NPP_OUT_OFF_RANGE_ERROR"; case NPP_DIVIDE_BY_ZERO_ERROR: return "NPP_DIVIDE_BY_ZERO_ERROR"; case NPP_RANGE_ERROR: return "NPP_RANGE_ERROR"; case NPP_NO_MEMORY_ERROR: return "NPP_NO_MEMORY_ERROR"; case NPP_ERROR_RESERVED: return 
"NPP_ERROR_RESERVED"; case NPP_NO_OPERATION_WARNING: return "NPP_NO_OPERATION_WARNING"; case NPP_DIVIDE_BY_ZERO_WARNING: return "NPP_DIVIDE_BY_ZERO_WARNING"; #endif #if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000 /* These are 7.0 or higher */ case NPP_OVERFLOW_ERROR: return "NPP_OVERFLOW_ERROR"; case NPP_CORRUPTED_DATA_ERROR: return "NPP_CORRUPTED_DATA_ERROR"; #endif } return "<unknown>"; } #endif template <typename T> void check(T result, char const *const func, const char *const file, int const line) { } #ifdef __DPCT_HPP__ // This will output the proper CUDA error strings in the event // that a CUDA host call returns an error #define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__) // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line) { /* DPCT1010:6: SYCL uses exceptions to report errors and does not use the error codes. The call was replaced with 0. You need to rewrite this code. */ dpct::err0 err = 0; } // This will only print the proper error string when calling cudaGetLastError // but not exit program incase error detected. #define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__) inline void __printLastCudaError(const char *errorMessage, const char *file, const int line) { /* DPCT1010:8: SYCL uses exceptions to report errors and does not use the error codes. The call was replaced with 0. You need to rewrite this code. */ dpct::err0 err = 0; } #endif #ifndef MAX #define MAX(a, b) (a > b ? a : b) #endif // Float To Int conversion inline int ftoi(float value) { return (value >= 0 ? static_cast<int>(value + 0.5) : static_cast<int>(value - 0.5)); } // Beginning of GPU Architecture definitions inline int _ConvertSMVer2Cores(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine // the # of cores per SM typedef struct dpct_type_113531 { int SM; // 0xMm (hexidecimal notation), M = SM Major version, // and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { {0x30, 192}, {0x32, 192}, {0x35, 192}, {0x37, 192}, {0x50, 128}, {0x52, 128}, {0x53, 128}, {0x60, 64}, {0x61, 128}, {0x62, 128}, {0x70, 64}, {0x72, 64}, {0x75, 64}, {0x80, 64}, {0x86, 128}, {0x87, 128}, {0x89, 128}, {0x90, 128}, {-1, -1}}; int index = 0; while (nGpuArchCoresPerSM[index].SM != -1) { if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) { return nGpuArchCoresPerSM[index].Cores; } index++; } // If we don't find the values, we default use the previous one // to run properly printf( "MapSMtoCores for SM %d.%d is undefined." 
" Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index - 1].Cores); return nGpuArchCoresPerSM[index - 1].Cores; } inline const char* _ConvertSMVer2ArchName(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine // the GPU Arch name) typedef struct dpct_type_281558 { int SM; // 0xMm (hexidecimal notation), M = SM Major version, // and m = SM minor version const char* name; } sSMtoArchName; sSMtoArchName nGpuArchNameSM[] = { {0x30, "Kepler"}, {0x32, "Kepler"}, {0x35, "Kepler"}, {0x37, "Kepler"}, {0x50, "Maxwell"}, {0x52, "Maxwell"}, {0x53, "Maxwell"}, {0x60, "Pascal"}, {0x61, "Pascal"}, {0x62, "Pascal"}, {0x70, "Volta"}, {0x72, "Xavier"}, {0x75, "Turing"}, {0x80, "Ampere"}, {0x86, "Ampere"}, {0x87, "Ampere"}, {0x89, "Ada"}, {0x90, "Hopper"}, {-1, "Graphics Device"}}; int index = 0; while (nGpuArchNameSM[index].SM != -1) { if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) { return nGpuArchNameSM[index].name; } index++; } // If we don't find the values, we default use the previous one // to run properly printf( "MapSMtoArchName for SM %d.%d is undefined." " Default to use %s\n", major, minor, nGpuArchNameSM[index - 1].name); return nGpuArchNameSM[index - 1].name; } // end of GPU Architecture definitions #ifdef __DPCT_HPP__ // General GPU Device CUDA Initialization inline int gpuDeviceInit(int devID) { int device_count; checkCudaErrors(DPCT_CHECK_ERROR( device_count = dpct::dev_mgr::instance().device_count())); if (device_count == 0) { fprintf(stderr, "gpuDeviceInit() CUDA error: " "no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } if (devID < 0) { devID = 0; } if (devID > device_count - 1) { fprintf(stderr, "\n"); fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", device_count); fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid" " GPU device. <<\n", devID); fprintf(stderr, "\n"); return -devID; } int computeMode = -1, major = 0, minor = 0; /* DPCT1035:10: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1)); checkCudaErrors(DPCT_CHECK_ERROR( major = dpct::dev_mgr::instance().get_device(devID).get_major_version())); checkCudaErrors(DPCT_CHECK_ERROR( minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version())); /* DPCT1035:11: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ if (computeMode == 0) { fprintf(stderr, "Error: device is running in <Compute Mode " "Prohibited>, no threads can use cudaSetDevice().\n"); return -1; } if (major < 1) { fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n"); exit(EXIT_FAILURE); } /* DPCT1093:12: The "devID" device may be not the one intended for use. Adjust the selected device if needed. 
*/ checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID))); printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, _ConvertSMVer2ArchName(major, minor)); return devID; } // This function returns the best GPU (with maximum GFLOPS) inline int gpuGetMaxGflopsDeviceId() try { int current_device = 0, sm_per_multiproc = 0; int max_perf_device = 0; int device_count = 0; int devices_prohibited = 0; uint64_t max_compute_perf = 0; checkCudaErrors(DPCT_CHECK_ERROR( device_count = dpct::dev_mgr::instance().device_count())); if (device_count == 0) { fprintf(stderr, "gpuGetMaxGflopsDeviceId() CUDA error:" " no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } // Find the best CUDA capable GPU device current_device = 0; while (current_device < device_count) { int computeMode = -1, major = 0, minor = 0; /* DPCT1035:13: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1)); checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance() .get_device(current_device) .get_major_version())); checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance() .get_device(current_device) .get_minor_version())); // If this GPU is not running on Compute Mode prohibited, // then we can add it to the list /* DPCT1035:14: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ if (computeMode != 0) { if (major == 9999 && minor == 9999) { sm_per_multiproc = 1; } else { sm_per_multiproc = _ConvertSMVer2Cores(major, minor); } int multiProcessorCount = 0, clockRate = 0; checkCudaErrors( DPCT_CHECK_ERROR(multiProcessorCount = dpct::dev_mgr::instance() .get_device(current_device) .get_max_compute_units())); dpct::err0 result = DPCT_CHECK_ERROR(clockRate = dpct::dev_mgr::instance() .get_device(current_device) .get_max_clock_frequency()); uint64_t compute_perf = (uint64_t)multiProcessorCount * sm_per_multiproc * clockRate; if (compute_perf > max_compute_perf) { max_compute_perf = compute_perf; max_perf_device = current_device; } } else { devices_prohibited++; } ++current_device; } if (devices_prohibited == device_count) { fprintf(stderr, "gpuGetMaxGflopsDeviceId() CUDA error:" " all devices have compute mode prohibited.\n"); exit(EXIT_FAILURE); } return max_perf_device; } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } // Initialization code to find the best CUDA Device inline int findCudaDevice(int argc, const char **argv) { int devID = 0; // If the command-line has a device number specified, use it if (checkCmdLineFlag(argc, argv, "device")) { devID = getCmdLineArgumentInt(argc, argv, "device="); if (devID < 0) { printf("Invalid command line parameter\n "); exit(EXIT_FAILURE); } else { devID = gpuDeviceInit(devID); if (devID < 0) { printf("exiting...\n"); exit(EXIT_FAILURE); } } } else { // Otherwise pick the device with highest Gflops/s devID = gpuGetMaxGflopsDeviceId(); /* DPCT1093:15: The "devID" device may be not the one intended for use. Adjust the selected device if needed. 
*/ checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID))); int major = 0, minor = 0; checkCudaErrors(DPCT_CHECK_ERROR( major = dpct::dev_mgr::instance().get_device(devID).get_major_version())); checkCudaErrors(DPCT_CHECK_ERROR( minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version())); printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, _ConvertSMVer2ArchName(major, minor), major, minor); } return devID; } inline int findIntegratedGPU() { int current_device = 0; int device_count = 0; int devices_prohibited = 0; checkCudaErrors(DPCT_CHECK_ERROR( device_count = dpct::dev_mgr::instance().device_count())); if (device_count == 0) { fprintf(stderr, "CUDA error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } // Find the integrated GPU which is compute capable while (current_device < device_count) { int computeMode = -1, integrated = -1; /* DPCT1035:16: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1)); checkCudaErrors( DPCT_CHECK_ERROR(integrated = dpct::dev_mgr::instance() .get_device(current_device) .get_integrated())); // If GPU is integrated and is not running on Compute Mode prohibited, // then cuda can map to GLES resource /* DPCT1035:17: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ if (integrated && (computeMode != 0)) { /* DPCT1093:18: The "current_device" device may be not the one intended for use. Adjust the selected device if needed. */ checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(current_device))); int major = 0, minor = 0; checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance() .get_device(current_device) .get_major_version())); checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance() .get_device(current_device) .get_minor_version())); printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", current_device, _ConvertSMVer2ArchName(major, minor), major, minor); return current_device; } else { devices_prohibited++; } current_device++; } if (devices_prohibited == device_count) { fprintf(stderr, "CUDA error:" " No GLES-CUDA Interop capable GPU found.\n"); exit(EXIT_FAILURE); } return -1; } // General check for CUDA GPU SM Capabilities inline bool checkCudaCapabilities(int major_version, int minor_version) { int dev; int major = 0, minor = 0; checkCudaErrors(dev = dpct::dev_mgr::instance().current_device_id()); checkCudaErrors(DPCT_CHECK_ERROR( major = dpct::dev_mgr::instance().get_device(dev).get_major_version())); checkCudaErrors(DPCT_CHECK_ERROR( minor = dpct::dev_mgr::instance().get_device(dev).get_minor_version())); if ((major > major_version) || (major == major_version && minor >= minor_version)) { printf(" Device %d: <%16s >, Compute SM %d.%d detected\n", dev, _ConvertSMVer2ArchName(major, minor), major, minor); return true; } else { printf( " No GPU device was found that can support " "CUDA compute capability %d.%d.\n", major_version, minor_version); return false; } } #endif // end of CUDA Helper Functions #endif // COMMON_HELPER_CUDA_H_
h
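The helper above maps each library's status enum (cuBLAS, cuFFT, cuSPARSE, cuSOLVER, cuRAND, nvJPEG, NPP) to a printable string and funnels every call through check()/checkCudaErrors(); in this SYCL migration the check() body is intentionally empty because errors surface as exceptions. The following is a minimal standalone sketch of the original check pattern, using a hypothetical demo_status enum and checkDemoErrors macro purely for illustration; it is not part of the sample.

#include <cstdio>
#include <cstdlib>

enum class demo_status { success = 0, alloc_failed = 1 };  // hypothetical status type

// Mirrors the role of _cudaGetErrorEnum(): status value -> readable name.
static const char *statusToString(demo_status s) {
  switch (s) {
    case demo_status::success:      return "DEMO_STATUS_SUCCESS";
    case demo_status::alloc_failed: return "DEMO_STATUS_ALLOC_FAILED";
  }
  return "<unknown>";
}

template <typename T>
void check(T result, const char *func, const char *file, int line) {
  if (result != T{}) {  // non-zero status means failure
    std::fprintf(stderr, "Error at %s:%d code=%d(%s) \"%s\"\n", file, line,
                 static_cast<int>(result), statusToString(result), func);
    std::exit(EXIT_FAILURE);
  }
}
#define checkDemoErrors(val) check((val), #val, __FILE__, __LINE__)

int main() {
  checkDemoErrors(demo_status::success);       // passes silently
  checkDemoErrors(demo_status::alloc_failed);  // prints the decoded status and exits
}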
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/Samples/2_Concepts_and_Techniques/sortingNetworks/sortingNetworks_common.dp.hpp
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SORTINGNETWORKS_COMMON_CUH #define SORTINGNETWORKS_COMMON_CUH #include <sycl/sycl.hpp> #include <dpct/dpct.hpp> #include "sortingNetworks_common.h" // Enables maximum occupancy #define SHARED_SIZE_LIMIT 512U // Map to single instructions on G8x / G9x / G100 #define UMUL(a, b) __umul24((a), (b)) #define UMAD(a, b, c) (UMUL((a), (b)) + (c)) inline void Comparator(uint &keyA, uint &valA, uint &keyB, uint &valB, uint dir) { uint t; if ((keyA > keyB) == dir) { t = keyA; keyA = keyB; keyB = t; t = valA; valA = valB; valB = t; } } #endif
hpp
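Comparator() above is the single building block of the whole sorting network: it conditionally swaps a key together with its value so that the pair ends up ordered in the direction given by dir (1 = ascending, 0 = descending). A small host-side sketch of the same exchange rule, with made-up demo values:

#include <cstdio>

typedef unsigned int uint;

// Same exchange rule as the device helper in sortingNetworks_common.dp.hpp.
static void Comparator(uint &keyA, uint &valA, uint &keyB, uint &valB, uint dir) {
  if ((keyA > keyB) == dir) {
    uint t = keyA; keyA = keyB; keyB = t;
    t = valA; valA = valB; valB = t;
  }
}

int main() {
  uint kA = 7, vA = 100, kB = 3, vB = 200;
  Comparator(kA, vA, kB, vB, 1u);  // ascending: (3,200) ends up first
  std::printf("ascending:  (%u,%u) (%u,%u)\n", kA, vA, kB, vB);
  Comparator(kA, vA, kB, vB, 0u);  // descending: larger key ends up first again
  std::printf("descending: (%u,%u) (%u,%u)\n", kA, vA, kB, vB);
}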
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/Samples/2_Concepts_and_Techniques/sortingNetworks/sortingNetworks_common.h
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <sycl/sycl.hpp> //////////////////////////////////////////////////////////////////////////////// // Shortcut definition //////////////////////////////////////////////////////////////////////////////// typedef unsigned int uint; /////////////////////////////////////////////////////////////////////////////// // Sort result validation routines //////////////////////////////////////////////////////////////////////////////// // Sorted keys array validation (check for integrity and proper order) extern "C" uint validateSortedKeys(uint *resKey, uint *srcKey, uint batchSize, uint arrayLength, uint numValues, uint dir); extern "C" int validateValues(uint *resKey, uint *resVal, uint *srcKey, uint batchSize, uint arrayLength); //////////////////////////////////////////////////////////////////////////////// // CUDA sorting networks //////////////////////////////////////////////////////////////////////////////// extern "C" uint oddEvenMergeSort(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint batchSize, uint arrayLength, uint dir, sycl::queue &q);
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/Samples/2_Concepts_and_Techniques/sortingNetworks/sortingNetworks_validate.cpp
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "sortingNetworks_common.h" //////////////////////////////////////////////////////////////////////////////// // Validate sorted keys array (check for integrity and proper order) //////////////////////////////////////////////////////////////////////////////// extern "C" uint validateSortedKeys(uint *resKey, uint *srcKey, uint batchSize, uint arrayLength, uint numValues, uint dir) { uint *srcHist; uint *resHist; if (arrayLength < 2) { printf("validateSortedKeys(): arrayLength too short, exiting...\n"); return 1; } printf("...inspecting keys array: "); srcHist = (uint *)malloc(numValues * sizeof(uint)); resHist = (uint *)malloc(numValues * sizeof(uint)); int flag = 1; for (uint j = 0; j < batchSize; j++, srcKey += arrayLength, resKey += arrayLength) { // Build histograms for keys arrays memset(srcHist, 0, numValues * sizeof(uint)); memset(resHist, 0, numValues * sizeof(uint)); for (uint i = 0; i < arrayLength; i++) { if (srcKey[i] < numValues && resKey[i] < numValues) { srcHist[srcKey[i]]++; resHist[resKey[i]]++; } else { flag = 0; break; } } if (!flag) { printf("***Set %u source/result key arrays are not limited properly***\n", j); goto brk; } // Compare the histograms for (uint i = 0; i < numValues; i++) if (srcHist[i] != resHist[i]) { flag = 0; break; } if (!flag) { printf("***Set %u source/result keys histograms do not match***\n", j); goto brk; } if (dir) { // Ascending order for (uint i = 0; i < arrayLength - 1; i++) if (resKey[i + 1] < resKey[i]) { flag = 0; break; } } else { // Descending order for (uint i = 0; i < arrayLength - 1; i++) if (resKey[i + 1] > resKey[i]) { flag = 0; break; } } if (!flag) { printf("***Set %u result key array is not ordered properly***\n", j); goto brk; } } brk: free(resHist); free(srcHist); if (flag) printf("OK\n"); return flag; } extern "C" int validateValues(uint *resKey, uint *resVal, uint *srcKey, uint batchSize, uint arrayLength) { int correctFlag = 1, stableFlag = 1; printf("...inspecting keys 
and values array: "); for (uint i = 0; i < batchSize; i++, resKey += arrayLength, resVal += arrayLength) { for (uint j = 0; j < arrayLength; j++) { if (resKey[j] != srcKey[resVal[j]]) correctFlag = 0; if ((j < arrayLength - 1) && (resKey[j] == resKey[j + 1]) && (resVal[j] > resVal[j + 1])) stableFlag = 0; } } printf(correctFlag ? "OK\n" : "***corrupted!!!***\n"); printf(stableFlag ? "...stability property: stable!\n" : "...stability property: NOT stable\n"); return correctFlag; }
cpp
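validateSortedKeys() above performs three checks per batch: every key must lie below numValues, the result histogram must equal the source histogram (so the output is a permutation of the input), and the result must be monotone in the requested direction. A compact standalone miniature of that logic, with hypothetical test data:

#include <cstdio>
#include <vector>

typedef unsigned int uint;

static bool validateBatch(const uint *src, const uint *res, uint n, uint numValues, uint dir) {
  std::vector<uint> srcHist(numValues, 0), resHist(numValues, 0);
  for (uint i = 0; i < n; i++) {
    if (src[i] >= numValues || res[i] >= numValues) return false;  // range check
    srcHist[src[i]]++;
    resHist[res[i]]++;
  }
  if (srcHist != resHist) return false;                            // permutation check
  for (uint i = 0; i + 1 < n; i++)                                 // order check
    if (dir ? (res[i + 1] < res[i]) : (res[i + 1] > res[i])) return false;
  return true;
}

int main() {
  uint src[] = {3, 1, 4, 1, 5};
  uint asc[] = {1, 1, 3, 4, 5};
  std::printf("sorted batch valid: %d\n", validateBatch(src, asc, 5, 8, 1));
}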
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/Samples/2_Concepts_and_Techniques/sortingNetworks/oddEvenMergeSort.dp.cpp
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ //Based on http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/networks/oemen.htm #include <sycl/sycl.hpp> #include <dpct/dpct.hpp> #include <assert.h> #include <helper_cuda.h> #include "sortingNetworks_common.h" #include "sortingNetworks_common.dp.hpp" //////////////////////////////////////////////////////////////////////////////// // Monolithic Bacther's sort kernel for short arrays fitting into shared memory //////////////////////////////////////////////////////////////////////////////// void oddEvenMergeSortShared(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint dir, const sycl::nd_item<3> &item_ct1, uint *s_key, uint *s_val) { // Handle to thread block group auto cta = item_ct1.get_group(); // Shared memory storage for one or more small vectors // Offset to the beginning of subbatch and load data d_SrcKey += item_ct1.get_group(2) * SHARED_SIZE_LIMIT + item_ct1.get_local_id(2); d_SrcVal += item_ct1.get_group(2) * SHARED_SIZE_LIMIT + item_ct1.get_local_id(2); d_DstKey += item_ct1.get_group(2) * SHARED_SIZE_LIMIT + item_ct1.get_local_id(2); d_DstVal += item_ct1.get_group(2) * SHARED_SIZE_LIMIT + item_ct1.get_local_id(2); s_key[item_ct1.get_local_id(2) + 0] = d_SrcKey[0]; s_val[item_ct1.get_local_id(2) + 0] = d_SrcVal[0]; s_key[item_ct1.get_local_id(2) + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)]; s_val[item_ct1.get_local_id(2) + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)]; for (uint size = 2; size <= arrayLength; size <<= 1) { uint stride = size / 2; uint offset = item_ct1.get_local_id(2) & (stride - 1); { item_ct1.barrier(); uint pos = 2 * item_ct1.get_local_id(2) - (item_ct1.get_local_id(2) & (stride - 1)); Comparator(s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], dir); stride >>= 1; } for (; stride > 0; stride >>= 1) { item_ct1.barrier(); uint pos = 2 * item_ct1.get_local_id(2) - (item_ct1.get_local_id(2) & (stride - 1)); if (offset >= stride) Comparator(s_key[pos - stride], s_val[pos - 
stride], s_key[pos + 0], s_val[pos + 0], dir); } } item_ct1.barrier(); d_DstKey[0] = s_key[item_ct1.get_local_id(2) + 0]; d_DstVal[0] = s_val[item_ct1.get_local_id(2) + 0]; d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[item_ct1.get_local_id(2) + (SHARED_SIZE_LIMIT / 2)]; d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[item_ct1.get_local_id(2) + (SHARED_SIZE_LIMIT / 2)]; } //////////////////////////////////////////////////////////////////////////////// // Odd-even merge sort iteration kernel // for large arrays (not fitting into shared memory) //////////////////////////////////////////////////////////////////////////////// void oddEvenMergeGlobal(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint size, uint stride, uint dir, const sycl::nd_item<3> &item_ct1) { uint global_comparatorI = item_ct1.get_group(2) * item_ct1.get_local_range(2) + item_ct1.get_local_id(2); // Odd-even merge uint pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1)); if (stride < size / 2) { uint offset = global_comparatorI & ((size / 2) - 1); if (offset >= stride) { uint keyA = d_SrcKey[pos - stride]; uint valA = d_SrcVal[pos - stride]; uint keyB = d_SrcKey[pos + 0]; uint valB = d_SrcVal[pos + 0]; Comparator(keyA, valA, keyB, valB, dir); d_DstKey[pos - stride] = keyA; d_DstVal[pos - stride] = valA; d_DstKey[pos + 0] = keyB; d_DstVal[pos + 0] = valB; } } else { uint keyA = d_SrcKey[pos + 0]; uint valA = d_SrcVal[pos + 0]; uint keyB = d_SrcKey[pos + stride]; uint valB = d_SrcVal[pos + stride]; Comparator(keyA, valA, keyB, valB, dir); d_DstKey[pos + 0] = keyA; d_DstVal[pos + 0] = valA; d_DstKey[pos + stride] = keyB; d_DstVal[pos + stride] = valB; } } //////////////////////////////////////////////////////////////////////////////// // Interface function //////////////////////////////////////////////////////////////////////////////// // Helper function extern "C" uint factorRadix2(uint *log2L, uint L) { if (!L) { *log2L = 0; return 0; } else { for (*log2L = 0; (L & 1) == 0; L >>= 1, *log2L++) ; return L; } } extern "C" uint oddEvenMergeSort(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint batchSize, uint arrayLength, uint dir, sycl::queue &q) { // Nothing to sort if (arrayLength < 2) return 0; // Only power-of-two array lengths are supported by this implementation uint log2L; uint factorizationRemainder = factorRadix2(&log2L, arrayLength); assert(factorizationRemainder == 1); dir = (dir != 0); uint blockCount = (batchSize * arrayLength) / SHARED_SIZE_LIMIT; uint threadCount = SHARED_SIZE_LIMIT / 2; if (arrayLength <= SHARED_SIZE_LIMIT) { assert(SHARED_SIZE_LIMIT % arrayLength == 0); q.submit([&](sycl::handler &cgh) { sycl::local_accessor<uint, 1> s_key_acc_ct1( sycl::range<1>(SHARED_SIZE_LIMIT), cgh); sycl::local_accessor<uint, 1> s_val_acc_ct1( sycl::range<1>(SHARED_SIZE_LIMIT), cgh); cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, blockCount) * sycl::range<3>(1, 1, threadCount), sycl::range<3>(1, 1, threadCount)), [=](sycl::nd_item<3> item_ct1) { oddEvenMergeSortShared(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, dir, item_ct1, s_key_acc_ct1.get_pointer(), s_val_acc_ct1.get_pointer()); }); }); } else { q.submit([&](sycl::handler &cgh) { sycl::local_accessor<uint, 1> s_key_acc_ct1( sycl::range<1>(SHARED_SIZE_LIMIT), cgh); sycl::local_accessor<uint, 1> s_val_acc_ct1( sycl::range<1>(SHARED_SIZE_LIMIT), cgh); cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, blockCount) * sycl::range<3>(1, 1, threadCount), sycl::range<3>(1, 1, threadCount)), 
[=](sycl::nd_item<3> item_ct1) { oddEvenMergeSortShared(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, SHARED_SIZE_LIMIT, dir, item_ct1, s_key_acc_ct1.get_pointer(), s_val_acc_ct1.get_pointer()); }); }); for (uint size = 2 * SHARED_SIZE_LIMIT; size <= arrayLength; size <<= 1) for (unsigned stride = size / 2; stride > 0; stride >>= 1) { // Unlike with bitonic sort, combining bitonic merge steps with // stride = [SHARED_SIZE_LIMIT / 2 .. 1] seems to be impossible as there // are dependencies between data elements crossing the SHARED_SIZE_LIMIT // borders q.parallel_for( sycl::nd_range<3>( sycl::range<3>(1, 1, (batchSize * arrayLength) / 512) * sycl::range<3>(1, 1, 256), sycl::range<3>(1, 1, 256)), [=](sycl::nd_item<3> item_ct1) { oddEvenMergeGlobal(d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, stride, dir, item_ct1); }); } } return threadCount; }
cpp
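The two kernels above implement Batcher's odd-even merge sort, splitting the network into a shared-local-memory phase for sub-arrays up to SHARED_SIZE_LIMIT and a global phase for the remaining size/stride steps. For reference, here is a plain host-side sketch of the same network; the loop bounds follow the textbook formulation the sample links to, and the array length must be a power of two, matching the factorRadix2() assertion. It is an illustrative reference, not the device code.

#include <algorithm>
#include <cstdio>
#include <vector>

typedef unsigned int uint;

static void oddEvenMergeSortHost(std::vector<uint> &a, uint dir) {
  const uint n = static_cast<uint>(a.size());           // must be a power of two
  for (uint p = 1; p < n; p <<= 1)
    for (uint k = p; k >= 1; k >>= 1)
      for (uint j = k % p; j + k < n; j += 2 * k)
        for (uint i = 0; i < std::min(k, n - j - k); i++)
          if ((i + j) / (2 * p) == (i + j + k) / (2 * p))
            if ((a[i + j] > a[i + j + k]) == dir)        // same rule as Comparator()
              std::swap(a[i + j], a[i + j + k]);
}

int main() {
  std::vector<uint> v = {9, 3, 7, 1, 8, 2, 6, 4};
  oddEvenMergeSortHost(v, 1);                            // ascending
  for (uint x : v) std::printf("%u ", x);                // 1 2 3 4 6 7 8 9
  std::printf("\n");
}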
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/Samples/2_Concepts_and_Techniques/sortingNetworks/main.cpp.dp.cpp
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * This sample implements bitonic sort and odd-even merge sort, algorithms * belonging to the class of sorting networks. * While generally subefficient on large sequences * compared to algorithms with better asymptotic algorithmic complexity * (i.e. merge sort or radix sort), may be the algorithms of choice for sorting * batches of short- or mid-sized arrays. * Refer to the excellent tutorial by H. W. 
Lang: * http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/networks/indexen.htm * * Victor Podlozhnyuk, 07/09/2009 */ // CUDA Runtime #include <sycl/sycl.hpp> #include <dpct/dpct.hpp> // Utilities and system includes #include <helper_cuda.h> #include <helper_timer.h> #include "sortingNetworks_common.h" //////////////////////////////////////////////////////////////////////////////// // Test driver //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) try { dpct::err0 error; printf("%s Starting...\n\n", argv[0]); sycl::queue q{sycl::default_selector_v, sycl::property::queue::in_order()}; std::cout << "\nRunning on " << q.get_device().get_info<sycl::info::device::name>() << "\n"; uint *h_InputKey, *h_InputVal, *h_OutputKeyGPU, *h_OutputValGPU; uint *d_InputKey, *d_InputVal, *d_OutputKey, *d_OutputVal; StopWatchInterface *hTimer = NULL; const uint N = 1048576; const uint DIR = 0; const uint numValues = 65536; const uint numIterations = 1; printf("Allocating and initializing host arrays...\n\n"); sdkCreateTimer(&hTimer); h_InputKey = (uint *)malloc(N * sizeof(uint)); h_InputVal = (uint *)malloc(N * sizeof(uint)); h_OutputKeyGPU = (uint *)malloc(N * sizeof(uint)); h_OutputValGPU = (uint *)malloc(N * sizeof(uint)); srand(2001); for (uint i = 0; i < N; i++) { h_InputKey[i] = rand() % numValues; h_InputVal[i] = i; } printf("Allocating and initializing CUDA arrays...\n\n"); error = DPCT_CHECK_ERROR( d_InputKey = sycl::malloc_device<uint>(N, q)); checkCudaErrors(error); error = DPCT_CHECK_ERROR( d_InputVal = sycl::malloc_device<uint>(N, q)); checkCudaErrors(error); error = DPCT_CHECK_ERROR( d_OutputKey = sycl::malloc_device<uint>(N, q)); checkCudaErrors(error); error = DPCT_CHECK_ERROR( d_OutputVal = sycl::malloc_device<uint>(N, q)); checkCudaErrors(error); error = DPCT_CHECK_ERROR(q.memcpy(d_InputKey, h_InputKey, N * sizeof(uint)) .wait()); checkCudaErrors(error); error = DPCT_CHECK_ERROR(q.memcpy(d_InputVal, h_InputVal, N * sizeof(uint)) .wait()); checkCudaErrors(error); int flag = 1; printf("Running GPU OddEven Merge sort (%u identical iterations)...\n\n", numIterations); for (uint arrayLength = 64; arrayLength <= N; arrayLength *= 2) { printf("Testing array length %u (%u arrays per batch)...\n", arrayLength, N / arrayLength); error = DPCT_CHECK_ERROR(q.wait_and_throw()); checkCudaErrors(error); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); uint threadCount = 0; for (uint i = 0; i < numIterations; i++) threadCount = oddEvenMergeSort(d_OutputKey, d_OutputVal, d_InputKey, d_InputVal, N / arrayLength, arrayLength, DIR, q); error = DPCT_CHECK_ERROR(q.wait_and_throw()); checkCudaErrors(error); sdkStopTimer(&hTimer); printf("Average time: %f ms\n\n", sdkGetTimerValue(&hTimer) / numIterations); if (arrayLength == N) { double dTimeSecs = 1.0e-3 * sdkGetTimerValue(&hTimer) / numIterations; printf( "sortingNetworks-oddevenmergesort, Throughput = %.4f MElements/s, Time = %.5f " "s, Size = %u elements, NumDevsUsed = %u, Workgroup = %u\n", (1.0e-6 * (double)arrayLength / dTimeSecs), dTimeSecs, arrayLength, 1, threadCount); } printf("\nValidating the results...\n"); printf("...reading back GPU results\n"); error = DPCT_CHECK_ERROR( q.memcpy(h_OutputKeyGPU, d_OutputKey, N * sizeof(uint)) .wait()); checkCudaErrors(error); error = DPCT_CHECK_ERROR( q.memcpy(h_OutputValGPU, d_OutputVal, N * sizeof(uint)) .wait()); checkCudaErrors(error); int keysFlag = validateSortedKeys(h_OutputKeyGPU, h_InputKey, N / arrayLength, arrayLength, numValues, DIR); int 
valuesFlag = validateValues(h_OutputKeyGPU, h_OutputValGPU, h_InputKey, N / arrayLength, arrayLength); flag = flag && keysFlag && valuesFlag; printf("\n"); } printf("Shutting down...\n"); sdkDeleteTimer(&hTimer); sycl::free(d_OutputVal, q); sycl::free(d_OutputKey, q); sycl::free(d_InputVal, q); sycl::free(d_InputKey, q); free(h_OutputValGPU); free(h_OutputKeyGPU); free(h_InputVal); free(h_InputKey); exit(flag ? EXIT_SUCCESS : EXIT_FAILURE); } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); }
cpp
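The test driver above follows the usual dpct-migrated USM pattern: device allocations with sycl::malloc_device, blocking q.memcpy() transfers on an in-order queue, the kernel call, result copy-back, and sycl::free. A minimal self-contained sketch of that host-side flow, assuming a SYCL compiler such as icpx; the +1 kernel is only a placeholder standing in for the oddEvenMergeSort() call.

#include <sycl/sycl.hpp>
#include <cstdio>
#include <vector>

int main() {
  sycl::queue q{sycl::default_selector_v, sycl::property::queue::in_order()};

  const size_t N = 8;
  std::vector<unsigned int> h_in(N), h_out(N);
  for (size_t i = 0; i < N; i++) h_in[i] = static_cast<unsigned int>(N - i);

  unsigned int *d_buf = sycl::malloc_device<unsigned int>(N, q);   // device USM
  q.memcpy(d_buf, h_in.data(), N * sizeof(unsigned int)).wait();   // host -> device

  // Placeholder work; the sample launches the sorting-network kernels here.
  q.parallel_for(sycl::range<1>(N), [=](sycl::id<1> i) { d_buf[i] += 1; }).wait();

  q.memcpy(h_out.data(), d_buf, N * sizeof(unsigned int)).wait();  // device -> host
  sycl::free(d_buf, q);

  for (unsigned int x : h_out) std::printf("%u ", x);
  std::printf("\n");
}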
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/ccl_utils.hpp
//==---- ccl_utils.hpp----------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_CCL_UTILS_HPP__ #define __DPCT_CCL_UTILS_HPP__ #include <sycl/sycl.hpp> #include <oneapi/ccl.hpp> #include <unordered_map> #include <memory> #include "device.hpp" namespace dpct { namespace ccl { namespace detail { /// Get stored kvs with specified kvs address. inline std::shared_ptr<oneapi::ccl::kvs> & get_kvs(const oneapi::ccl::kvs::address_type &addr) { struct hash { std::size_t operator()(const oneapi::ccl::kvs::address_type &in) const { return std::hash<std::string_view>()(std::string_view(in.data(), in.size())); } }; static std::unordered_map<oneapi::ccl::kvs::address_type, std::shared_ptr<oneapi::ccl::kvs>, hash> kvs_map; return kvs_map[addr]; } /// Help class to init ccl environment. class ccl_init_helper { public: ccl_init_helper() { oneapi::ccl::init(); } }; } // namespace detail /// Get concatenated library version as an integer. static inline int get_version() { oneapi::ccl::init(); auto ver = oneapi::ccl::get_library_version(); return ver.major * 10000 + ver.minor * 100 + ver.update; } /// Create main kvs and return its address. static inline oneapi::ccl::kvs::address_type create_kvs_address() { oneapi::ccl::init(); auto ptr = oneapi::ccl::create_main_kvs(); auto addr = ptr->get_address(); detail::get_kvs(addr) = ptr; return addr; } /// Get stored kvs with /p addr if exist. Otherwise, create kvs with /p addr. static inline std::shared_ptr<oneapi::ccl::kvs> create_kvs(const oneapi::ccl::kvs::address_type &addr) { oneapi::ccl::init(); auto &ptr = detail::get_kvs(addr); if (!ptr) ptr = oneapi::ccl::create_kvs(addr); return ptr; } /// dpct communicator extension class communicator_wrapper : public dpct::ccl::detail::ccl_init_helper { public: communicator_wrapper( int size, int rank, oneapi::ccl::kvs::address_type id, const oneapi::ccl::comm_attr &attr = oneapi::ccl::default_comm_attr) : _device_comm(oneapi::ccl::create_device( static_cast<sycl::device &>(dpct::get_current_device()))), _context_comm(oneapi::ccl::create_context(dpct::get_default_context())), _comm(oneapi::ccl::create_communicator( size, rank, _device_comm, _context_comm, dpct::ccl::create_kvs(id), attr)) { _queue_init = false; _ccl_stream_ptr = nullptr; } ~communicator_wrapper() { delete _ccl_stream_ptr; }; /// Return the rank in a oneapi::ccl::communicator /// \returns The rank corresponding to communicator object int rank() const { return _comm.rank(); } /// Retrieves the number of rank in oneapi::ccl::communicator /// \returns The number of the ranks int size() const { return _comm.size(); } /// Return underlying native device, which was used in oneapi::ccl::communicator sycl::device get_device() const { return _comm.get_device().get_native(); } /// \brief allreduce is a collective communication operation that performs the global reduction operation /// on values from all ranks of communicator and distributes the result back to all ranks. 
/// \param send_buf the buffer with @c count elements of @c dtype that stores local data to be reduced /// \param recv_buf [out] the buffer to store reduced result, must have the same dimension as @c send_buf /// \param count the number of elements of type @c dtype in @c send_buf and @c recv_buf /// \param dtype the datatype of elements in @c send_buf and @c recv_buf /// \param rtype the type of the reduction operation to be applied /// \param queue_ptr a sycl::queue ptr associated with the operation /// \return @ref void void allreduce(const void *sendbuff, void *recvbuff, size_t count, oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype, sycl::queue *queue_ptr) { call_func_wrapper( [=](const oneapi::ccl::stream &stream) { return oneapi::ccl::allreduce(sendbuff, recvbuff, count, dtype, rtype, _comm, stream); }, queue_ptr); } /// \brief reduce is a collective communication operation that performs the /// global reduction operation on values from all ranks of the communicator /// and returns the result to the root rank. /// \param send_buf the buffer with @c count elements of @c dtype that stores /// local data to be reduced /// \param recv_buf [out] the buffer to store reduced result, /// must have the same dimension as @c send_buf /// \param count the number of elements of type @c dtype in @c send_buf and @c recv_buf /// \param dtype the datatype of elements in @c send_buf and @c recv_buf /// \param root the rank that gets the result of reduction /// \param rtype the type of the reduction operation to be applied /// \param queue_ptr a sycl::queue ptr associated with the operation /// \return @ref void void reduce(const void *sendbuff, void *recvbuff, size_t count, oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype, int root, sycl::queue *queue_ptr) { call_func_wrapper( [=](const oneapi::ccl::stream &stream) { return oneapi::ccl::reduce(sendbuff, recvbuff, count, dtype, rtype, root, _comm, stream); }, queue_ptr); } /// \brief broadcast is a collective communication operation that broadcasts data /// from one rank of communicator (denoted as root) to all other ranks. /// Only support in-place operation /// \param send_buf the buffer with @c count elements of @c dtype that stores /// local data to be reduced /// \param recv_buf [out] the buffer to store reduced result /// \param count the number of elements of type @c dtype in @c buf /// \param dtype thedatatype of elements in @c buf /// \param root the rank that broadcasts @c buf /// \param queue_ptr a sycl::queue ptr associated with the operation /// \return @ref void void broadcast(void *sendbuff, void *recvbuff, size_t count, oneapi::ccl::datatype dtype, int root, sycl::queue *queue_ptr) { if (sendbuff != recvbuff) { throw std::runtime_error( "oneCCL broadcast only support in-place operation. " "send_buf and recv_buf must be same."); return; } call_func_wrapper( [=](const oneapi::ccl::stream &stream) { return oneapi::ccl::broadcast(recvbuff, count, dtype, root, _comm, stream); }, queue_ptr); } /// \brief reduce_scatter is a collective communication operation that performs the global reduction operation /// on values from all ranks of the communicator and scatters the result in blocks back to all ranks. 
/// \param send_buf the buffer with @c count elements of @c dtype that stores local data to be reduced /// \param recv_buf [out] the buffer to store reduced result, must have the same dimension as @c send_buf /// \param recv_count the number of elements of type @c dtype in receive block /// \param dtype the datatype of elements in @c send_buf and @c recv_buf /// \param rtype the type of the reduction operation to be applied /// \param queue_ptr a sycl::queue ptr associated with the operation /// \return @ref void void reduce_scatter(const void *sendbuff, void *recvbuff, size_t recv_count, oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype, sycl::queue *queue_ptr) { call_func_wrapper( [=](const oneapi::ccl::stream &stream) { return oneapi::ccl::reduce_scatter(sendbuff, recvbuff, recv_count, dtype, rtype, _comm, stream); }, queue_ptr); } private: oneapi::ccl::device _device_comm; oneapi::ccl::context _context_comm; oneapi::ccl::communicator _comm; sycl::queue _queue; bool _queue_init; oneapi::ccl::stream *_ccl_stream_ptr; template <class Fn> void call_func_wrapper(Fn func, sycl::queue *qptr) { if (_queue_init && *qptr != _queue) { call_func_async(func, qptr); } else { if(!_queue_init) { _queue = *qptr; _queue_init = true; _ccl_stream_ptr = new oneapi::ccl::stream(oneapi::ccl::create_stream(_queue)); } std::invoke(func, *_ccl_stream_ptr); } } class call_func_async { sycl::queue *_q_ptr; struct call_async_impl { oneapi::ccl::stream _ccl_stream_impl; oneapi::ccl::event _ccl_event_impl; template <class Fn> explicit call_async_impl(Fn func, sycl::queue *qptr) : _ccl_stream_impl(oneapi::ccl::create_stream(*qptr)), _ccl_event_impl(std::invoke(func, _ccl_stream_impl)) {} }; call_async_impl *_imp; public: template <class Fn> explicit call_func_async(Fn func, sycl::queue *qptr) : _q_ptr(qptr), _imp(new call_async_impl(func, qptr)) {} ~call_func_async() { _q_ptr->submit([&](sycl::handler &cgh) { cgh.host_task([=] { _imp->_ccl_event_impl.wait(); delete _imp; }); }); } }; }; typedef dpct::ccl::communicator_wrapper *comm_ptr; } // namespace ccl } // namespace dpct #endif // __DPCT_CCL_UTILS_HPP__
hpp
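communicator_wrapper above bundles oneCCL communicator creation (device, context, kvs) and exposes allreduce/reduce/broadcast/reduce_scatter calls that bind a oneapi::ccl::stream to the caller's queue. Below is a hedged usage sketch for a single rank, assuming oneCCL and these dpct headers are on the include path; oneapi::ccl::datatype::float32 and oneapi::ccl::reduction::sum are oneCCL names, everything else comes from ccl_utils.hpp, and a real multi-rank run would distribute the kvs address from the root rank instead of creating it locally.

#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <dpct/ccl_utils.hpp>

int main() {
  sycl::queue &q = dpct::get_default_queue();          // same device the wrapper binds to
  const int size = 1, rank = 0;                        // single-process demo

  auto kvs_addr = dpct::ccl::create_kvs_address();     // root rank would broadcast this
  dpct::ccl::communicator_wrapper comm(size, rank, kvs_addr);

  const size_t count = 4;
  float *buf = sycl::malloc_device<float>(count, q);
  q.fill(buf, 1.0f, count).wait();

  // Sum-reduce in place across all ranks (here: only this one).
  comm.allreduce(buf, buf, count, oneapi::ccl::datatype::float32,
                 oneapi::ccl::reduction::sum, &q);
  q.wait();

  sycl::free(buf, q);
  return 0;
}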
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/util.hpp
//==---- util.hpp ---------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_UTIL_HPP__ #define __DPCT_UTIL_HPP__ #include <sycl/sycl.hpp> #include <complex> #include <type_traits> #include <cassert> #include <cstdint> // TODO: Remove these function definitions once they exist in the DPC++ compiler #if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER) template <typename T> __SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate)) T __spirv_GroupNonUniformShuffle(__spv::Scope::Flag, T, unsigned) noexcept; template <typename T> __SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate)) T __spirv_GroupNonUniformShuffleDown(__spv::Scope::Flag, T, unsigned) noexcept; template <typename T> __SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate)) T __spirv_GroupNonUniformShuffleUp(__spv::Scope::Flag, T, unsigned) noexcept; #endif namespace dpct { namespace detail { template <typename tag, typename T> class generic_error_type { public: generic_error_type() = default; generic_error_type(T value) : value{value} {} operator T() const { return value; } private: T value; }; } // namespace detail using err0 = detail::generic_error_type<struct err0_tag, int>; using err1 = detail::generic_error_type<struct err1_tag, int>; template <int... Ints> struct integer_sequence {}; template <int Size, int... Ints> struct make_index_sequence : public make_index_sequence<Size - 1, Size - 1, Ints...> {}; template <int... Ints> struct make_index_sequence<0, Ints...> : public integer_sequence<Ints...> {}; template <typename T> struct DataType { using T2 = T; }; template <typename T> struct DataType<sycl::vec<T, 2>> { using T2 = std::complex<T>; }; inline void matrix_mem_copy(void *to_ptr, const void *from_ptr, int to_ld, int from_ld, int rows, int cols, int elem_size, memcpy_direction direction = automatic, sycl::queue &queue = dpct::get_default_queue(), bool async = false) { if (to_ptr == from_ptr && to_ld == from_ld) { return; } if (to_ld == from_ld) { size_t copy_size = elem_size * ((cols - 1) * (size_t)to_ld + rows); if (async) detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr, copy_size, direction); else detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr, copy_size, direction).wait(); } else { if (async) detail::dpct_memcpy(queue, to_ptr, from_ptr, elem_size * to_ld, elem_size * from_ld, elem_size * rows, cols, direction); else sycl::event::wait(detail::dpct_memcpy( queue, to_ptr, from_ptr, elem_size * to_ld, elem_size * from_ld, elem_size * rows, cols, direction)); } } /// Copy matrix data. The default leading dimension is column. /// \param [out] to_ptr A pointer points to the destination location. /// \param [in] from_ptr A pointer points to the source location. /// \param [in] to_ld The leading dimension the destination matrix. /// \param [in] from_ld The leading dimension the source matrix. /// \param [in] rows The number of rows of the source matrix. /// \param [in] cols The number of columns of the source matrix. /// \param [in] direction The direction of the data copy. /// \param [in] queue The queue where the routine should be executed. 
/// \param [in] async If this argument is true, the return of the function /// does NOT guarantee the copy is completed. template <typename T> inline void matrix_mem_copy(T *to_ptr, const T *from_ptr, int to_ld, int from_ld, int rows, int cols, memcpy_direction direction = automatic, sycl::queue &queue = dpct::get_default_queue(), bool async = false) { using Ty = typename DataType<T>::T2; matrix_mem_copy((void *)to_ptr, (void *)from_ptr, to_ld, from_ld, rows, cols, sizeof(Ty), direction, queue, async); } /// Cast the high or low 32 bits of a double to an integer. /// \param [in] d The double value. /// \param [in] use_high32 Cast the high 32 bits of the double if true; /// otherwise cast the low 32 bits. inline int cast_double_to_int(double d, bool use_high32 = true) { sycl::vec<double, 1> v0{d}; auto v1 = v0.as<sycl::int2>(); if (use_high32) return v1[1]; return v1[0]; } /// Combine two integers, the first as the high 32 bits and the second /// as the low 32 bits, into a double. /// \param [in] high32 The integer as the high 32 bits /// \param [in] low32 The integer as the low 32 bits inline double cast_ints_to_double(int high32, int low32) { sycl::int2 v0{low32, high32}; auto v1 = v0.as<sycl::vec<double, 1>>(); return v1; } /// Reverse the bit order of an unsigned integer /// \param [in] a Input unsigned integer value /// \returns Value of a with the bit order reversed template <typename T> inline T reverse_bits(T a) { static_assert(std::is_unsigned<T>::value && std::is_integral<T>::value, "unsigned integer required"); if (!a) return 0; T mask = 0; size_t count = 4 * sizeof(T); mask = ~mask >> count; while (count) { a = ((a & mask) << count) | ((a & ~mask) >> count); count = count >> 1; mask = mask ^ (mask << count); } return a; } /// \param [in] a The first value contains 4 bytes /// \param [in] b The second value contains 4 bytes /// \param [in] s The selector value, only lower 16bit used /// \returns the permutation result of 4 bytes selected in the way /// specified by \p s from \p a and \p b inline unsigned int byte_level_permute(unsigned int a, unsigned int b, unsigned int s) { unsigned int ret; ret = ((((std::uint64_t)b << 32 | a) >> (s & 0x7) * 8) & 0xff) | (((((std::uint64_t)b << 32 | a) >> ((s >> 4) & 0x7) * 8) & 0xff) << 8) | (((((std::uint64_t)b << 32 | a) >> ((s >> 8) & 0x7) * 8) & 0xff) << 16) | (((((std::uint64_t)b << 32 | a) >> ((s >> 12) & 0x7) * 8) & 0xff) << 24); return ret; } /// Find position of first least significant set bit in an integer. /// ffs(0) returns 0. /// /// \param [in] a Input integer value /// \returns The position template <typename T> inline int ffs(T a) { static_assert(std::is_integral<T>::value, "integer required"); return (sycl::ctz(a) + 1) % (sizeof(T) * 8 + 1); } /// select_from_sub_group allows work-items to obtain a copy of a value held by /// any other work-item in the sub_group. The input sub_group will be divided /// into several logical sub_groups with id range [0, \p logical_sub_group_size /// - 1]. Each work-item in logical sub_group gets value from another work-item /// whose id is \p remote_local_id. If \p remote_local_id is outside the /// logical sub_group id range, \p remote_local_id will modulo with \p /// logical_sub_group_size. The \p logical_sub_group_size must be a power of 2 /// and not exceed input sub_group size. 
/// \tparam T Input value type /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] remote_local_id Input source work item id /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T select_from_sub_group(sycl::sub_group g, T x, int remote_local_id, int logical_sub_group_size = 32) { unsigned int start_index = g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size; return sycl::select_from_group( g, x, start_index + remote_local_id % logical_sub_group_size); } /// shift_sub_group_left move values held by the work-items in a sub_group /// directly to another work-item in the sub_group, by shifting values a fixed /// number of work-items to the left. The input sub_group will be divided into /// several logical sub_groups with id range [0, \p logical_sub_group_size - 1]. /// Each work-item in logical sub_group gets value from another work-item whose /// id is caller's id adds \p delta. If calculated id is outside the logical /// sub_group id range, the work-item will get value from itself. The \p /// logical_sub_group_size must be a power of 2 and not exceed input sub_group /// size. /// \tparam T Input value type /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] delta Input delta /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T shift_sub_group_left(sycl::sub_group g, T x, unsigned int delta, int logical_sub_group_size = 32) { unsigned int id = g.get_local_linear_id(); unsigned int end_index = (id / logical_sub_group_size + 1) * logical_sub_group_size; T result = sycl::shift_group_left(g, x, delta); if ((id + delta) >= end_index) { result = x; } return result; } /// shift_sub_group_right move values held by the work-items in a sub_group /// directly to another work-item in the sub_group, by shifting values a fixed /// number of work-items to the right. The input sub_group will be divided into /// several logical_sub_groups with id range [0, \p logical_sub_group_size - 1]. /// Each work-item in logical_sub_group gets value from another work-item whose /// id is caller's id subtracts \p delta. If calculated id is outside the /// logical sub_group id range, the work-item will get value from itself. The \p /// logical_sub_group_size must be a power of 2 and not exceed input sub_group /// size. /// \tparam T Input value type /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] delta Input delta /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T shift_sub_group_right(sycl::sub_group g, T x, unsigned int delta, int logical_sub_group_size = 32) { unsigned int id = g.get_local_linear_id(); unsigned int start_index = id / logical_sub_group_size * logical_sub_group_size; T result = sycl::shift_group_right(g, x, delta); if ((id - start_index) < delta) { result = x; } return result; } /// permute_sub_group_by_xor permutes values by exchanging values held by pairs /// of work-items identified by computing the bitwise exclusive OR of the /// work-item id and some fixed mask. The input sub_group will be divided into /// several logical sub_groups with id range [0, \p logical_sub_group_size - 1]. /// Each work-item in logical sub_group gets value from another work-item whose /// id is bitwise exclusive OR of the caller's id and \p mask. 
If calculated id /// is outside the logical sub_group id range, the work-item will get value from /// itself. The \p logical_sub_group_size must be a power of 2 and not exceed /// input sub_group size. /// \tparam T Input value type /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] mask Input mask /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T permute_sub_group_by_xor(sycl::sub_group g, T x, unsigned int mask, int logical_sub_group_size = 32) { unsigned int id = g.get_local_linear_id(); unsigned int start_index = id / logical_sub_group_size * logical_sub_group_size; unsigned int target_offset = (id % logical_sub_group_size) ^ mask; return sycl::select_from_group(g, x, target_offset < logical_sub_group_size ? start_index + target_offset : id); } namespace experimental { /// Masked version of select_from_sub_group, which execute masked sub-group /// operation. The parameter member_mask indicating the work-items participating /// the call. Whether the n-th bit is set to 1 representing whether the /// work-item with id n is participating the call. All work-items named in /// member_mask must be executed with the same member_mask, or the result is /// undefined. /// \tparam T Input value type /// \param [in] member_mask Input mask /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] remote_local_id Input source work item id /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T select_from_sub_group(unsigned int member_mask, sycl::sub_group g, T x, int remote_local_id, int logical_sub_group_size = 32) { unsigned int start_index = g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size; unsigned logical_remote_id = start_index + remote_local_id % logical_sub_group_size; #if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER) #if defined(__SPIR__) return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x, logical_remote_id); #else throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group " "only supports SPIR-V backends."); #endif // __SPIR__ #else (void)g; (void)x; (void)remote_local_id; (void)logical_sub_group_size; (void)member_mask; throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not " "supported on host device and none intel compiler."); #endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER } /// Masked version of shift_sub_group_left, which execute masked sub-group /// operation. The parameter member_mask indicating the work-items participating /// the call. Whether the n-th bit is set to 1 representing whether the /// work-item with id n is participating the call. All work-items named in /// member_mask must be executed with the same member_mask, or the result is /// undefined. 
/// \tparam T Input value type /// \param [in] member_mask Input mask /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] delta Input delta /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T shift_sub_group_left(unsigned int member_mask, sycl::sub_group g, T x, unsigned int delta, int logical_sub_group_size = 32) { unsigned int id = g.get_local_linear_id(); unsigned int end_index = (id / logical_sub_group_size + 1) * logical_sub_group_size; #if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER) #if defined(__SPIR__) T result = __spirv_GroupNonUniformShuffleDown(__spv::Scope::Subgroup, x, delta); if ((id + delta) >= end_index) { result = x; } return result; #else throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_left " "only supports SPIR-V backends."); #endif // __SPIR__ #else (void)g; (void)x; (void)delta; (void)logical_sub_group_size; (void)member_mask; throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not " "supported on host device and none intel compiler."); #endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER } /// Masked version of shift_sub_group_right, which execute masked sub-group /// operation. The parameter member_mask indicating the work-items participating /// the call. Whether the n-th bit is set to 1 representing whether the /// work-item with id n is participating the call. All work-items named in /// member_mask must be executed with the same member_mask, or the result is /// undefined. /// \tparam T Input value type /// \param [in] member_mask Input mask /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] delta Input delta /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T shift_sub_group_right(unsigned int member_mask, sycl::sub_group g, T x, unsigned int delta, int logical_sub_group_size = 32) { unsigned int id = g.get_local_linear_id(); unsigned int start_index = id / logical_sub_group_size * logical_sub_group_size; #if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER) #if defined(__SPIR__) T result = __spirv_GroupNonUniformShuffleUp(__spv::Scope::Subgroup, x, delta); if ((id - start_index) < delta) { result = x; } return result; #else throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_right " "only supports SPIR-V backends."); #endif // __SPIR__ #else (void)g; (void)x; (void)delta; (void)logical_sub_group_size; (void)member_mask; throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not " "supported on host device and none intel compiler."); #endif // __SYCL_DEVICE_ONLY && __INTEL_LLVM_COMPILER } /// Masked version of permute_sub_group_by_xor, which execute masked sub-group /// operation. The parameter member_mask indicating the work-items participating /// the call. Whether the n-th bit is set to 1 representing whether the /// work-item with id n is participating the call. All work-items named in /// member_mask must be executed with the same member_mask, or the result is /// undefined. 
/// \tparam T Input value type /// \param [in] member_mask Input mask /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] mask Input mask /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T permute_sub_group_by_xor(unsigned int member_mask, sycl::sub_group g, T x, unsigned int mask, int logical_sub_group_size = 32) { unsigned int id = g.get_local_linear_id(); unsigned int start_index = id / logical_sub_group_size * logical_sub_group_size; unsigned int target_offset = (id % logical_sub_group_size) ^ mask; unsigned logical_remote_id = (target_offset < logical_sub_group_size) ? start_index + target_offset : id; #if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER) #if defined(__SPIR__) return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x, logical_remote_id); #else throw sycl::exception(sycl::errc::runtime, "Masked version of permute_sub_group_by_xor " "only supports SPIR-V backends."); #endif // __SPIR__ #else (void)g; (void)x; (void)mask; (void)logical_sub_group_size; (void)member_mask; throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not " "supported on host device and none intel compiler."); #endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER } } // namespace experimental /// Computes the multiplication of two complex numbers. /// \tparam T Complex element type /// \param [in] x The first input complex number /// \param [in] y The second input complex number /// \returns The result template <typename T> sycl::vec<T, 2> cmul(sycl::vec<T, 2> x, sycl::vec<T, 2> y) { std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]); t1 = t1 * t2; return sycl::vec<T, 2>(t1.real(), t1.imag()); } /// Computes the division of two complex numbers. /// \tparam T Complex element type /// \param [in] x The first input complex number /// \param [in] y The second input complex number /// \returns The result template <typename T> sycl::vec<T, 2> cdiv(sycl::vec<T, 2> x, sycl::vec<T, 2> y) { std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]); t1 = t1 / t2; return sycl::vec<T, 2>(t1.real(), t1.imag()); } /// Computes the magnitude of a complex number. /// \tparam T Complex element type /// \param [in] x The input complex number /// \returns The result template <typename T> T cabs(sycl::vec<T, 2> x) { std::complex<T> t(x[0], x[1]); return std::abs(t); } /// Computes the complex conjugate of a complex number. /// \tparam T Complex element type /// \param [in] x The input complex number /// \returns The result template <typename T> sycl::vec<T, 2> conj(sycl::vec<T, 2> x) { std::complex<T> t(x[0], x[1]); t = std::conj(t); return sycl::vec<T, 2>(t.real(), t.imag()); } inline int get_sycl_language_version() { #ifdef SYCL_LANGUAGE_VERSION return SYCL_LANGUAGE_VERSION; #else return 202000; #endif } namespace experimental { /// Synchronize work items from all work groups within a SYCL kernel. /// \param [in] item: Represents a work group. /// \param [in] counter: An atomic object defined on a device memory which can /// be accessed by work items in all work groups. The initial value of the /// counter should be zero. /// Note: Please make sure that all the work items of all work groups within /// a SYCL kernel can be scheduled actively at the same time on a device. 
template <int dimensions = 3> inline void nd_range_barrier(const sycl::nd_item<dimensions> &item, sycl::atomic_ref< unsigned int, sycl::memory_order::seq_cst, sycl::memory_scope::device, sycl::access::address_space::global_space> &counter) { static_assert(dimensions == 3, "dimensions must be 3."); unsigned int num_groups = item.get_group_range(2) * item.get_group_range(1) * item.get_group_range(0); item.barrier(); if (item.get_local_linear_id() == 0) { unsigned int inc = 1; unsigned int old_arrive = 0; bool is_group0 = (item.get_group(2) + item.get_group(1) + item.get_group(0) == 0); if (is_group0) { inc = 0x80000000 - (num_groups - 1); } old_arrive = counter.fetch_add(inc); // Synchronize all the work groups while (((old_arrive ^ counter.load()) & 0x80000000) == 0) ; } item.barrier(); } /// Synchronize work items from all work groups within a SYCL kernel. /// \param [in] item: Represents a work group. /// \param [in] counter: An atomic object defined on a device memory which can /// be accessed by work items in all work groups. The initial value of the /// counter should be zero. /// Note: Please make sure that all the work items of all work groups within /// a SYCL kernel can be scheduled actively at the same time on a device. template <> inline void nd_range_barrier(const sycl::nd_item<1> &item, sycl::atomic_ref< unsigned int, sycl::memory_order::seq_cst, sycl::memory_scope::device, sycl::access::address_space::global_space> &counter) { unsigned int num_groups = item.get_group_range(0); item.barrier(); if (item.get_local_linear_id() == 0) { unsigned int inc = 1; unsigned int old_arrive = 0; bool is_group0 = (item.get_group(0) == 0); if (is_group0) { inc = 0x80000000 - (num_groups - 1); } old_arrive = counter.fetch_add(inc); // Synchronize all the work groups while (((old_arrive ^ counter.load()) & 0x80000000) == 0) ; } item.barrier(); } /// The logical-group is a logical collection of some work-items within a /// work-group. /// Note: Please make sure that the logical-group size is a power of 2 in the /// range [1, current_sub_group_size]. class logical_group { sycl::nd_item<3> _item; sycl::group<3> _g; uint32_t _logical_group_size; uint32_t _group_linear_range_in_parent; public: /// Dividing \p parent_group into several logical-groups. /// \param [in] item Current work-item. /// \param [in] parent_group The group to be divided. /// \param [in] size The logical-group size. logical_group(sycl::nd_item<3> item, sycl::group<3> parent_group, uint32_t size) : _item(item), _g(parent_group), _logical_group_size(size) { _group_linear_range_in_parent = (_g.get_local_linear_range() - 1) / _logical_group_size + 1; } /// Returns the index of the work-item within the logical-group. uint32_t get_local_linear_id() const { return _item.get_local_linear_id() % _logical_group_size; } /// Returns the index of the logical-group in the parent group. uint32_t get_group_linear_id() const { return _item.get_local_linear_id() / _logical_group_size; } /// Returns the number of work-items in the logical-group. uint32_t get_local_linear_range() const { if (_g.get_local_linear_range() % _logical_group_size == 0) { return _logical_group_size; } uint32_t last_item_group_id = _g.get_local_linear_range() / _logical_group_size; uint32_t first_of_last_group = last_item_group_id * _logical_group_size; if (_item.get_local_linear_id() >= first_of_last_group) { return _g.get_local_linear_range() - first_of_last_group; } else { return _logical_group_size; } } /// Returns the number of logical-group in the parent group. 
uint32_t get_group_linear_range() const {
    return _group_linear_range_in_parent;
  }
};

// The original source of the function calculate_max_active_wg_per_xecore was
// under the license below:
//
// Copyright Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
/// This function is used for occupancy calculation; it computes the maximum
/// number of active work-groups per Xe-Core. Refer to
/// https://github.com/oneapi-src/oneAPI-samples/tree/master/Tools/GPU-Occupancy-Calculator
/// \param [out] num_wg Active work-group number.
/// \param [in] wg_size Work-group size.
/// \param [in] slm_size Shared local memory size.
/// \param [in] sg_size Sub-group size.
/// \param [in] used_barrier Whether a barrier is used.
/// \param [in] used_large_grf Whether the large General Register File is used.
/// \return If there is no error, returns 0.
/// If \p wg_size exceeds the max work-group size, the max work-group size will
/// be used instead of \p wg_size and -1 is returned.
inline int calculate_max_active_wg_per_xecore(int *num_wg, int wg_size,
                                              int slm_size = 0,
                                              int sg_size = 32,
                                              bool used_barrier = false,
                                              bool used_large_grf = false) {
  int ret = 0;
  const int slm_size_per_xe_core = 64 * 1024;
  const int max_barrier_registers = 32;
  dpct::device_ext &dev = dpct::get_current_device();
  size_t max_wg_size = dev.get_info<sycl::info::device::max_work_group_size>();
  if (wg_size > max_wg_size) {
    wg_size = max_wg_size;
    ret = -1;
  }

  int num_threads_ss = 56;
  int max_num_wg = 56;
  if (dev.has(sycl::aspect::ext_intel_gpu_eu_count_per_subslice) &&
      dev.has(sycl::aspect::ext_intel_gpu_hw_threads_per_eu)) {
    auto eu_count =
        dev.get_info<sycl::info::device::ext_intel_gpu_eu_count_per_subslice>();
    auto threads_count =
        dev.get_info<sycl::ext::intel::info::device::gpu_hw_threads_per_eu>();
    num_threads_ss = eu_count * threads_count;
    max_num_wg = eu_count * threads_count;
  }

  if (used_barrier) {
    max_num_wg = max_barrier_registers;
  }

  // Calculate num_wg_slm
  int num_wg_slm = 0;
  if (slm_size == 0) {
    num_wg_slm = max_num_wg;
  } else {
    num_wg_slm = std::floor((float)slm_size_per_xe_core / slm_size);
  }

  // Calculate num_wg_threads
  if (used_large_grf)
    num_threads_ss = num_threads_ss / 2;
  int num_threads = std::ceil((float)wg_size / sg_size);
  int num_wg_threads = std::floor((float)num_threads_ss / num_threads);

  // Calculate num_wg
  *num_wg = std::min(num_wg_slm, num_wg_threads);
  *num_wg = std::min(*num_wg, max_num_wg);
  return ret;
}
} // namespace experimental

/// If x <= 2, then return a pointer to the default queue;
/// otherwise, return x reinterpreted as a dpct::queue_ptr.
inline queue_ptr int_as_queue_ptr(uintptr_t x) {
  return x <= 2 ? &get_default_queue() : reinterpret_cast<queue_ptr>(x);
}

template <int n_nondefault_params, int n_default_params, typename T>
class args_selector;

/// args_selector is a helper class for extracting arguments from an
/// array of pointers to arguments or buffer of arguments to pass to a
/// kernel function.
///
/// \param R(Ts...) The type of the kernel
/// \param n_nondefault_params The number of nondefault parameters of the kernel
/// (excluding parameters like sycl::nd_item, etc.)
/// \param n_default_params The number of default parameters of the kernel
///
/// Example usage:
/// With the following kernel:
///   void foo(sycl::float2 *x, int n, sycl::nd_item<3> item_ct1, float f=.1) {}
/// and with the declaration:
///   args_selector<2, 1, decltype(foo)> selector(kernelParams, extra);
/// we have:
///   selector.get<0>() returns a reference to sycl::float2 *,
///   selector.get<1>() returns a reference to int,
///   selector.get<2>() returns a reference to float
template <int n_nondefault_params, int n_default_params, typename R,
          typename... Ts>
class args_selector<n_nondefault_params, n_default_params, R(Ts...)> {
private:
  void **kernel_params;
  char *args_buffer;

  template <int i> static constexpr int account_for_default_params() {
    constexpr int n_total_params = sizeof...(Ts);
    if constexpr (i >= n_nondefault_params) {
      return n_total_params - n_default_params + (i - n_nondefault_params);
    } else {
      return i;
    }
  }

public:
  /// Get the type of the ith argument of R(Ts...)
/// \param [in] i Index of parameter to get /// \returns Type of ith parameter template <int i> using arg_type = std::tuple_element_t<account_for_default_params<i>(), std::tuple<Ts...>>; private: template <int i> static constexpr int get_offset() { if constexpr (i == 0) { // we can assume args_buffer is properly aligned to the // first argument return 0; } else { constexpr int prev_off = get_offset<i-1>(); constexpr int prev_past_end = prev_off + sizeof(arg_type<i-1>); using T = arg_type<i>; // is the past-the-end of the i-1st element properly aligned // with the ith element's alignment? if constexpr (prev_past_end % alignof(T) == 0) { return prev_past_end; } // otherwise bump prev_past_end to match alignment else { return prev_past_end + (alignof(T) - (prev_past_end % alignof(T))); } } } static char *get_args_buffer(void **extra) { if (!extra) return nullptr; for (; (std::size_t) *extra != 0; ++extra) { if ((std::size_t) *extra == 1) { return static_cast<char*>(*(extra+1)); } } return nullptr; } public: /// If kernel_params is nonnull, then args_selector will /// extract arguments from kernel_params. Otherwise, it /// will extract them from extra. /// \param [in] kernel_params Array of pointers to arguments /// a or null pointer. /// \param [in] extra Array containing pointer to argument buffer. args_selector(void **kernel_params, void **extra) : kernel_params(kernel_params), args_buffer(get_args_buffer(extra)) {} /// Get a reference to the ith argument extracted from kernel_params /// or extra. /// \param [in] i Index of argument to get /// \returns Reference to the ith argument template <int i> arg_type<i> &get() { if (kernel_params) { return *static_cast<arg_type<i>*>(kernel_params[i]); } else { return *reinterpret_cast<arg_type<i>*>(args_buffer + get_offset<i>()); } } }; #ifdef _WIN32 #define DPCT_EXPORT __declspec(dllexport) #else #define DPCT_EXPORT #endif } // namespace dpct #endif // __DPCT_UTIL_HPP__
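// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header): it shows how
// the sub-group helpers defined above could be used inside a SYCL kernel to
// perform a __shfl_xor-style butterfly reduction over a logical sub-group of
// 16 work-items. The queue `q`, the device pointer `data`, and the kernel
// itself are assumptions made only for this example.
//
//   void butterfly_reduce_demo(sycl::queue &q, float *data, size_t n) {
//     q.parallel_for(
//         sycl::nd_range<1>(sycl::range<1>(n), sycl::range<1>(32)),
//         [=](sycl::nd_item<1> item) {
//           sycl::sub_group sg = item.get_sub_group();
//           float v = data[item.get_global_linear_id()];
//           // XOR butterfly: after the loop, every work-item of a logical
//           // 16-wide sub-group holds the sum over that logical sub-group.
//           for (unsigned int mask = 8; mask > 0; mask >>= 1)
//             v += dpct::permute_sub_group_by_xor(sg, v, mask, 16);
//           data[item.get_global_linear_id()] = v;
//         });
//   }
// ---------------------------------------------------------------------------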
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/image.hpp
//==---- image.hpp --------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_IMAGE_HPP__ #define __DPCT_IMAGE_HPP__ #include <sycl/sycl.hpp> #include "memory.hpp" #include "util.hpp" namespace dpct { enum class image_channel_data_type { signed_int, unsigned_int, fp, }; class image_channel; class image_wrapper_base; namespace detail { /// Image object type traits, with accessor type and sampled data type defined. /// The data type of an image accessor must be one of sycl::int4, sycl::uint4, /// sycl::float4 and sycl::half4. The data type of accessors with 8bits/16bits /// channel width will be 32 bits. sycl::half is an exception. template <class T> struct image_trait { using acc_data_t = sycl::vec<T, 4>; template <int dimensions> using accessor_t = sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read, sycl::access::target::image>; template <int dimensions> using array_accessor_t = sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read, sycl::access::target::image_array>; using data_t = T; using elem_t = T; static constexpr image_channel_data_type data_type = std::is_integral<T>::value ? (std::is_signed<T>::value ? image_channel_data_type::signed_int : image_channel_data_type::unsigned_int) : image_channel_data_type::fp; static constexpr int channel_num = 1; }; template <> struct image_trait<std::uint8_t> : public image_trait<std::uint32_t> { using data_t = std::uint8_t; using elem_t = data_t; }; template <> struct image_trait<std::uint16_t> : public image_trait<std::uint32_t> { using data_t = std::uint16_t; using elem_t = data_t; }; template <> struct image_trait<std::int8_t> : public image_trait<std::int32_t> { using data_t = std::int8_t; using elem_t = data_t; }; template <> struct image_trait<std::int16_t> : public image_trait<std::int32_t> { using data_t = std::int16_t; using elem_t = data_t; }; template <> struct image_trait<char> : public image_trait<typename std::conditional< std::is_signed<char>::value, signed char, unsigned char>::type> {}; template <class T> struct image_trait<sycl::vec<T, 1>> : public image_trait<T> {}; template <class T> struct image_trait<sycl::vec<T, 2>> : public image_trait<T> { using data_t = sycl::vec<T, 2>; static constexpr int channel_num = 2; }; template <class T> struct image_trait<sycl::vec<T, 3>> : public image_trait<sycl::vec<T, 4>> { static constexpr int channel_num = 3; }; template <class T> struct image_trait<sycl::vec<T, 4>> : public image_trait<T> { using data_t = sycl::vec<T, 4>; static constexpr int channel_num = 4; }; /// Functor to fetch data from read result of an image accessor. 
template <class T> struct fetch_data {
  using return_t = typename image_trait<T>::data_t;
  using acc_data_t = typename image_trait<T>::acc_data_t;

  return_t operator()(acc_data_t &&original_data) {
    return (return_t)original_data.r();
  }
};
template <class T>
struct fetch_data<sycl::vec<T, 1>> : public fetch_data<T> {};
template <class T> struct fetch_data<sycl::vec<T, 2>> {
  using return_t = typename image_trait<sycl::vec<T, 2>>::data_t;
  using acc_data_t = typename image_trait<sycl::vec<T, 2>>::acc_data_t;

  return_t operator()(acc_data_t &&origin_data) {
    return return_t(origin_data.r(), origin_data.g());
  }
};
template <class T>
struct fetch_data<sycl::vec<T, 3>> : public fetch_data<sycl::vec<T, 4>> {};
template <class T> struct fetch_data<sycl::vec<T, 4>> {
  using return_t = typename image_trait<sycl::vec<T, 4>>::data_t;
  using acc_data_t = typename image_trait<sycl::vec<T, 4>>::acc_data_t;

  return_t operator()(acc_data_t &&origin_data) {
    return return_t(origin_data.r(), origin_data.g(), origin_data.b(),
                    origin_data.a());
  }
};

/// Create an image according to the given type \p T and \p dims.
template <class T> static image_wrapper_base *create_image_wrapper(int dims);

/// Create an image with the given data type \p T, channel order and dims.
template <class T>
static image_wrapper_base *create_image_wrapper(unsigned channel_num, int dims);

/// Create an image with channel info and specified dimensions.
static image_wrapper_base *create_image_wrapper(image_channel channel, int dims);

} // namespace detail

/// Image channel info, including channel number, order, data width and type.
class image_channel {
  image_channel_data_type _type = image_channel_data_type::signed_int;
  /// Number of channels.
  unsigned _channel_num = 0;
  /// Total size of all channels in bytes.
  unsigned _total_size = 0;
  /// Size of each channel in bytes.
  unsigned _channel_size = 0;

public:
  /// Create image channel info according to template argument \p T.
  template <class T> static image_channel create() {
    image_channel channel;
    channel.set_channel_size(detail::image_trait<T>::channel_num,
                             sizeof(typename detail::image_trait<T>::elem_t) *
                                 8);
    channel.set_channel_data_type(detail::image_trait<T>::data_type);
    return channel;
  }

  image_channel() = default;

  image_channel_data_type get_channel_data_type() { return _type; }
  void set_channel_data_type(image_channel_data_type type) { _type = type; }

  unsigned get_total_size() { return _total_size; }

  unsigned get_channel_num() { return _channel_num; }
  void set_channel_num(unsigned channel_num) {
    _channel_num = channel_num;
    _total_size = _channel_size * _channel_num;
  }

  /// image_channel constructor.
  /// \param r Channel r width in bits.
  /// \param g Channel g width in bits. Should be the same as \p r, or zero.
  /// \param b Channel b width in bits. Should be the same as \p g, or zero.
  /// \param a Channel a width in bits. Should be the same as \p b, or zero.
  /// \param data_type Image channel data type: signed_int, unsigned_int or fp.
image_channel(int r, int g, int b, int a, image_channel_data_type data_type) { _type = data_type; if (a) { assert(r == a && "SYCL doesn't support different channel size"); assert(r == b && "SYCL doesn't support different channel size"); assert(r == g && "SYCL doesn't support different channel size"); set_channel_size(4, a); } else if (b) { assert(r == b && "SYCL doesn't support different channel size"); assert(r == g && "SYCL doesn't support different channel size"); set_channel_size(3, b); } else if (g) { assert(r == g && "SYCL doesn't support different channel size"); set_channel_size(2, g); } else { set_channel_size(1, r); } } sycl::image_channel_type get_channel_type() const { if (_channel_size == 4) { if (_type == image_channel_data_type::signed_int) return sycl::image_channel_type::signed_int32; else if (_type == image_channel_data_type::unsigned_int) return sycl::image_channel_type::unsigned_int32; else if (_type == image_channel_data_type::fp) return sycl::image_channel_type::fp32; } else if (_channel_size == 2) { if (_type == image_channel_data_type::signed_int) return sycl::image_channel_type::signed_int16; else if (_type == image_channel_data_type::unsigned_int) return sycl::image_channel_type::unsigned_int16; else if (_type == image_channel_data_type::fp) return sycl::image_channel_type::fp16; } else { if (_type == image_channel_data_type::signed_int) return sycl::image_channel_type::signed_int8; else if (_type == image_channel_data_type::unsigned_int) return sycl::image_channel_type::unsigned_int8; } assert(false && "unexpected channel data kind and channel size"); return sycl::image_channel_type::signed_int32; } void set_channel_type(sycl::image_channel_type type) { switch (type) { case sycl::image_channel_type::unsigned_int8: _type = image_channel_data_type::unsigned_int; _channel_size = 1; break; case sycl::image_channel_type::unsigned_int16: _type = image_channel_data_type::unsigned_int; _channel_size = 2; break; case sycl::image_channel_type::unsigned_int32: _type = image_channel_data_type::unsigned_int; _channel_size = 4; break; case sycl::image_channel_type::signed_int8: _type = image_channel_data_type::signed_int; _channel_size = 1; break; case sycl::image_channel_type::signed_int16: _type = image_channel_data_type::signed_int; _channel_size = 2; break; case sycl::image_channel_type::signed_int32: _type = image_channel_data_type::signed_int; _channel_size = 4; break; case sycl::image_channel_type::fp16: _type = image_channel_data_type::fp; _channel_size = 2; break; case sycl::image_channel_type::fp32: _type = image_channel_data_type::fp; _channel_size = 4; break; default: break; } _total_size = _channel_size * _channel_num; } sycl::image_channel_order get_channel_order() const { switch (_channel_num) { case 1: return sycl::image_channel_order::r; case 2: return sycl::image_channel_order::rg; case 3: return sycl::image_channel_order::rgb; case 4: return sycl::image_channel_order::rgba; default: return sycl::image_channel_order::r; } } /// Get the size for each channel in bits. unsigned get_channel_size() const { return _channel_size * 8; } /// Set channel size. /// \param in_channel_num Channels number to set. /// \param channel_size Size for each channel in bits. void set_channel_size(unsigned in_channel_num, unsigned channel_size) { if (in_channel_num < _channel_num) return; _channel_num = in_channel_num; _channel_size = channel_size / 8; _total_size = _channel_size * _channel_num; } }; /// 2D or 3D matrix data for image. 
class image_matrix { image_channel _channel; int _range[3] = {1, 1, 1}; int _dims = 0; void *_host_data = nullptr; /// Set range of each dimension. template <int dimensions> void set_range(sycl::range<dimensions> range) { for (int i = 0; i < dimensions; ++i) _range[i] = range[i]; _dims = dimensions; } template <int... DimIdx> sycl::range<sizeof...(DimIdx)> get_range(integer_sequence<DimIdx...>) { return sycl::range<sizeof...(DimIdx)>(_range[DimIdx]...); } public: /// Constructor with channel info and dimension size info. template <int dimensions> image_matrix(image_channel channel, sycl::range<dimensions> range) : _channel(channel) { set_range(range); _host_data = std::malloc(range.size() * _channel.get_total_size()); } image_matrix(sycl::image_channel_type channel_type, unsigned channel_num, size_t x, size_t y) { _channel.set_channel_type(channel_type); _channel.set_channel_num(channel_num); _dims = 1; _range[0] = x; if (y) { _dims = 2; _range[1] = y; } _host_data = std::malloc(_range[0] * _range[1] * _channel.get_total_size()); } /// Construct a new image class with the matrix data. template <int dimensions> sycl::image<dimensions> *create_image() { return create_image<dimensions>(_channel); } /// Construct a new image class with the matrix data. template <int dimensions> sycl::image<dimensions> *create_image(image_channel channel) { return new sycl::image<dimensions>( _host_data, channel.get_channel_order(), channel.get_channel_type(), get_range(make_index_sequence<dimensions>()), sycl::property::image::use_host_ptr()); } /// Get channel info. inline image_channel get_channel() { return _channel; } /// Get range of the image. sycl::range<3> get_range() { return sycl::range<3>(_range[0], _range[1], _range[2]); } /// Get matrix dims. inline int get_dims() { return _dims; } /// Convert to pitched data. pitched_data to_pitched_data() { return pitched_data(_host_data, _range[0], _range[0], _range[1]); } ~image_matrix() { if (_host_data) std::free(_host_data); _host_data = nullptr; } }; using image_matrix_p = image_matrix *; enum class image_data_type { matrix, linear, pitch, unsupport }; /// Image data info. 
class image_data { public: image_data() { _type = image_data_type::unsupport; } image_data(image_matrix_p matrix_data) { set_data(matrix_data); } image_data(void *data_ptr, size_t x_size, image_channel channel) { set_data(data_ptr, x_size, channel); } image_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size, image_channel channel) { set_data(data_ptr, x_size, y_size, pitch_size, channel); } void set_data(image_matrix_p matrix_data) { _type = image_data_type::matrix; _data = matrix_data; _channel = matrix_data->get_channel(); } void set_data(void *data_ptr, size_t x_size, image_channel channel) { _type = image_data_type::linear; _data = data_ptr; _x = x_size; _channel = channel; } void set_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size, image_channel channel) { _type = image_data_type::pitch; _data = data_ptr; _x = x_size; _y = y_size; _pitch = pitch_size; _channel = channel; } image_data_type get_data_type() const { return _type; } void set_data_type(image_data_type type) { _type = type; } void *get_data_ptr() const { return _data; } void set_data_ptr(void *data) { _data = data; } size_t get_x() const { return _x; } void set_x(size_t x) { _x = x; } size_t get_y() const { return _y; } void set_y(size_t y) { _y = y; } size_t get_pitch() const { return _pitch; } void set_pitch(size_t pitch) { _pitch = pitch; } image_channel get_channel() const { return _channel; } void set_channel(image_channel channel) { _channel = channel; } image_channel_data_type get_channel_data_type() { return _channel.get_channel_data_type(); } void set_channel_data_type(image_channel_data_type type) { _channel.set_channel_data_type(type); } unsigned get_channel_size() { return _channel.get_channel_size(); } void set_channel_size(unsigned channel_num, unsigned channel_size) { return _channel.set_channel_size(channel_num, channel_size); } unsigned get_channel_num() { return _channel.get_channel_num(); } void set_channel_num(unsigned num) { return _channel.set_channel_num(num); } sycl::image_channel_type get_channel_type() { return _channel.get_channel_type(); } void set_channel_type(sycl::image_channel_type type) { return _channel.set_channel_type(type); } private: image_data_type _type; void *_data = nullptr; size_t _x, _y, _pitch; image_channel _channel; }; /// Image sampling info, include addressing mode, filtering mode and /// normalization info. class sampling_info { sycl::addressing_mode _addressing_mode = sycl::addressing_mode::clamp_to_edge; sycl::filtering_mode _filtering_mode = sycl::filtering_mode::nearest; sycl::coordinate_normalization_mode _coordinate_normalization_mode = sycl::coordinate_normalization_mode::unnormalized; public: sycl::addressing_mode get_addressing_mode() { return _addressing_mode; } void set(sycl::addressing_mode addressing_mode) { _addressing_mode = addressing_mode; } sycl::filtering_mode get_filtering_mode() { return _filtering_mode; } void set(sycl::filtering_mode filtering_mode) { _filtering_mode = filtering_mode; } sycl::coordinate_normalization_mode get_coordinate_normalization_mode() { return _coordinate_normalization_mode; } void set(sycl::coordinate_normalization_mode coordinate_normalization_mode) { _coordinate_normalization_mode = coordinate_normalization_mode; } bool is_coordinate_normalized() { return _coordinate_normalization_mode == sycl::coordinate_normalization_mode::normalized; } void set_coordinate_normalization_mode(int is_normalized) { _coordinate_normalization_mode = is_normalized ? 
sycl::coordinate_normalization_mode::normalized : sycl::coordinate_normalization_mode::unnormalized; } void set(sycl::addressing_mode addressing_mode, sycl::filtering_mode filtering_mode, sycl::coordinate_normalization_mode coordinate_normalization_mode) { set(addressing_mode); set(filtering_mode); set(coordinate_normalization_mode); } void set(sycl::addressing_mode addressing_mode, sycl::filtering_mode filtering_mode, int is_normalized) { set(addressing_mode); set(filtering_mode); set_coordinate_normalization_mode(is_normalized); } sycl::sampler get_sampler() { return sycl::sampler(_coordinate_normalization_mode, _addressing_mode, _filtering_mode); } }; /// Image base class. class image_wrapper_base { sampling_info _sampling_info; image_data _data; public: virtual ~image_wrapper_base() = 0; void attach(image_data data) { set_data(data); } /// Attach matrix data to this class. void attach(image_matrix *matrix) { detach(); image_wrapper_base::set_data(image_data(matrix)); } /// Attach matrix data to this class. void attach(image_matrix *matrix, image_channel channel) { attach(matrix); image_wrapper_base::set_channel(channel); } /// Attach linear data to this class. void attach(const void *ptr, size_t count) { attach(ptr, count, get_channel()); } /// Attach linear data to this class. void attach(const void *ptr, size_t count, image_channel channel) { detach(); image_wrapper_base::set_data(image_data(const_cast<void *>(ptr), count, channel)); } /// Attach 2D data to this class. void attach(const void *data, size_t x, size_t y, size_t pitch) { attach(data, x, y, pitch, get_channel()); } /// Attach 2D data to this class. void attach(const void *data, size_t x, size_t y, size_t pitch, image_channel channel) { detach(); image_wrapper_base::set_data( image_data(const_cast<void *>(data), x, y, pitch, channel)); } /// Detach data. 
virtual void detach() {} sampling_info get_sampling_info() { return _sampling_info; } void set_sampling_info(sampling_info info) { _sampling_info = info; } const image_data &get_data() { return _data; } void set_data(image_data data) { _data = data; } image_channel get_channel() { return _data.get_channel(); } void set_channel(image_channel channel) { _data.set_channel(channel); } image_channel_data_type get_channel_data_type() { return _data.get_channel_data_type(); } void set_channel_data_type(image_channel_data_type type) { _data.set_channel_data_type(type); } unsigned get_channel_size() { return _data.get_channel_size(); } void set_channel_size(unsigned channel_num, unsigned channel_size) { return _data.set_channel_size(channel_num, channel_size); } sycl::addressing_mode get_addressing_mode() { return _sampling_info.get_addressing_mode(); } void set(sycl::addressing_mode addressing_mode) { _sampling_info.set(addressing_mode); } sycl::filtering_mode get_filtering_mode() { return _sampling_info.get_filtering_mode(); } void set(sycl::filtering_mode filtering_mode) { _sampling_info.set(filtering_mode); } sycl::coordinate_normalization_mode get_coordinate_normalization_mode() { return _sampling_info.get_coordinate_normalization_mode(); } void set(sycl::coordinate_normalization_mode coordinate_normalization_mode) { _sampling_info.set(coordinate_normalization_mode); } bool is_coordinate_normalized() { return _sampling_info.is_coordinate_normalized(); } void set_coordinate_normalization_mode(int is_normalized) { _sampling_info.set_coordinate_normalization_mode(is_normalized); } void set(sycl::addressing_mode addressing_mode, sycl::filtering_mode filtering_mode, sycl::coordinate_normalization_mode coordinate_normalization_mode) { set(addressing_mode); set(filtering_mode); set(coordinate_normalization_mode); } void set(sycl::addressing_mode addressing_mode, sycl::filtering_mode filtering_mode, int is_normalized) { set(addressing_mode); set(filtering_mode); set_coordinate_normalization_mode(is_normalized); } unsigned get_channel_num() { return _data.get_channel_num(); } void set_channel_num(unsigned num) { return _data.set_channel_num(num); } sycl::image_channel_type get_channel_type() { return _data.get_channel_type(); } void set_channel_type(sycl::image_channel_type type) { return _data.set_channel_type(type); } sycl::sampler get_sampler() { return _sampling_info.get_sampler(); } }; inline image_wrapper_base::~image_wrapper_base() {} using image_wrapper_base_p = image_wrapper_base *; template <class T, int dimensions, bool IsImageArray> class image_accessor_ext; /// Image class, wrapper of sycl::image. 
template <class T, int dimensions, bool IsImageArray = false> class image_wrapper : public image_wrapper_base { sycl::image<dimensions> *_image = nullptr; #ifndef DPCT_USM_LEVEL_NONE std::vector<char> _host_buffer; #endif void create_image(sycl::queue q) { auto &data = get_data(); if (data.get_data_type() == image_data_type::matrix) { _image = static_cast<image_matrix_p>(data.get_data_ptr()) ->create_image<dimensions>(data.get_channel()); return; } auto ptr = data.get_data_ptr(); auto channel = data.get_channel(); if (detail::get_pointer_attribute(q, ptr) == detail::pointer_access_attribute::device_only) { #ifdef DPCT_USM_LEVEL_NONE ptr = get_buffer(ptr) .template get_access<sycl::access_mode::read_write>() .get_pointer(); #else auto sz = data.get_x(); if (data.get_data_type() == image_data_type::pitch) sz *= channel.get_total_size() * data.get_y(); _host_buffer.resize(sz); q.memcpy(_host_buffer.data(), ptr, sz).wait(); ptr = _host_buffer.data(); #endif } if constexpr (dimensions == 1) { assert(data.get_data_type() == image_data_type::linear); _image = new sycl::image<1>( ptr, channel.get_channel_order(), channel.get_channel_type(), sycl::range<1>(data.get_x() / channel.get_total_size())); } else if constexpr (dimensions == 2) { assert(data.get_data_type() == image_data_type::pitch); _image = new sycl::image<2>(ptr, channel.get_channel_order(), channel.get_channel_type(), sycl::range<2>(data.get_x(), data.get_y()), sycl::range<1>(data.get_pitch())); } else { throw std::runtime_error("3D image only support matrix data"); } return; } public: using acc_data_t = typename detail::image_trait<T>::acc_data_t; using accessor_t = typename image_accessor_ext<T, IsImageArray ? (dimensions - 1) : dimensions, IsImageArray>::accessor_t; image_wrapper() { set_channel(image_channel::create<T>()); } ~image_wrapper() { detach(); } /// Get image accessor. accessor_t get_access(sycl::handler &cgh, sycl::queue &q = get_default_queue()) { if (!_image) create_image(q); return accessor_t(*_image, cgh); } /// Detach data. void detach() override { if (_image) delete _image; _image = nullptr; } }; /// Wrap sampler and image accessor together. template <class T, int dimensions, bool IsImageArray = false> class image_accessor_ext { public: using accessor_t = typename detail::image_trait<T>::template accessor_t<dimensions>; using data_t = typename detail::image_trait<T>::data_t; sycl::sampler _sampler; accessor_t _img_acc; public: image_accessor_ext(sycl::sampler sampler, accessor_t acc) : _sampler(sampler), _img_acc(acc) {} /// Read data from accessor. template <bool Available = dimensions == 3> typename std::enable_if<Available, data_t>::type read(float x, float y, float z) { return detail::fetch_data<T>()( _img_acc.read(sycl::float4(x, y, z, 0), _sampler)); } /// Read data from accessor. template <class Coord0, class Coord1, class Coord2, bool Available = dimensions == 3 && std::is_integral<Coord0>::value &&std::is_integral<Coord1>::value &&std::is_integral<Coord2>::value> typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y, Coord2 z) { return detail::fetch_data<T>()( _img_acc.read(sycl::int4(x, y, z, 0), _sampler)); } /// Read data from accessor. template <bool Available = dimensions == 2> typename std::enable_if<Available, data_t>::type read(float x, float y) { return detail::fetch_data<T>()( _img_acc.read(sycl::float2(x, y), _sampler)); } /// Read data from accessor. 
template <class Coord0, class Coord1, bool Available = dimensions == 2 && std::is_integral<Coord0>::value &&std::is_integral<Coord1>::value> typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y) { return detail::fetch_data<T>()( _img_acc.read(sycl::int2(x, y), _sampler)); } /// Read data from accessor. template <bool Available = dimensions == 1> typename std::enable_if<Available, data_t>::type read(float x) { return detail::fetch_data<T>()(_img_acc.read(x, _sampler)); } /// Read data from accessor. template <class CoordT, bool Available = dimensions == 1 && std::is_integral<CoordT>::value> typename std::enable_if<Available, data_t>::type read(CoordT x) { return detail::fetch_data<T>()(_img_acc.read(x, _sampler)); } }; template <class T, int dimensions> class image_accessor_ext<T, dimensions, true> { public: using accessor_t = typename detail::image_trait<T>::template array_accessor_t<dimensions>; using data_t = typename detail::image_trait<T>::data_t; sycl::sampler _sampler; accessor_t _img_acc; public: image_accessor_ext(sycl::sampler sampler, accessor_t acc) : _sampler(sampler), _img_acc(acc) {} /// Read data from accessor. template <bool Available = dimensions == 2> typename std::enable_if<Available, data_t>::type read(int index, float x, float y) { return detail::fetch_data<T>()( _img_acc[index].read(sycl::float2(x, y), _sampler)); } /// Read data from accessor. template <bool Available = dimensions == 2> typename std::enable_if<Available, data_t>::type read(int index, int x, int y) { return detail::fetch_data<T>()( _img_acc[index].read(sycl::int2(x, y), _sampler)); } /// Read data from accessor. template <bool Available = dimensions == 1> typename std::enable_if<Available, data_t>::type read(int index, float x) { return detail::fetch_data<T>()( _img_acc[index].read(x, _sampler)); } /// Read data from accessor. template <bool Available = dimensions == 1> typename std::enable_if<Available, data_t>::type read(int index, int x) { return detail::fetch_data<T>()( _img_acc[index].read(x, _sampler)); } }; /// Create image wrapper according to image data and sampling info. /// \return Pointer to image wrapper base class. /// \param data Image data used to create image wrapper. /// \param info Image sampling info used to create image wrapper. /// \returns Pointer to base class of created image wrapper object. static inline image_wrapper_base *create_image_wrapper(image_data data, sampling_info info) { image_channel channel; int dims = 1; if (data.get_data_type() == image_data_type::matrix) { auto matrix = (image_matrix_p)data.get_data_ptr(); channel = matrix->get_channel(); dims = matrix->get_dims(); } else { if (data.get_data_type() == image_data_type::pitch) { dims = 2; } channel = data.get_channel(); } if (auto ret = detail::create_image_wrapper(channel, dims)) { ret->set_sampling_info(info); ret->set_data(data); return ret; } return nullptr; } namespace detail { /// Create image according with given type \p T and \p dims. 
template <class T> static image_wrapper_base *create_image_wrapper(int dims) { switch (dims) { case 1: return new image_wrapper<T, 1>(); case 2: return new image_wrapper<T, 2>(); case 3: return new image_wrapper<T, 3>(); default: return nullptr; } } /// Create image with given data type \p T, channel order and dims template <class T> static image_wrapper_base *create_image_wrapper(unsigned channel_num, int dims) { switch (channel_num) { case 1: return create_image_wrapper<T>(dims); case 2: return create_image_wrapper<sycl::vec<T, 2>>(dims); case 3: return create_image_wrapper<sycl::vec<T, 3>>(dims); case 4: return create_image_wrapper<sycl::vec<T, 4>>(dims); default: return nullptr; } } /// Create image with channel info and specified dimensions. static image_wrapper_base *create_image_wrapper(image_channel channel, int dims) { switch (channel.get_channel_type()) { case sycl::image_channel_type::fp16: return create_image_wrapper<sycl::half>(channel.get_channel_num(), dims); case sycl::image_channel_type::fp32: return create_image_wrapper<float>(channel.get_channel_num(), dims); case sycl::image_channel_type::signed_int8: return create_image_wrapper<std::int8_t>(channel.get_channel_num(), dims); case sycl::image_channel_type::signed_int16: return create_image_wrapper<std::int16_t>(channel.get_channel_num(), dims); case sycl::image_channel_type::signed_int32: return create_image_wrapper<std::int32_t>(channel.get_channel_num(), dims); case sycl::image_channel_type::unsigned_int8: return create_image_wrapper<std::uint8_t>(channel.get_channel_num(), dims); case sycl::image_channel_type::unsigned_int16: return create_image_wrapper<std::uint16_t>(channel.get_channel_num(), dims); case sycl::image_channel_type::unsigned_int32: return create_image_wrapper<std::uint32_t>(channel.get_channel_num(), dims); default: return nullptr; } } } // namespace detail } // namespace dpct #endif // !__DPCT_IMAGE_HPP__
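// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header): one possible
// way to wrap pitched device data with image_wrapper and sample it through
// image_accessor_ext inside a SYCL kernel. The queue `q`, pointer `dev_ptr`,
// and the `width`/`height`/`pitch_in_bytes` values are assumptions made only
// for this example; the exact units expected by attach() should be checked
// against the data being migrated.
//
//   dpct::image_wrapper<sycl::float4, 2> tex;
//   tex.set(sycl::addressing_mode::clamp_to_edge,
//           sycl::filtering_mode::linear,
//           sycl::coordinate_normalization_mode::unnormalized);
//   tex.attach(dev_ptr, width, height, pitch_in_bytes);
//   q.submit([&](sycl::handler &cgh) {
//     dpct::image_accessor_ext<sycl::float4, 2> img(tex.get_sampler(),
//                                                   tex.get_access(cgh, q));
//     cgh.parallel_for(sycl::range<2>(height, width), [=](sycl::id<2> id) {
//       // Sample with unnormalized floating-point coordinates (x, y).
//       sycl::float4 texel = img.read((float)id[1], (float)id[0]);
//       (void)texel; // ... consume the texel ...
//     });
//   });
// ---------------------------------------------------------------------------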
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/kernel.hpp
//==---- kernel.hpp -------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_KERNEL_HPP__ #define __DPCT_KERNEL_HPP__ #include <sycl/sycl.hpp> #ifdef _WIN32 #include <unordered_set> #include <windows.h> #else #include <dlfcn.h> #endif #if defined(__has_include) && __has_include(<filesystem>) #include <filesystem> #elif defined(__has_include) && __has_include(<experimental/filesystem>) #include <experimental/filesystem> #else #error "SYCLomatic runtime requires C++ filesystem support" #endif #include <random> #include <image.hpp> #include <fstream> namespace dpct { typedef void (*kernel_functor)(sycl::queue &, const sycl::nd_range<3> &, unsigned int, void **, void **); struct kernel_function_info { int max_work_group_size = 0; }; static inline void get_kernel_function_info(kernel_function_info *kernel_info, const void *function) { kernel_info->max_work_group_size = dpct::dev_mgr::instance() .current_device() .get_info<sycl::info::device::max_work_group_size>(); } static inline kernel_function_info get_kernel_function_info(const void *function) { kernel_function_info kernel_info; kernel_info.max_work_group_size = dpct::dev_mgr::instance() .current_device() .get_info<sycl::info::device::max_work_group_size>(); return kernel_info; } namespace detail { #if defined(__has_include) && __has_include(<filesystem>) namespace fs = std::filesystem; #else namespace fs = std::experimental::filesystem; #endif /// Write data to temporary file and return absolute path to temporary file. /// Temporary file is created in a temporary directory both of which have random /// names with only the user having access permissions. Only one temporary file /// will be created in the temporary directory. 
static inline fs::path write_data_to_file(char const *const data, size_t size) { std::error_code ec; if (sizeof(size_t) >= sizeof(std::streamsize) && size > (std::numeric_limits<std::streamsize>::max)()) throw std::runtime_error("data file too large"); // random number generator std::random_device dev; std::mt19937 prng(dev()); std::uniform_int_distribution<uint64_t> rand(0); // find temporary directory auto tmp_dir = fs::temp_directory_path(ec); if (ec) throw std::runtime_error("could not find temporary directory"); // create private directory std::stringstream directory; fs::path directory_path; constexpr int max_attempts = 5; int i; for (i = 0; i < max_attempts; i++) { directory << std::hex << rand(prng); directory_path = tmp_dir / directory.str(); if (fs::create_directory(directory_path)) { break; } } if (i == max_attempts) throw std::runtime_error("could not create directory"); // only allow owner permissions to private directory fs::permissions(directory_path, fs::perms::owner_all, ec); if (ec) throw std::runtime_error("could not set directory permissions"); // random filename in private directory std::stringstream filename; filename << std::hex << rand(prng); #ifdef _WIN32 auto filepath = directory_path / (filename.str() + ".dll"); #else auto filepath = directory_path / filename.str(); #endif // write data to temporary file auto outfile = std::ofstream(filepath, std::ios::out | std::ios::binary); if (outfile) { // only allow program to write file fs::permissions(filepath, fs::perms::owner_write, ec); if (ec) throw std::runtime_error("could not set permissions"); outfile.write(data, size); if (!outfile.good()) throw std::runtime_error("could not write data"); outfile.close(); // only allow program to read/execute file fs::permissions(filepath, fs::perms::owner_read | fs::perms::owner_exec, ec); if (ec) throw std::runtime_error("could not set permissions"); } else throw std::runtime_error("could not write data"); // check temporary file contents auto infile = std::ifstream(filepath, std::ios::in | std::ios::binary); if (infile) { bool mismatch = false; size_t cnt = 0; while (1) { char c; infile.get(c); if (infile.eof()) break; if (c != data[cnt++]) mismatch = true; } if (cnt != size || mismatch) throw std::runtime_error("file contents not written correctly"); } else throw std::runtime_error("could not validate file"); if (!filepath.is_absolute()) throw std::runtime_error("temporary filepath is not absolute"); return filepath; } static inline uint16_t extract16(unsigned char const *const ptr) { uint16_t ret = 0; ret |= static_cast<uint16_t>(ptr[0]) << 0; ret |= static_cast<uint16_t>(ptr[1]) << 8; return (ret); } static inline uint32_t extract32(unsigned char const *const ptr) { uint32_t ret = 0; ret |= static_cast<uint32_t>(ptr[0]) << 0; ret |= static_cast<uint32_t>(ptr[1]) << 8; ret |= static_cast<uint32_t>(ptr[2]) << 16; ret |= static_cast<uint32_t>(ptr[3]) << 24; return (ret); } static inline uint64_t extract64(unsigned char const *const ptr) { uint64_t ret = 0; ret |= static_cast<uint64_t>(ptr[0]) << 0; ret |= static_cast<uint64_t>(ptr[1]) << 8; ret |= static_cast<uint64_t>(ptr[2]) << 16; ret |= static_cast<uint64_t>(ptr[3]) << 24; ret |= static_cast<uint64_t>(ptr[4]) << 32; ret |= static_cast<uint64_t>(ptr[5]) << 40; ret |= static_cast<uint64_t>(ptr[6]) << 48; ret |= static_cast<uint64_t>(ptr[7]) << 56; return (ret); } static inline uint64_t get_lib_size(char const *const blob) { #ifdef _WIN32 /////////////////////////////////////////////////////////////////////// // Analyze DOS 
stub unsigned char const *const ublob = reinterpret_cast<unsigned char const *const>(blob); if (ublob[0] != 0x4d || ublob[1] != 0x5a) { throw std::runtime_error("Blob is not a Windows DLL."); } uint32_t pe_header_offset = extract32(ublob + 0x3c); /////////////////////////////////////////////////////////////////////// // Ananlyze PE-header unsigned char const *const pe_header = ublob + pe_header_offset; // signature uint32_t pe_signature = extract32(pe_header + 0); if (pe_signature != 0x00004550) { throw std::runtime_error("PE-header signature is not 0x00004550"); } // machine uint16_t machine = extract16(pe_header + 4); if (machine != 0x8664) { throw std::runtime_error("Only DLLs for x64 supported"); } // number of sections uint16_t number_of_sections = extract16(pe_header + 6); // sizeof optional header uint16_t sizeof_optional_header = extract16(pe_header + 20); // magic uint16_t magic = extract16(pe_header + 24); if (magic != 0x10b && magic != 0x20b) { throw std::runtime_error("MAGIC is not 0x010b or 0x020b"); } /////////////////////////////////////////////////////////////////////// // Analyze tail of optional header constexpr int coff_header_size = 24; unsigned char const *const tail_of_optional_header = pe_header + coff_header_size + sizeof_optional_header; if (extract64(tail_of_optional_header - 8) != 0) { throw std::runtime_error("Optional header not zero-padded"); } /////////////////////////////////////////////////////////////////////// // Analyze last section header constexpr int section_header_size = 40; unsigned char const *const last_section_header = tail_of_optional_header + section_header_size * (number_of_sections - 1); uint32_t sizeof_raw_data = extract32(last_section_header + 16); uint32_t pointer_to_raw_data = extract32(last_section_header + 20); return sizeof_raw_data + pointer_to_raw_data; #else if (blob[0] != 0x7F || blob[1] != 'E' || blob[2] != 'L' || blob[3] != 'F') throw std::runtime_error("Blob is not in ELF format"); if (blob[4] != 0x02) throw std::runtime_error("Only 64-bit headers are supported"); if (blob[5] != 0x01) throw std::runtime_error("Only little-endian headers are supported"); unsigned char const *const ublob = reinterpret_cast<unsigned char const *const>(blob); uint64_t e_shoff = extract64(ublob + 0x28); uint16_t e_shentsize = extract16(ublob + 0x3A); uint16_t e_shnum = extract16(ublob + 0x3C); return e_shoff + (e_shentsize * e_shnum); #endif } #ifdef _WIN32 class path_lib_record { public: void operator=(const path_lib_record &) = delete; ~path_lib_record() { for (auto entry : lib_to_path) { FreeLibrary(static_cast<HMODULE>(entry.first)); fs::permissions(entry.second, fs::perms::owner_all); fs::remove_all(entry.second.remove_filename()); } } static void record_lib_path(fs::path path, void *library) { lib_to_path[library] = path; } static void remove_lib(void *library) { auto path = lib_to_path[library]; std::error_code ec; FreeLibrary(static_cast<HMODULE>(library)); fs::permissions(path, fs::perms::owner_all); if (fs::remove_all(path.remove_filename(), ec) != 2 || ec) // one directory and one temporary file should have been deleted throw std::runtime_error("Directory delete failed"); lib_to_path.erase(library); } private: static inline std::unordered_map<void *, fs::path> lib_to_path; }; #endif } // namespace detail class kernel_library { public: kernel_library() : ptr{nullptr} {} kernel_library(void *ptr) : ptr{ptr} {} operator void *() const { return ptr; } private: void *ptr; #ifdef _WIN32 static inline detail::path_lib_record 
single_instance_to_trigger_destructor; #endif }; namespace detail { static inline kernel_library load_dl_from_data(char const *const data, size_t size) { fs::path filename = write_data_to_file(data, size); #ifdef _WIN32 void *so = LoadLibraryW(filename.wstring().c_str()); #else void *so = dlopen(filename.c_str(), RTLD_LAZY); #endif if (so == nullptr) throw std::runtime_error("Failed to load kernel library"); #ifdef _WIN32 detail::path_lib_record::record_lib_path(filename, so); #else std::error_code ec; // Windows DLL cannot be deleted while in use if (fs::remove_all(filename.remove_filename(), ec) != 2 || ec) // one directory and one temporary file should have been deleted throw std::runtime_error("Directory delete failed"); #endif return so; } } // namespace detail /// Load kernel library and return a handle to use the library. /// \param [in] name The name of the library. static inline kernel_library load_kernel_library(const std::string &name) { std::ifstream ifs; ifs.open(name, std::ios::in | std::ios::binary); std::stringstream buffer; buffer << ifs.rdbuf(); const std::string buffer_string = buffer.str(); return detail::load_dl_from_data(buffer_string.c_str(), buffer_string.size()); } /// Load kernel library whose image is alreay in memory and return a handle to /// use the library. /// \param [in] image A pointer to the image in memory. static inline kernel_library load_kernel_library_mem(char const *const image) { const size_t size = detail::get_lib_size(image); return detail::load_dl_from_data(image, size); } /// Unload kernel library. /// \param [in,out] library Handle to the library to be closed. static inline void unload_kernel_library(const kernel_library &library) { #ifdef _WIN32 detail::path_lib_record::remove_lib(library); #else dlclose(library); #endif } class kernel_function { public: kernel_function() : ptr{nullptr} {} kernel_function(dpct::kernel_functor ptr) : ptr{ptr} {} operator void *() const { return ((void *)ptr); } void operator()(sycl::queue &q, const sycl::nd_range<3> &range, unsigned int a, void **args, void **extra) { ptr(q, range, a, args, extra); } private: dpct::kernel_functor ptr; }; /// Find kernel function in a kernel library and return its address. /// \param [in] library Handle to the kernel library. /// \param [in] name Name of the kernel function. static inline dpct::kernel_function get_kernel_function(kernel_library &library, const std::string &name) { #ifdef _WIN32 dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>( GetProcAddress(static_cast<HMODULE>(static_cast<void *>(library)), (name + std::string("_wrapper")).c_str())); #else dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>( dlsym(library, (name + std::string("_wrapper")).c_str())); #endif if (fn == nullptr) throw std::runtime_error("Failed to get function"); return fn; } /// Invoke a kernel function. /// \param [in] function kernel function. /// \param [in] queue SYCL queue used to execute kernel /// \param [in] groupRange SYCL group range /// \param [in] localRange SYCL local range /// \param [in] localMemSize The size of local memory required by the kernel /// function. /// \param [in] kernelParams Array of pointers to kernel arguments. /// \param [in] extra Extra arguments. 
static inline void invoke_kernel_function(dpct::kernel_function &function, sycl::queue &queue, sycl::range<3> groupRange, sycl::range<3> localRange, unsigned int localMemSize, void **kernelParams, void **extra) { function(queue, sycl::nd_range<3>(groupRange * localRange, localRange), localMemSize, kernelParams, extra); } /// Find image wrapper in a kernel library and return its address. /// \param [in] library Handle to the kernel library. /// \param [in] name Name of the target image wrapper. static inline dpct::image_wrapper_base_p get_image_wrapper(dpct::kernel_library &library, const std::string &name) { #ifdef _WIN32 dpct::image_wrapper_base_p fn = reinterpret_cast<dpct::image_wrapper_base_p>(GetProcAddress( static_cast<HMODULE>(static_cast<void *>(library)), name.c_str())); #else dpct::image_wrapper_base_p fn = reinterpret_cast<dpct::image_wrapper_base_p>( dlsym(library, name.c_str())); #endif if (fn == nullptr) throw std::runtime_error("Failed to get image"); return fn; } } // namespace dpct #endif // __DPCT_KERNEL_HPP__
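// ---------------------------------------------------------------------------
// Editor's note: a minimal, illustrative usage sketch of the kernel-library
// helpers defined above; it is not part of the original header. The image
// path "my_kernels.bin" and the kernel name "vector_add" are hypothetical
// placeholders. Assumes <sycl/sycl.hpp> and this header are included.
static inline void example_launch_offline_kernel(sycl::queue &q,
                                                 void **kernel_args) {
  // Load the compiled kernel image from disk; the helper writes it to a
  // private temporary file and opens it with dlopen()/LoadLibraryW().
  dpct::kernel_library lib = dpct::load_kernel_library("my_kernels.bin");

  // Resolve the generated "<name>_wrapper" entry point for the kernel.
  dpct::kernel_function fn = dpct::get_kernel_function(lib, "vector_add");

  // Launch one work-group of 256 work-items with no dynamic local memory.
  dpct::invoke_kernel_function(fn, q, sycl::range<3>(1, 1, 1),
                               sycl::range<3>(1, 1, 256), /*localMemSize=*/0,
                               kernel_args, /*extra=*/nullptr);
  q.wait();

  // Release the handle; on Windows this also removes the temporary copy.
  dpct::unload_kernel_library(lib);
}
// ---------------------------------------------------------------------------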
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/dpct.hpp
//==---- dpct.hpp ---------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_HPP__ #define __DPCT_HPP__ #include <sycl/sycl.hpp> #include <iostream> #include <limits.h> #include <math.h> template <class... Args> class dpct_kernel_name; template <int Arg> class dpct_kernel_scalar; #include "atomic.hpp" #include "device.hpp" #include "image.hpp" #include "kernel.hpp" #include "math.hpp" #include "memory.hpp" #include "util.hpp" #if defined(_MSC_VER) #define __dpct_align__(n) __declspec(align(n)) #define __dpct_inline__ __forceinline #else #define __dpct_align__(n) __attribute__((aligned(n))) #define __dpct_inline__ __inline__ __attribute__((always_inline)) #endif #if defined(_MSC_VER) #define __dpct_noinline__ __declspec(noinline) #else #define __dpct_noinline__ __attribute__((noinline)) #endif #define DPCT_COMPATIBILITY_TEMP (600) namespace dpct{ enum error_code { success = 0, default_error = 999 }; } #define DPCT_CHECK_ERROR(expr) \ [&]() { \ try { \ expr; \ return dpct::success; \ } catch (std::exception const &e) { \ std::cerr << e.what() << std::endl; \ return dpct::default_error; \ } \ }() #define DPCT_PI_F (3.14159274101257f) #define DPCT_PI (3.141592653589793115998) #endif // __DPCT_HPP__
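// ---------------------------------------------------------------------------
// Editor's note: a minimal, illustrative sketch of DPCT_CHECK_ERROR; it is
// not part of the original header. The macro wraps a potentially throwing
// expression in an immediately invoked lambda and maps it to dpct::success or
// dpct::default_error, mirroring CUDA-style status codes. The queue, pointer,
// and size arguments below are placeholders.
static inline dpct::error_code example_checked_memset(sycl::queue &q,
                                                      void *ptr,
                                                      size_t bytes) {
  // Returns dpct::success (0) if the memset completes, or dpct::default_error
  // (999) if the wrapped expression throws; the exception message is printed
  // to std::cerr by the macro itself.
  return DPCT_CHECK_ERROR(q.memset(ptr, 0, bytes).wait());
}
// ---------------------------------------------------------------------------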
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/dnnl_utils.hpp
//==---- dnnl_utils.hpp ---------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_DNNL_UTILS_HPP__ #define __DPCT_DNNL_UTILS_HPP__ #include <oneapi/dpl/algorithm> #include <oneapi/dpl/execution> #include <oneapi/dpl/numeric> #include <oneapi/mkl.hpp> #include <oneapi/mkl/rng/device.hpp> #include <sycl/sycl.hpp> #include <oneapi/dnnl/dnnl.hpp> #include <oneapi/dnnl/dnnl_sycl.hpp> #include <unordered_map> #include <algorithm> #include <list> #include "memory.hpp" #include "device.hpp" #include "lib_common_utils.hpp" namespace dpct { namespace dnnl { /// Get concatenated library version as an integer. static inline size_t get_version() { const ::dnnl::version_t *ver = ::dnnl::version(); return ver->major * 1000 + ver->minor * 100 + ver->patch; } class engine_ext; typedef oneapi::mkl::rng::philox4x32x10 rng_engine_t; /// An enum class representing memory layout. Used by /// memory_desc_ext to create a memory with pre-defined layout. enum class memory_format_tag { nchw, nhwc, nchw_blocked }; /// An enum class representing RNN data memory layout. Used by /// memory_desc_ext to create a memory with pre-defined layout. enum class rnn_memory_format_tag { tnc, ntc }; /// A class holding the description of an N-dimensions memory. class memory_desc_ext { ::dnnl::memory::desc _desc; public: /// Convert dpct::library_data_t to dnnl::memory::data_type. static ::dnnl::memory::data_type to_dnnl_data_type(dpct::library_data_t dt); /// Convert dnnl::memory::data_type to dpct::library_data_t. static dpct::library_data_t to_dpct_library_data_t(::dnnl::memory::data_type dt, unsigned block_size); /// Convert dpct::dnnl::memory_format_tag to dnnl::memory::format_tag. static ::dnnl::memory::format_tag to_dnnl_format_tag(dpct::library_data_t dt, memory_format_tag tag); memory_desc_ext() = default; memory_desc_ext(::dnnl::memory::desc &desc) : _desc(desc) {} memory_desc_ext(::dnnl::memory::desc &&desc) : _desc(std::move(desc)) {} /// Setting a 4D memory with given parameters. /// \param [in] tag Format tag. /// \param [in] dt Data type. /// \param [in] n Number of images. /// \param [in] c Number of channels. /// \param [in] h Height of images. /// \param [in] w Width of images. void set(memory_format_tag tag, dpct::library_data_t dt, int n, int c, int h, int w); /// Setting a 3D RNN data memory with given parameters. /// \param [in] tag RNN data format tag. /// \param [in] dt Data type. /// \param [in] t Number of sequence length. /// \param [in] n Number of batch. /// \param [in] c Height of input channel. void set(rnn_memory_format_tag tag, dpct::library_data_t dt, int t, int n, int c); /// Setting a 4D memory with given parameters. /// \param [in] dt Data type. /// \param [in] n Number of images. /// \param [in] c Number of channels. /// \param [in] h Height of images. /// \param [in] w Width of images. /// \param [in] n_stride Stride between two continuous images. /// \param [in] c_stride Stride between two continuous channels. /// \param [in] h_stride Stride between two continuous rows. /// \param [in] w_stride Stride between two continuous columns. void set(dpct::library_data_t dt, int n, int c, int h, int w, int n_stride, int c_stride, int h_stride, int w_stride); /// Setting a ND memory with given parameters. /// \param [in] dt Data type. 
/// \param [in] ndims Dimension of the memory. /// \param [in] dims Array of dimension ndims that contain the size of each /// memory dimension. \param [in] strides Array of dimension ndims that /// contain the stride of each memory dimension. void set(dpct::library_data_t dt, int ndims, const int dims[], const int strides[]); /// Setting a ND memory with given parameters. /// \param [in] tag Format tag. /// \param [in] dt Data type. /// \param [in] ndims Dimension of the memory. /// \param [in] dims Array of dimension ndims that contain the size of each /// memory dimension. void set(memory_format_tag tag, dpct::library_data_t dt, int ndims, const int dims[]); /// Getting a ::dnnl::memory::desc from a memory_desc_ext. /// \returns The ::dnnl::memory::desc. const ::dnnl::memory::desc &get_desc() const { return _desc; } /// Setting holding desc with given dnnl memory descriptor. void set_desc(::dnnl::memory::desc desc) { _desc = desc; } /// Getting a size of a memory_desc_ext in bytes. /// \returns The size. size_t get_size() const { return _desc.get_size(); } /// Getting parameters from a 4D memory. /// \param [out] dt Data type. /// \param [out] n Number of images. /// \param [out] c Number of channels. /// \param [out] h Height of images. /// \param [out] w Width of images. /// \param [out] n_stride Stride between two continuous images. /// \param [out] c_stride Stride between two continuous channels. /// \param [out] h_stride Stride between two continuous rows. /// \param [out] w_stride Stride between two continuous columns. void get(dpct::library_data_t *dt, int *n, int *c, int *h, int *w, int *n_stride, int *c_stride, int *h_stride, int *w_stride) const; /// Getting parameters from a 4D memory. /// \param [out] dt Data type. /// \param [out] tag Format tag. /// \param [out] n Number of images. /// \param [out] c Number of channels. /// \param [out] h Height of images. /// \param [out] w Width of images. void get(dpct::library_data_t *dt, memory_format_tag *tag, int *n, int *c, int *h, int *w) const; /// Getting parameters from a 3D RNN data memory. /// \param [out] dt Data type. /// \param [out] tag RNN data format tag. /// \param [out] t Number of sequence length. /// \param [out] n Number of batch. /// \param [out] c Height of input channel. void get(dpct::library_data_t *dt, rnn_memory_format_tag *tag, int *t, int *n, int *c) const; /// Getting parameters from a ND memory. /// \param [in] requested_ndims Requested number of dimensions to get from a /// given memory descriptor. /// \param [out] dt Data type. /// \param [out] ndims Dimension of the memory. /// \param [out] dims Array of dimension requested_ndims that contain the /// size of each memory dimension. /// \param [out] strides Array of dimension requested_ndims that contain the /// stride of each memory dimension. void get(int requested_ndims, dpct::library_data_t *dt, int *ndims, int dims[], int strides[]) const; /// Getting parameters from a ND memory. /// \param [in] requested_ndims Requested number of dimensions to get from a /// given memory descriptor. /// \param [out] dt Data type. /// \param [out] tag Format tag. /// \param [out] ndims Dimension of the memory. /// \param [out] dims Array of dimension requested_ndims that contain the /// size of each memory dimension. void get(int requested_ndims, dpct::library_data_t *dt, memory_format_tag *tag, int *ndims, int dims[]) const; /// Getting dims from a ND memory. /// \return The dims. 
std::vector<int64_t> get_dims() const { return _desc.get_dims(); } /// Getting strides from a ND memory. /// \return The strides. std::vector<int64_t> get_strides() const { return _desc.get_strides(); } /// Getting element num from a ND memory. /// \return The element number. size_t get_element_num() const { auto dims = _desc.get_dims(); if (dims.empty()) { return 0; } size_t result = 1; for (auto &dim : dims) { result *= dim; } return result; } operator bool() const { return bool(_desc); } memory_desc_ext &operator=(std::nullptr_t) { _desc.reset(nullptr); return *this; } }; /// A class holding description for an activation operation. class activation_desc { ::dnnl::algorithm _alg; float _alpha; float _beta; public: /// Setting an activation descriptor with given parameters. /// \param [in] alg Activation algorithm. /// \param [in] alpha Value of alpha parameter. void set(::dnnl::algorithm alg, float alpha) { _alg = alg; if(alg == ::dnnl::algorithm::eltwise_clip) { _alpha = 0; _beta = alpha; } else { _alpha = alpha; } } /// Getting parameters form an activation descriptor. /// \param [out] alg Activation algorithm. /// \param [out] alpha Value of alpha parameter. void get(::dnnl::algorithm *alg, float *alpha) const { *alg = _alg; if(_alg == ::dnnl::algorithm::eltwise_clip) { *alpha = _beta; } else { *alpha = _alpha; } } /// Setting the alpha parameter of an activation descriptor. /// \param [in] alpha Value of alpha parameter. void set_alpha(float alpha) { _alpha = alpha; } /// Setting the beta parameter of an activation descriptor. /// \param [in] beta Value of beta parameter. void set_beta(float beta) { _beta = beta; } /// Setting the algorithm parameter of an activation descriptor. /// \param [in] alg Activation algorithm. void set_algorithm(::dnnl::algorithm alg) { _alg = alg; } /// Getting the alpha parameter from an activation descriptor. /// \param [out] alpha Value of alpha parameter. float get_alpha() const { return _alpha; } /// Getting the beta parameter from an activation descriptor. /// \param [out] beta Value of beta parameter. float get_beta() const { return _beta; } /// Getting the algorithm parameter from an activation descriptor. /// \param [out] alg Activation algorithm. ::dnnl::algorithm get_algorithm() const { return _alg; } }; /// A class holding description for a local response normalization operation. class lrn_desc { unsigned int _local_size; float _alpha; float _beta; float _k; public: /// Setting a local response normalization descriptor with given parameters. /// \param [in] local_size Value of local_size parameter. /// \param [in] alpha Value of alpha parameter. /// \param [in] beta Value of beta parameter. /// \param [in] k Value of k parameter. void set(unsigned int local_size, float alpha, float beta, float k) { _local_size = local_size; _alpha = alpha; _beta = beta; _k = k; } /// Getting parameters form a local response normalization descriptor. /// \param [out] local_size Value of local_size parameter. /// \param [out] alpha Value of alpha parameter. /// \param [out] beta Value of beta parameter. /// \param [out] k Value of k parameter. void get(unsigned int *local_size, float *alpha, float *beta, float *k) const { *local_size = _local_size; *alpha = _alpha; *beta = _beta; *k = _k; } /// Setting the local size parameter of a local response normalization /// descriptor. /// \param [in] local_size Value of local_size parameter. 
void set_local_size(unsigned int local_size) { _local_size = local_size; } /// Setting the alpha parameter of a local response normalization descriptor. /// \param [in] alpha Value of alpha parameter. void set_alpha(float alpha) { _alpha = alpha; } /// Setting the beta parameter of a local response normalization descriptor. /// \param [in] beta Value of beta parameter. void set_beta(float beta) { _beta = beta; } /// Setting the k parameter of a local response normalization descriptor. /// \param [in] k Value of k parameter. void set_k(float k) { _k = k; } /// Getting the local size parameter from a local response normalization /// descriptor. /// \param [out] local_size Value of local_size parameter. unsigned int get_local_size() const { return _local_size; } /// Getting the alpha parameter from a local response normalization /// descriptor. /// \param [out] alpha Value of alpha parameter. float get_alpha() const { return _alpha; } /// Getting the beta parameter from a local response normalization descriptor. /// \param [out] beta Value of beta parameter. float get_beta() const { return _beta; } /// Getting the k parameter from a local response normalization descriptor. /// \param [out] k Value of k parameter. float get_k() const { return _k; } }; /// An enum class representing softmax algorithm. enum class softmax_algorithm { normal, log }; /// An enum class representing softmax mode. enum class softmax_mode { instance, channel }; /// A class holding description for a pooling operation. class pooling_desc { ::dnnl::algorithm _alg; std::vector<int64_t> _stride; std::vector<int64_t> _kernel; std::vector<int64_t> _padding; public: /// Setting a 2D pooling descriptor with given parameters. /// \param [in] alg Pooling algorithm. /// \param [in] kernel_h Value of height of kernel. /// \param [in] kernel_w Value of width of kernel. /// \param [in] padding_h Value of height of padding. /// \param [in] padding_w Value of width of padding. /// \param [in] stride_h Value of height of stride. /// \param [in] stride_w Value of width of stride. void set(::dnnl::algorithm alg, int kernel_h, int kernel_w, int padding_h, int padding_w, int stride_h, int stride_w) { _alg = alg; _stride = {stride_h, stride_w}; _kernel = {kernel_h, kernel_w}; _padding = {padding_h, padding_w}; } /// Setting a ND pooling descriptor with given parameters. /// \param [in] alg Pooling algorithm. /// \param [in] ndims Dimension of the pooling operation. /// \param [in] kernel Array of dimension ndims containing the kernel size of /// each dimension. /// \param [in] padding Array of dimension ndims containing the padding size of /// each dimension. /// \param [in] stride Array of dimension ndims containing the stride size of /// each dimension. void set(::dnnl::algorithm alg, int ndims, int kernel[], int padding[], int stride[]) { _alg = alg; _stride = std::vector<int64_t>(stride, stride + ndims); _kernel = std::vector<int64_t>(kernel, kernel + ndims); _padding = std::vector<int64_t>(padding, padding + ndims); } /// Getting parameters from a 2D pooling descriptor. /// \param [out] alg Pooling algorithm. /// \param [out] kernel_h Value of height of kernel. /// \param [out] kernel_w Value of width of kernel. /// \param [out] padding_h Value of height of padding. /// \param [out] padding_w Value of width of padding. /// \param [out] stride_h Value of height of stride. /// \param [out] stride_w Value of width of stride. 
void get(::dnnl::algorithm *alg, int *kernel_h, int *kernel_w, int *padding_h, int *padding_w, int *stride_h, int *stride_w) const { *alg = _alg; *kernel_h = _kernel[0]; *kernel_w = _kernel[1]; *padding_h = _padding[0]; *padding_w = _padding[1]; *stride_h = _stride[0]; *stride_w = _stride[1]; } /// Getting parameters from a ND pooling descriptor. /// \param [in] requested_ndims Requested number of dimensions to get from a /// given pooling descriptor. /// \param [out] alg Pooling algorithm. /// \param [out] ndims Dimension of the pooling operation. /// \param [out] kernel Array of dimension ndims containing the kernel size of /// each dimension. /// \param [out] padding Array of dimension ndims containing the padding size /// of each dimension. /// \param [out] stride Array of dimension ndims containing the stride size of /// each dimension. void get(int requested_ndims, ::dnnl::algorithm *alg, int *ndims, int kernel[], int padding[], int stride[]) const { *alg = _alg; *ndims = _stride.size(); for (int i = 0; i < requested_ndims; i++) { kernel[i] = _kernel[i]; padding[i] = _padding[i]; stride[i] = _stride[i]; } } /// Setting the algorithm parameter of a pooling descriptor. /// \param [in] alg Pooling algorithm. void set_algorithm(::dnnl::algorithm alg) { _alg = alg; } /// Setting the stride parameter of a pooling descriptor. /// \param [in] stride Array of dimension ndims containing the stride size of /// each dimension. void set_stride(const std::vector<int64_t> &stride) { _stride = stride; } /// Setting the kernel parameter of a pooling descriptor. /// \param [in] kernel Array of dimension ndims containing the kernel size of /// each dimension. void set_kernel(const std::vector<int64_t> &kernel) { _kernel = kernel; } /// Setting the padding parameter of a pooling descriptor. /// \param [in] padding Array of dimension ndims containing the padding size /// of each dimension. void set_padding(const std::vector<int64_t> &padding) { _padding = padding; } /// Getting the algorithm parameter from a pooling descriptor. /// \param [out] alg Pooling algorithm. ::dnnl::algorithm get_algorithm() const { return _alg; } /// Getting the stride parameter from a pooling descriptor. /// \returns Array of dimension ndims containing the stride size of each /// dimension. const std::vector<int64_t> &get_stride() const { return _stride; } /// Getting the kernel parameter from a pooling descriptor. /// \returns Array of dimension ndims containing the kernel size of each /// dimension. const std::vector<int64_t> &get_kernel() const { return _kernel; } /// Getting the padding parameter from a pooling descriptor. /// \returns Array of dimension ndims containing the padding size of each /// dimension. const std::vector<int64_t> &get_padding() const { return _padding; } /// Getting the output dimensions of a memory after 2D pooling has been /// applied. /// \param [in] desc Input memory descriptor. /// \param [out] out_n Number of images. /// \param [out] out_c Number of channels. /// \param [out] out_h Height of images. /// \param [out] out_w Width of images. void get_forward_output_dim(const memory_desc_ext &desc, int *out_n, int *out_c, int *out_h, int *out_w) const { auto dims = desc.get_dims(); *out_n = dims[0]; *out_c = dims[1]; *out_h = 1 + (dims[2] + 2 * _padding[0] - _kernel[0]) / _stride[0]; *out_w = 1 + (dims[3] + 2 * _padding[1] - _kernel[1]) / _stride[1]; } /// Getting the output dimensions of a memory after ND pooling has been /// applied. /// \param [in] desc Input memory descriptor. 
/// \param [out] ndims Dimension of the memory. /// \param [out] out_dims Array of dimension requested_ndims that contain /// the size of each memory dimension. void get_forward_output_dim(const memory_desc_ext &desc, int ndims, int out_dims[]) const { assert(ndims >= 4 && "ndims is at least 4."); auto dims = desc.get_dims(); out_dims[0] = dims[0]; out_dims[1] = dims[1]; for (int i = 2; i < ndims; i++) { out_dims[i] = 1 + (dims[i] + 2 * _padding[i - 2] - _kernel[i - 2]) / _stride[i - 2]; } } }; /// An enum class representing reduction operations. enum class reduction_op { max, min, sum, mul, mean, amax, mul_no_zeros, norm1, norm2 }; /// An enum class representing batch normalization mode. enum class batch_normalization_mode { per_activation, spatial }; /// An enum class representing batch normalization operations. enum class batch_normalization_ops { none, activation, add_activation }; /// An enum class representing binary operations. enum class binary_op { add, sub, mul, div, min, max, sqrt, neg }; /// An struct representing convolution algorithm infomation. struct convolution_algorithm_info { ::dnnl::algorithm algo = ::dnnl::algorithm::convolution_auto; int status = 0; }; /// A class holding description for a convolution operation. class convolution_desc { std::vector<int64_t> _strides; std::vector<int64_t> _dilates; std::vector<int64_t> _paddings; int _group_count = 1; ::dnnl::fpmath_mode _math_mode = ::dnnl::fpmath_mode::strict; public: /// Setting a group count to be used in the convolution. /// \param [in] group_count Value of group count. void set_group_count(int group_count) { _group_count = group_count; } /// Getting a group count specified in the given convolution descriptor. /// \returns Value of group count. int get_group_count() { return _group_count; } /// Setting floating point math mode to be used in the convolution. /// \param [in] math_mode Value of math_mode. void set_math_mode(::dnnl::fpmath_mode math_mode) { _math_mode = math_mode; } /// Getting floating point math mode specified in the given convolution descriptor. /// \returns Value of math mode. ::dnnl::fpmath_mode get_math_mode() { return _math_mode; } /// Setting a 2D convolution descriptor with given parameters. /// \param [in] padding_h Value of height of padding. /// \param [in] padding_w Value of width of padding. /// \param [in] stride_h Value of height of stride. /// \param [in] stride_w Value of width of stride. /// \param [in] dilate_h Value of height of dilate. /// \param [in] dilate_w Value of width of dilate. void set(int padding_h, int padding_w, int stride_h, int stride_w, int dilate_h, int dilate_w) { _strides = {stride_h, stride_w}; _dilates = {dilate_h - 1, dilate_w - 1}; _paddings = {padding_h, padding_w}; } /// Setting a ND convolution descriptor with given parameters. /// \param [in] ndims Dimension of the convolution operation. /// \param [in] paddings Array of dimension ndims containing the padding size of /// each dimension. /// \param [in] strides Array of dimension ndims containing the stride size of /// each dimension. /// \param [in] dilates Array of dimension ndims containing the kernel size of /// each dimension. void set(int ndims, int paddings[], int strides[], int dilates[]) { _strides = std::vector<int64_t>(strides, strides + ndims); _paddings = std::vector<int64_t>(paddings, paddings + ndims); _dilates = std::vector<int64_t>(dilates, dilates + ndims); for (auto &dilate : _dilates) { dilate--; } } /// Getting parameters from a 2D convolution descriptor. 
/// \param [out] padding_h Value of height of padding. /// \param [out] padding_w Value of width of padding. /// \param [out] stride_h Value of height of stride. /// \param [out] stride_w Value of width of stride. /// \param [out] dilate_h Value of height of dilate. /// \param [out] dilate_w Value of width of dilate. void get(int *padding_h, int *padding_w, int *stride_h, int *stride_w, int *dilate_h, int *dilate_w) const { *dilate_h = _dilates[0]; *dilate_w = _dilates[1]; *padding_h = _paddings[0]; *padding_w = _paddings[1]; *stride_h = _strides[0]; *stride_w = _strides[1]; } /// Getting parameters from a ND convolution descriptor. /// \param [in] requested_ndims Requested number of dimensions to get from a /// given convolution descriptor. /// \param [out] ndims Dimension of the pooling operation. /// \param [out] paddings Array of dimension ndims containing the padding size /// of each dimension. /// \param [out] strides Array of dimension ndims containing the stride size of /// each dimension. /// \param [out] dilates Array of dimension ndims containing the dilate size of /// each dimension. void get(int requested_ndims, int *ndims, int paddings[], int strides[], int dilates[]) const { *ndims = _strides.size(); for (int i = 0; i < requested_ndims; i++) { dilates[i] = _dilates[i]; paddings[i] = _paddings[i]; strides[i] = _strides[i]; } } /// Getting the stride parameter from a convolution descriptor. /// \returns Array of dimension ndims containing the stride size of each /// dimension. const std::vector<int64_t> &get_stride() const { return _strides; } /// Getting the kernel parameter from a convolution descriptor. /// \returns Array of dimension ndims containing the dilate size of each /// dimension. const std::vector<int64_t> &get_dilate() const { return _dilates; } /// Getting the padding parameter from a convolution descriptor. /// \returns Array of dimension ndims containing the padding size of each /// dimension. const std::vector<int64_t> &get_padding() const { return _paddings; } /// Getting the output dimensions of a memory after 2D convolution has been /// applied. /// \param [in] desc Input memory descriptor. /// \param [in] weight_desc Input weight memory descriptor. /// \param [out] out_n Number of images. /// \param [out] out_c Number of channels. /// \param [out] out_h Height of images. /// \param [out] out_w Width of images. void get_forward_output_dim(const memory_desc_ext &desc, const memory_desc_ext &weight_desc, int *out_n, int *out_c, int *out_h, int *out_w) const { auto dims = desc.get_dims(); auto weight_dims = weight_desc.get_dims(); *out_n = dims[0]; *out_c = weight_dims[0]; *out_h = 1 + (dims[2] + 2 * _paddings[0] - (1 + (_dilates[0] * (weight_dims[2] - 1)))) / _strides[0]; *out_w = 1 + (dims[3] + 2 * _paddings[1] - (1 + (_dilates[1] * (weight_dims[3] - 1)))) / _strides[1]; } /// Getting the output dimensions of a memory after ND convolution has been /// applied. /// \param [in] desc Input memory descriptor. /// \param [in] weight_desc Input weight memory descriptor. /// \param [out] ndims Dimension of the memory. /// \param [out] out_dims Array of dimension requested_ndims that contain /// the size of each memory dimension. 
void get_forward_output_dim(const memory_desc_ext &desc, const memory_desc_ext &weight_desc, int ndims, int out_dims[]) const { assert(ndims >= 4 && "ndims is at least 4."); auto dims = desc.get_dims(); auto weight_dims = weight_desc.get_dims(); out_dims[0] = dims[0]; out_dims[1] = weight_dims[1]; for (int i = 2; i < ndims; i++) { out_dims[i] = 1 + (dims[i] + 2 * _paddings[i - 2] - (1 + (_dilates[i - 2] * (weight_dims[i] - 1)))) / _strides[i - 2]; } } convolution_desc &operator=(std::nullptr_t) { return *this = convolution_desc(); } operator bool() const { return !(_strides.size() == 0 && _dilates.size() == 0 && _paddings.size() == 0); } }; /// An enum class representing rnn mode. enum class rnn_mode { vanilla_relu, vanilla_tanh, lstm, gru }; /// An enum class representing rnn bias mode. enum class rnn_bias_mode { none, single }; /// An enum class representing rnn direction. enum class rnn_direction {unidirectional, bidirectional}; /// A class holding description for a RNN operation. class rnn_desc { rnn_mode _mode; rnn_bias_mode _bias_mode; rnn_direction _direction; dpct::library_data_t _dt; int _input_size; int _hidden_size; int _projection_size; int _layer_size; public: void set(rnn_mode mode, rnn_bias_mode bias_mode, rnn_direction direction, dpct::library_data_t dt, int input_size, int hidden_size, int projection_size, int layer_size) { _mode = mode; _bias_mode = bias_mode; _direction = direction; _input_size = input_size; _hidden_size = hidden_size; _projection_size = projection_size; _layer_size = layer_size; _dt = dt; } void get(rnn_mode *mode, rnn_bias_mode *bias_mode, rnn_direction *direction, dpct::library_data_t *dt, int *input_size, int *hidden_size, int *projection_size, int *layer_size) const { *mode = _mode; *bias_mode = _bias_mode; *direction = _direction; *input_size = _input_size; *hidden_size = _hidden_size; *projection_size = _projection_size; *layer_size = _layer_size; *dt = _dt; } }; /// A class holding description for a Dropout operation. class dropout_desc { struct dropout_desc_imp { float _p = 0.5f; unsigned long long _seed = 1; void *_state = nullptr; std::vector<std::uint8_t> _host_state; rng_engine_t _rng_engine; dropout_desc_imp() : _rng_engine(dpct::get_default_queue(), 1) {} }; std::shared_ptr<dropout_desc_imp> _imp; void generate(sycl::queue *q, std::int64_t required_state_size, std::int64_t num, void *buffer) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) " "Interfaces Project does not support this API."); #else sycl::event e_gen = oneapi::mkl::rng::generate( oneapi::mkl::rng::bernoulli<std::int32_t>(1.f - _imp->_p), _imp->_rng_engine, num, (std::int32_t *)buffer); sycl::event e_save = q->submit([&](sycl::handler &cgh) { cgh.depends_on(e_gen); cgh.host_task([=] { oneapi::mkl::rng::save_state(_imp->_rng_engine, _imp->_host_state.data()); }); }); q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size, e_save); #endif } public: operator bool() const { return bool(_imp); } dropout_desc &operator=(std::nullptr_t) { _imp.reset(); return *this; } /// Initializing a dropout descriptor. void init(){ _imp = std::make_shared<dropout_desc_imp>(); } /// Setting a dropout descriptor with given parameters. /// \param [in] engine Engine of the dropout operation. /// \param [in] p Probability of value set to zero. /// \param [in] state Memory that store random generator state. /// \param [in] state_size Required size to store random generator state. 
/// \param [in] seed Seed to initialize conditions of the generator state. void set(engine_ext &engine, float p, void *state, size_t state_size, unsigned long long seed); /// Getting parameters from a dropout descriptor. /// \param [in] engine Engine of the dropout operation. /// \param [in] p Probability of value set to zero. /// \param [in] state Memory that store random generator state. /// \param [in] seed Seed to initialize conditions of the generator state. void get(float *p, void **states, unsigned long long *seed) const noexcept { *seed = _imp->_seed; *states = _imp->_state; *p = _imp->_p; } /// Getting the probability of value set to zero. /// \returns Probability. float get_probability() const noexcept { return _imp->_p; } /// Restoreing a dropout descriptor from stored state. /// \param [in] engine Engine of the dropout operation. /// \param [in] p Probability of value set to zero. /// \param [in] state Memory that store random generator state. /// \param [in] state_size Required size to store random generator state. /// \param [in] seed Seed to initialize conditions of the generator state. void restore(engine_ext &engine, float p, void *state, size_t state_size, unsigned long long seed); friend class engine_ext; }; namespace detail { typedef std::string primitive_cache_key_type; typedef std::list<primitive_cache_key_type> usage_list_type; typedef struct { ::dnnl::primitive *primitive; usage_list_type::iterator usage_it; std::function<void(::dnnl::primitive *)> destructor; sycl::event e; } primitive_cache_value_type; typedef std::unordered_map<primitive_cache_key_type, primitive_cache_value_type> cache_map_type; // The primitive cache uses LRU replacement policy, and the default cache // capacity is 1024. class primitive_cache { int _capacity = 1024; usage_list_type usage; cache_map_type cache_map; void touch(cache_map_type::iterator it, sycl::event e = {}, bool update_event = false) { if (it->second.usage_it != usage.begin()) { const primitive_cache_key_type &key = it->first; usage.erase(it->second.usage_it); usage.push_front(key); it->second.usage_it = usage.begin(); } if (update_event) { it->second.e = e; } } void async_destruct_primitive(const primitive_cache_value_type &value) { dpct::get_current_device().default_queue().submit([&](sycl::handler &cgh) { cgh.depends_on(value.e); cgh.host_task([=] { value.destructor(value.primitive); }); }); } public: ::dnnl::primitive *get(const primitive_cache_key_type &key) { auto it = cache_map.find(key); if (it == cache_map.end()) { return nullptr; } touch(it); return it->second.primitive; } void put(const primitive_cache_key_type &key, ::dnnl::primitive *value, std::function<void(::dnnl::primitive *)> destructor, sycl::event e) { auto it = cache_map.find(key); if (it != cache_map.end()) { touch(it, e, true); } else { if (cache_map.size() == _capacity) { auto last_primitive = cache_map.find(usage.back()); async_destruct_primitive(last_primitive->second); cache_map.erase(usage.back()); usage.pop_back(); } usage.push_front(key); cache_map[key] = {value, usage.begin(), destructor, e}; } } ~primitive_cache() { for (auto &v : cache_map) { async_destruct_primitive(v.second); } } }; } // namespace detail /// A class holding the oneDNN engine. 
class engine_ext { struct output_argument_info { float _alpha; float _beta; int _name; memory_desc_ext _desc; void *_data; output_argument_info(float alpha, float beta, int name, memory_desc_ext desc, void *data) : _alpha(alpha), _beta(beta), _name(name), _desc(desc), _data(data) {} output_argument_info(float alpha, float beta, memory_desc_ext desc, void *data) : _alpha(alpha), _beta(beta), _name(0), _desc(desc), _data(data) {} }; ::dnnl::engine _eng; ::dnnl::stream _s; sycl::queue *_q = nullptr; std::map<void *, ::dnnl::memory> workspace_map; std::int64_t _random_engine_state_size = -1; detail::primitive_cache _primitive_cache; ::dnnl::memory &get_workspace(void *key) { return workspace_map[key]; } void insert_workspace(void *key, ::dnnl::memory workspace) { workspace_map[key] = workspace; } const ::dnnl::stream &get_stream() const { return _s; } const ::dnnl::engine &get_engine() const { return _eng; } void *allocate(const memory_desc_ext &desc, int count = 1) const; ::dnnl::memory::desc compress_spatial_dimensions_to_channel(const ::dnnl::memory::desc &desc); ::dnnl::memory::desc get_bn_scale_bias_mean_var_desc(const ::dnnl::memory::desc &desc, batch_normalization_mode mode); sycl::event batch_normalization_backward_internal( batch_normalization_mode mode, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias, float beta_param, void *diff_scale, void *diff_bias, const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var); sycl::event batch_normalization_forward_internal( bool is_infer, batch_normalization_mode mode, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &scale_bias_desc, void *scale, void *bias, const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var, void *running_mean, void *running_var); ::dnnl::memory::desc transfer_memory_desc_to_channel_major_format(const ::dnnl::memory::desc &desc); ::dnnl::memory::desc bn_reorder_memory_to_channel_major_format(bool is_input, ::dnnl::memory::desc &desc, void *src, void **cache, std::vector<void *> &caches); ::dnnl::memory::desc transfer_memory_desc_to_format_tag_any(const ::dnnl::memory::desc &desc){ return ::dnnl::memory::desc(desc.get_dims(), desc.get_data_type(), ::dnnl::memory::format_tag::any); } void allocate_and_reorder_memory_to_optimal(::dnnl::memory::desc &from_desc, void *&from, ::dnnl::memory::desc &to_desc, void *&to, std::vector<void *> &caches) { if (from_desc != to_desc) { to = allocate(to_desc); caches.push_back(to); async_reorder(1.f, from_desc, from, 0.f, to_desc, to); } } template <typename primitive_type, typename... args_type> std::pair<detail::primitive_cache_key_type, primitive_type *> create_primitive(args_type &&...args); template <typename primitive_type> std::pair<detail::primitive_cache_key_type, primitive_type *> create_primitive_with_pd(const typename primitive_type::primitive_desc &pd); template <typename primitive_type, typename... 
args_type> typename primitive_type::primitive_desc create_primitive_desc(args_type &&...args); template <typename primitive_desc_type> std::string generate_cache_key(const primitive_desc_type &pd); void serialize_dims(std::stringstream &ss, const std::vector<int64_t> &dims) { ss.write((char *)dims.data(), dims.size() * sizeof(int64_t)); }; void serialize_mem_desc(std::stringstream &ss, const ::dnnl::memory::desc &desc) { if (desc.is_zero()) { return; } auto format_kind = desc.get_format_kind(); ss << desc.get_ndims() << (std::uint8_t)desc.get_data_type() << (std::uint8_t)format_kind; serialize_dims(ss, desc.get_dims()); serialize_dims(ss, desc.get_strides()); if (format_kind == ::dnnl::memory::format_kind::blocked) { ss << desc.get_inner_nblks(); serialize_dims(ss, desc.get_inner_blks()); serialize_dims(ss, desc.get_inner_idxs()); } }; sycl::event execute_rnn_forward_primitive( rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c, int dst_c, int layer_size, int direction_num, int hidden_size, int gate_num, int projection_size, std::vector<void *> &data, std::vector<int> &offset, int iter_num, size_t *weight_size = nullptr, size_t *workspace_size = nullptr, size_t *scratchpad_size = nullptr); sycl::event rnn_forward_internal( const rnn_desc &desc, ::dnnl::prop_kind kind, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c, size_t weight_size, void *weight, size_t workspace_size, void *workspace, size_t scratchpad_size, void *scratchpad, bool is_get_execution_args, size_t *weight_size_query, size_t *workspace_size_query, size_t *scratchpad_size_query); sycl::event execute_rnn_backward_primitive( rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c, int dst_c, int layer_size, int direction_num, int hidden_size, int gate_num, int projection_size, std::vector<void *> &data, std::vector<int> &offset, int iter_num); void async_free(sycl::queue *q, sycl::event e, std::unordered_map<int, ::dnnl::memory> *args, std::vector<void *> device_ptrs = {}) { q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { if (args) { delete args; } for (auto ptr : device_ptrs) { if (ptr) { sycl::free(ptr, *_q); } } }); }); }; bool scale_parameter_preprocess(const std::vector<output_argument_info> &args); template <typename primitive_type> sycl::event execute_primitive(const std::pair<detail::primitive_cache_key_type, primitive_type *> &primitive, std::unordered_map<int, ::dnnl::memory> *args, const std::vector<output_argument_info> &extra_args = {}, const std::vector<void *> &device_ptrs = {}); template <typename T> sycl::event fill_with_type(sycl::queue *q, void *src, const void *value, size_t size_with_byte) { return q->fill<T>(static_cast<T *>(src), *static_cast<const T *>(value), size_with_byte / sizeof(T)); } template <typename T> struct no_zero_op { T operator()(T e) { if (!e) { return 1; } return e; } }; template <typename T> void transform_no_zero_with_type(sycl::queue *q, void *src, void *dst, size_t num) { std::transform(oneapi::dpl::execution::make_device_policy(*q), static_cast<T *>(src), static_cast<T *>(src) + num, static_cast<T *>(dst), 
no_zero_op<T>()); } void transform_no_zero(const memory_desc_ext &desc, void *src, void *dst); ::dnnl::memory::desc get_group_weight_desc(int group_count, const memory_desc_ext &weight_desc); void get_rnn_configuration(const ::dnnl::memory::desc &desc, rnn_direction direction, rnn_mode mode, dpct::library_data_t dt, int hidden_size, ::dnnl::memory::data_type *dnnl_dt, ::dnnl::memory::format_tag *tag, int *projection_size, int *output_size, int *seq_length, int *batch_size, int *direction_num, int *gate_num); public: engine_ext() {} operator bool() const { return bool(_eng) && bool(_s) && bool(_q); } engine_ext &operator=(std::nullptr_t) { _eng.reset(nullptr); _s.reset(nullptr); _q = nullptr; return *this; } /// Creating oneDNN engine. void create_engine() { _eng = ::dnnl::sycl_interop::make_engine( dpct::get_current_device(), dpct::get_current_device().get_context()); _s = ::dnnl::sycl_interop::make_stream( _eng, dpct::get_current_device().default_queue()); _q = &dpct::get_current_device().default_queue(); } /// Setting the user's SYCL queue for an oneDNN engine. /// \param [in] q Pointer to the SYCL queue. void set_queue(sycl::queue *q) { if (!q) { throw std::runtime_error("set_queue: pointer must not be nullptr."); } if (!_eng) { throw std::runtime_error("set_queue: current engine is invalid."); } if (q->get_context() != ::dnnl::sycl_interop::get_context(_eng)) { throw std::runtime_error( "set_queue: queue is mismatch with current engine context."); } _q = q; _s = ::dnnl::sycl_interop::make_stream(_eng, *q); } /// Retrieving the user's SYCL queue set in the oneDNN engine. /// \returns Pointer to the SYCL queue. sycl::queue *get_queue() const { return _q; } /// Setting all elements of a memory to a given value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] valuePtr Pointer to a single value. void fill(const memory_desc_ext &src_desc, void *src, const void *valuePtr); /// Coping the scaled data from a memory to another memory with a different /// description. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. void reorder(float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst); /// Scaling all the elements of a memory by a given factor. /// \param [in] alpha Value to scaling factors. /// \param [in] src_desc Source memory descriptor. /// \param [out] src Pointer to source data. void scale(float alpha, const memory_desc_ext &src_desc, void *src); /// Adding the scaled values of a memory to another memory. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. void sum(float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst); /// Computing a specified activation function value. /// \param [in] desc Activation descriptor. 
/// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. void activation_forward(activation_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst); /// Computing the gradient of a specified activation function. /// \param [in] desc Activation descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. void activation_backward(activation_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src); /// Computing a specified pooling function value. /// \param [in] desc Pooling descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [out] workspace Pointer to workspace generated from forward propagation. void pooling_forward(pooling_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace = nullptr); /// Computing the gradient of a specified pooling function. /// \param [in] desc Activation descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential /// source data. /// \param [in] workspace Pointer to workspace used for backward /// propagation. 
void pooling_backward(pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src, ::dnnl::memory *workspace = nullptr); /// Computing a specified softmax function value. /// \param [in] alg Softmax algorithm. /// \param [in] mode Softmax mode. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. void softmax_forward(softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst); /// Computing the gradient of a specified softmax function. /// \param [in] alg Softmax algorithm. /// \param [in] mode Softmax mode. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. void softmax_backward(softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_src_desc, void *diff_src); /// Computing a specified local response normalization function value. /// \param [in] desc Local response normalization descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [out] workspace Pointer to workspace generated from forward /// propagation. void lrn_forward(lrn_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace = nullptr); /// Computing the gradient of a specified local response normalization /// function. /// \param [in] desc Local response normalization descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. 
/// \param [out] diff_src Pointer to differential source data. /// \param [in] workspace Pointer to workspace used for backward propagation. void lrn_backward(lrn_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src, ::dnnl::memory *workspace = nullptr); /// Setting all elements of a memory to a given value asynchronously. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] valuePtr Pointer to a single value. /// \returns An event representing the fill operations. sycl::event async_fill(const memory_desc_ext &src_desc, void *src, const void *valuePtr); /// Coping the scaled data from a memory to another memory with a different /// description asynchronously. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \returns An event representing the reorder operations. sycl::event async_reorder(float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst); /// Scaling all the elements of a memory by a given factor asynchronously. /// \param [in] alpha Value to scaling factors. /// \param [in] src_desc Source memory descriptor. /// \param [out] src Pointer to source data. /// \returns An event representing the scale operations. sycl::event async_scale(float alpha, const memory_desc_ext &src_desc, void *src); /// Adding the scaled values of a memory to another memory asynchronously. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \returns An event representing the sum operations. sycl::event async_sum(float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst); /// Perform specified binary operation asynchronously. /// \param [in] op Specified binary operation. /// \param [in] alpha_0 Value to scaling factors used to scale the src_0 /// value. /// \param [in] src_desc_0 Source 0 memory descriptor. /// \param [in] src_0 Pointer to source 0 data. /// \param [in] alpha_1 Value to scaling factors used to scale the src_1 /// value. /// \param [in] src_desc_1 Source 1 memory descriptor. /// \param [in] src_1 Pointer to source 1 data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \returns An event representing the binary operations. sycl::event async_binary(binary_op op, float alpha_0, const memory_desc_ext &src_desc_0, void *src_0, float alpha_1, const memory_desc_ext &src_desc_1, void *src_1, float beta, const memory_desc_ext &dst_desc, void *dst); /// Perform specified binary operation asynchronously. 
/// \param [in] op Specified reduction operation. /// \param [in] alpha Value to scaling factors used to scale the data /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \returns An event representing the reduction operations. sycl::event async_reduction(reduction_op op, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst); /// Computing a specified activation function value asynchronously. /// \param [in] desc Activation descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \returns An event representing the activation forward operations. sycl::event async_activation_forward(activation_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst); /// Computing the gradient of a specified activation function asynchronously. /// \param [in] desc Activation descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \returns An event representing the activation backward operations. sycl::event async_activation_backward(activation_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src); /// Computing a specified pooling function value asynchronously. /// \param [in] desc Pooling descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [out] workspace Pointer to workspace generated from forward propagation. /// \returns An event representing the pooling forward operations. sycl::event async_pooling_forward(pooling_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace = nullptr); /// Computing the gradient of a specified pooling function asynchronously. /// \param [in] desc Activation descriptor. 
/// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential /// source data. /// \param [in] workspace Pointer to workspace used for backward /// propagation. /// \returns An event representing the pooling backward operations. sycl::event async_pooling_backward(pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src, ::dnnl::memory *workspace = nullptr); /// Computing a specified softmax function value asynchronously. /// \param [in] alg Softmax algorithm. /// \param [in] mode Softmax mode. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \returns An event representing the softmax forward operations. sycl::event async_softmax_forward(softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst); /// Computing the gradient of a specified softmax function asynchronously. /// \param [in] alg Softmax algorithm. /// \param [in] mode Softmax mode. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \returns An event representing the softmax backward operations. sycl::event async_softmax_backward(softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_src_desc, void *diff_src); /// Computing a specified local response normalization function value /// asynchronously. /// \param [in] desc Local response normalization descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. 
/// \param [out] workspace Pointer to workspace generated from forward /// propagation. /// \returns An event representing the lrn forward operations. sycl::event async_lrn_forward(lrn_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace = nullptr); /// Computing the gradient of a specified local response normalization /// function asynchronously. /// \param [in] desc Local response normalization descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \param [in] workspace Pointer to workspace used for backward propagation. /// \returns An event representing the lrn backward operations. sycl::event async_lrn_backward(lrn_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src, ::dnnl::memory *workspace = nullptr); /// Derives a memory descriptor for the batch normalization scale, bias, mean, /// variance from the source memory descriptor and batch normalization mode. /// \param [out] desc Derived memory descriptor. /// \param [in] src_desc Source memory descriptor. /// \param [in] mode Batch normalization mode. static void derive_batch_normalization_memory_desc(memory_desc_ext &desc, const memory_desc_ext &src_desc, batch_normalization_mode mode); /// Derives a memory descriptor for the batch normalization scale, bias, mean, /// variance from the source memory descriptor and batch normalization mode. /// \param [out] scale_bias_desc Derived scale and bias memory descriptor. /// \param [out] mean_var_desc Derived mean and var memory descriptor. /// \param [in] src_desc Source memory descriptor. /// \param [in] mode Batch normalization mode. static void derive_batch_normalization_memory_desc(memory_desc_ext &scale_bias_desc, memory_desc_ext &mean_var_desc, const memory_desc_ext &src_desc, batch_normalization_mode mode); /// Get the size of workspace that needed by batch normalization. The data stored /// in workspace must be preserved between forward and backward. /// \param [in] ops Batch normalization operation mode. This mode can set to /// perform only batch normalization, or batch normalization followed by /// activation, or batch normalization followed by element-wise addition and /// activation. /// \param [in] src_desc Source memory descriptor. /// \returns Size of workspace. size_t get_batch_normalization_workspace_size( batch_normalization_ops ops, const memory_desc_ext &src_desc); /// Computing a specified batch normalization inference stage function value /// asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. 
/// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory /// descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] bias Pointer to bias data. /// \param [in] mean Pointer to mean data. /// \param [in] var Pointer to variance data. /// \returns An event representing the batch normalization forward operations. sycl::event async_batch_normalization_forward_inference( batch_normalization_mode mode, float epsilon, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias, void *mean, void *var); /// Computing a specified batch normalization inference stage function value /// asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] ops Batch normalization operation mode. This mode can set to /// perform only batch normalization, or batch normalization followed by /// activation, or batch normalization followed by element-wise addition and /// activation. /// \param [in] adesc Activation operation descriptor. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [in] summand_desc Summand memory descriptor. /// \param [in] summand Pointer to summand data. /// \param [in] scale_bias_desc Scale, bias memory descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] bias Pointer to bias data. /// \param [in] mean_var_desc Mean, variance memory descriptor. /// \param [in] mean Pointer to mean data. /// \param [in] var Pointer to variance data. /// \returns An event representing the batch normalization forward operations. sycl::event async_batch_normalization_forward_inference( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &scale_bias_desc, void *scale, void *bias, const memory_desc_ext &mean_var_desc, void *mean, void *var); /// Computing a specified batch normalization training stage function value /// asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] factor Factor value used in running mean and variance /// computation. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. 
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory /// descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] bias Pointer to bias data. /// \param [out] running_mean Pointer to running mean data. /// \param [out] running_var Pointer to running variance data. /// \param [out] saved_mean Pointer to optional cache to save mean data. /// \param [out] saved_var Pointer to optional cache to save variance data. /// \returns An event representing the batch normalization forward operations. sycl::event async_batch_normalization_forward_training( batch_normalization_mode mode, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias, void *running_mean, void *running_var, void *saved_mean, void *saved_var); /// Computing a specified batch normalization training stage function value /// asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] ops Batch normalization operation mode. This mode can set to /// perform only batch normalization, or batch normalization followed by /// activation, or batch normalization followed by element-wise addition and /// activation. /// \param [in] adesc Activation operation descriptor. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] factor Factor value used in running mean and variance /// computation. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [in] summand_desc Summand memory descriptor. /// \param [in] summand Pointer to summand data. /// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory /// descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] bias Pointer to bias data. /// \param [out] running_mean Pointer to running mean data. /// \param [out] running_var Pointer to running variance data. /// \param [out] saved_mean Pointer to optional cache to save mean data. /// \param [out] saved_var Pointer to optional cache to save variance data. /// \param [in] workspace_size Size of workspace. /// \param [out] workspace Pointer to workspace generated from forward /// propagation. /// \returns An event representing the batch normalization forward operations. sycl::event async_batch_normalization_forward_training( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias, void *running_mean, void *running_var, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace); /// Computing a specified batch normalization training stage function value /// asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] ops Batch normalization operation mode. 
This mode can set to /// perform only batch normalization, or batch normalization followed by /// activation, or batch normalization followed by element-wise addition and /// activation. /// \param [in] adesc Activation operation descriptor. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] factor Factor value used in running mean and variance /// computation. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [in] summand_desc Summand memory descriptor. /// \param [in] summand Pointer to summand data. /// \param [in] scale_bias_desc Scale, bias memory descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] bias Pointer to bias data. /// \param [in] mean_var_desc Mean, variance memory descriptor. /// \param [out] running_mean Pointer to running mean data. /// \param [out] running_var Pointer to running variance data. /// \param [out] saved_mean Pointer to optional cache to save mean data. /// \param [out] saved_var Pointer to optional cache to save variance data. /// \param [in] workspace_size Size of workspace. /// \param [out] workspace Pointer to workspace generated from forward /// propagation. /// \returns An event representing the batch normalization forward operations. sycl::event async_batch_normalization_forward_training( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &scale_bias_desc, void *scale, void *bias, const memory_desc_ext &mean_var_desc, void *running_mean, void *running_var, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace); /// Computing the gradient of a specified batch normalization function asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] alpha_data Value to scaling factors used to scale the computed /// data value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] beta_data Value to scaling factors used to scale the prior value /// in the data memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \param [in] alpha_param Value to scaling factors used to scale the computed /// parameter value. /// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean, /// variance memory descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] beta_param Value to scaling factors used to scale the prior value /// in the parameter memory. /// \param [in] diff_scale Pointer to differential scale data. /// \param [in] diff_bias Pointer to differential bias data. /// \param [in] saved_mean Pointer to optional cache saved mean data in forward. 
/// \param [in] saved_var Pointer to optional cache saved variance data in forward. /// \returns An event representing the batch normalization backward operations. sycl::event async_batch_normalization_backward( batch_normalization_mode mode, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale, float beta_param, void *diff_scale, void *diff_bias, void *saved_mean, void *saved_var); /// Computing the gradient of a specified batch normalization function /// asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] ops Batch normalization operation mode. This mode can set to /// perform only batch normalization, or batch normalization followed by /// activation, or batch normalization followed by element-wise addition and /// activation. /// \param [in] adesc Activation operation descriptor. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] alpha_data Value to scaling factors used to scale the computed /// data value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] beta_data Value to scaling factors used to scale the prior value /// in the data memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \param [in] diff_summand_desc Differential summand memory descriptor. /// \param [out] diff_summand Pointer to differential summand data. /// \param [in] alpha_param Value to scaling factors used to scale the computed /// parameter value. /// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean, /// variance memory descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] bias Pointer to bias data. /// \param [in] beta_param Value to scaling factors used to scale the prior value /// in the parameter memory. /// \param [out] diff_scale Pointer to differential scale data. /// \param [out] diff_bias Pointer to differential bias data. /// \param [in] saved_mean Pointer to optional cache saved mean data in forward. /// \param [in] saved_var Pointer to optional cache saved variance data in forward. /// \param [in] workspace_size Size of workspace. /// \param [in] workspace Pointer to workspace used for backward propagation. /// \returns An event representing the batch normalization backward operations. 
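/// \par Example
/// A brief usage sketch (not part of the original API documentation) of
/// preparing the combined scale/bias/mean/variance descriptor this overload
/// expects. It assumes memory_desc_ext is default-constructible and that
/// src_desc already describes the source tensor:
/// \code
/// memory_desc_ext diff_scale_bias_mean_var_desc;
/// engine_ext::derive_batch_normalization_memory_desc(
///     diff_scale_bias_mean_var_desc, src_desc,
///     batch_normalization_mode::spatial);
/// // diff_scale_bias_mean_var_desc can now be passed as the descriptor for
/// // scale, diff_scale and diff_bias in the call below.
/// \endcode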
sycl::event async_batch_normalization_backward( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, const memory_desc_ext &diff_summand_desc, void *diff_summand, float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale, void *bias, float beta_param, void *diff_scale, void *diff_bias, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace); /// Computing the gradient of a specified batch normalization function /// asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] ops Batch normalization operation mode. This mode can set to /// perform only batch normalization, or batch normalization followed by /// activation, or batch normalization followed by element-wise addition and /// activation. /// \param [in] adesc Activation operation descriptor. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] alpha_data Value to scaling factors used to scale the computed /// data value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] beta_data Value to scaling factors used to scale the prior value /// in the data memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \param [in] diff_summand_desc Differential summand memory descriptor. /// \param [out] diff_summand Pointer to differential summand data. /// \param [in] alpha_param Value to scaling factors used to scale the computed /// parameter value. /// \param [in] diff_scale_bias_desc Differential scale, bias memory descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] bias Pointer to bias data. /// \param [in] beta_param Value to scaling factors used to scale the prior value /// in the parameter memory. /// \param [out] diff_scale Pointer to differential scale data. /// \param [out] diff_bias Pointer to differential bias data. /// \param [in] mean_var_desc Differential mean, variance memory descriptor. /// \param [in] saved_mean Pointer to optional cache saved mean data in forward. /// \param [in] saved_var Pointer to optional cache saved variance data in forward. /// \param [in] workspace_size Size of workspace. /// \param [in] workspace Pointer to workspace used for backward propagation. /// \returns An event representing the batch normalization backward operations. 
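/// \par Example
/// A minimal call sketch; all handles, descriptors and device pointers are
/// illustrative placeholders assumed to be created elsewhere, and the
/// argument order simply mirrors the declaration below:
/// \code
/// // The workspace must be the one produced by the matching
/// // async_batch_normalization_forward_training call.
/// size_t workspace_size =
///     handle.get_batch_normalization_workspace_size(ops, src_desc);
/// sycl::event e = handle.async_batch_normalization_backward(
///     batch_normalization_mode::spatial, ops, adesc, /*epsilon=*/1e-5f,
///     /*alpha_data=*/1.f, src_desc, src, dst_desc, dst, diff_dst_desc,
///     diff_dst, /*beta_data=*/0.f, diff_src_desc, diff_src,
///     diff_summand_desc, diff_summand, /*alpha_param=*/1.f,
///     diff_scale_bias_desc, scale, bias, /*beta_param=*/0.f, diff_scale,
///     diff_bias, mean_var_desc, saved_mean, saved_var, workspace_size,
///     workspace);
/// \endcode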
sycl::event async_batch_normalization_backward( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, const memory_desc_ext &diff_summand_desc, void *diff_summand, float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias, float beta_param, void *diff_scale, void *diff_bias, const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace); /// Computing a specified convolution function value asynchronously. /// \param [in] desc Convolution descriptor. /// \param [in] alg Convolution algorithm. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] weight_desc Weight memory descriptor. /// \param [in] weight Pointer to weight data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \returns An event representing the convolution forward operations. sycl::event async_convolution_forward(convolution_desc &desc, ::dnnl::algorithm alg, float alpha, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &weight_desc, void *weight, float beta, const memory_desc_ext &dst_desc, void *dst); /// Computing a specified convolution function value asynchronously. /// \param [in] desc Convolution descriptor. /// \param [in] alg Convolution algorithm. /// \param [in] adesc Activation operation descriptor. /// \param [in] alpha_0 Value to scaling factors used to scale the data /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] weight_desc Weight memory descriptor. /// \param [in] weight Pointer to weight data. /// \param [in] alpha_1 Value to scaling factors used to scale the summand /// value. /// \param [in] summand_desc Summand memory descriptor. /// \param [in] summand Pointer to summand data. /// \param [in] bias_desc Bias memory descriptor. /// \param [in] bias Pointer to bias data. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \returns An event representing the convolution forward operations. sycl::event async_convolution_forward( convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc, float alpha_0, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &weight_desc, void *weight, float alpha_1, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &bias_desc, void *bias, const memory_desc_ext &dst_desc, void *dst); /// Computing the data gradient of a specified convolution function asynchronously. /// \param [in] desc Convolution descriptor. /// \param [in] alg Convolution algorithm. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] weight_desc Weight memory descriptor. /// \param [in] weight Pointer to weight data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. 
/// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \returns An event representing the convolution backward data operations. sycl::event async_convolution_backward_data( convolution_desc &desc, ::dnnl::algorithm alg, float alpha, const memory_desc_ext &weight_desc, void *weight, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_src_desc, void *diff_src); /// Computing the weight gradient of a specified convolution function /// asynchronously. /// \param [in] desc Convolution descriptor. /// \param [in] alg Convolution algorithm. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] diff_weight_desc Differential weight memory descriptor. /// \param [out] diff_weight Pointer to differential weight data. /// \returns An event representing the convolution backward weight operations. sycl::event async_convolution_backward_weight( convolution_desc &desc, ::dnnl::algorithm alg, float alpha, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_weight_desc, void *diff_weight); /// Computing the bias gradient of a specified convolution function /// asynchronously. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] diff_bias_desc Differential bias memory descriptor. /// \param [out] diff_bias Pointer to differential bias data. /// \returns An event representing the convolution backward bias operations. sycl::event async_convolution_backward_bias(float alpha, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_bias_desc, void *diff_bias); /// Getting the required weight space size for specified rnn operation. /// \param [in] desc RNN descriptor. /// \param [out] weight_space_size Size of required weight space. void rnn_get_weight_space_size(const rnn_desc &desc, size_t *weight_space_size); /// Getting the required scratchpad size and workspace size for specified rnn operation. /// \param [in] desc RNN descriptor. /// \param [in] kind Propagation kind. /// \param [in] src_desc Source memory descriptor. /// \param [out] scratchpad_size Size of required scratchpad. /// \param [out] workspace_size Size of required workspace. void rnn_get_scratchpad_workspace_size(const rnn_desc &desc, ::dnnl::prop_kind kind, const memory_desc_ext &src_desc, size_t *scratchpad_size, size_t *workspace_size); /// Computing a specified rnn function value asynchronously. /// \param [in] desc RNN descriptor. /// \param [in] kind Propagation kind. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] dst_desc Destination memory descriptor. 
/// \param [out] dst Pointer to destination data.
/// \param [in] iter_desc Recurrent hidden state data memory descriptor.
/// \param [in] src_iter Pointer to input recurrent hidden state data.
/// \param [out] dst_iter Pointer to output recurrent hidden state data.
/// \param [in] iter_c_desc Recurrent cell state data memory descriptor.
/// \param [in] src_iter_c Pointer to input recurrent cell state data.
/// \param [out] dst_iter_c Pointer to output recurrent cell state data.
/// \param [in] weight_size Size of weight memory.
/// \param [in] weight Pointer to weight data.
/// \param [in] scratchpad_size Size of scratchpad memory.
/// \param [in] scratchpad Pointer to scratchpad data.
/// \param [in] workspace_size Size of workspace memory.
/// \param [in] workspace Pointer to workspace data.
/// \returns An event representing the status of rnn forward operations.
sycl::event async_rnn_forward(const rnn_desc &desc, ::dnnl::prop_kind kind,
                              const memory_desc_ext &src_desc, void *src,
                              const memory_desc_ext &dst_desc, void *dst,
                              const memory_desc_ext &iter_desc, void *src_iter,
                              void *dst_iter,
                              const memory_desc_ext &iter_c_desc,
                              void *src_iter_c, void *dst_iter_c,
                              size_t weight_size, void *weight,
                              size_t scratchpad_size, void *scratchpad,
                              size_t workspace_size, void *workspace);

/// Computing the data and weight gradient of a specified rnn function
/// asynchronously.
/// \param [in] desc RNN descriptor.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] iter_desc Recurrent hidden state data memory descriptor.
/// \param [in] src_iter Pointer to input recurrent hidden state data.
/// \param [in] diff_dst_iter Pointer to differential output recurrent hidden
/// state data.
/// \param [out] diff_src_iter Pointer to differential input recurrent hidden
/// state data.
/// \param [in] iter_c_desc Recurrent cell state data memory descriptor.
/// \param [in] src_iter_c Pointer to input recurrent cell state data.
/// \param [in] diff_dst_iter_c Pointer to differential output recurrent cell
/// state data.
/// \param [out] diff_src_iter_c Pointer to differential input recurrent cell
/// state data.
/// \param [in] weight_size Size of weight memory.
/// \param [in] weight Pointer to weight data.
/// \param [out] diff_weight Pointer to differential weight data.
/// \param [in] scratchpad_size Size of scratchpad memory.
/// \param [in] scratchpad Pointer to scratchpad data.
/// \param [in] workspace_size Size of workspace memory.
/// \param [in] workspace Pointer to workspace data.
/// \returns An event representing the status of rnn backward operations.
sycl::event async_rnn_backward(
    const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst,
    void *diff_dst, const memory_desc_ext &src_desc, void *src,
    void *diff_src, const memory_desc_ext &iter_desc, void *src_iter,
    void *diff_dst_iter, void *diff_src_iter,
    const memory_desc_ext &iter_c_desc, void *src_iter_c,
    void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size,
    void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad,
    size_t workspace_size, void *workspace);

/// Getting the required state size for specified dropout operation.
/// \returns Required size of state.
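/// \par Example
/// A minimal sketch (handle names are illustrative and dropout_desc is
/// assumed to be default-constructible here) of sizing and seeding the
/// dropout random-number state before use:
/// \code
/// size_t state_size = handle.get_dropout_state_size();
/// void *state = sycl::malloc_device(state_size, *handle.get_queue());
/// dropout_desc desc;
/// desc.set(handle, /*p=*/0.5f, state, state_size, /*seed=*/1234ULL);
/// \endcode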
size_t get_dropout_state_size(); /// Getting the required workspace size for dropout operation. /// \param [in] src_desc Source memory descriptor. /// \returns Required size of workspace. static size_t get_dropout_workspace_size(const memory_desc_ext &src_desc); /// Computing a specified dropout function value asynchronously. /// \param [in] desc Dropout descriptor. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [in] workspace Pointer to workspace data. /// \param [in] workspace_size Size of workspace memory. /// \returns An event representing the dropout forward operations. sycl::event async_dropout_forward(dropout_desc &desc, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, void *workspace, size_t workspace_size); /// Computing the gradient of a specified dropout function asynchronously. /// \param [in] desc Dropout descriptor. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \param [in] workspace Pointer to workspace data. /// \param [in] workspace_size Size of workspace memory. /// \returns An event representing the dropout backward operations. sycl::event async_dropout_backward(dropout_desc &desc, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &diff_src_desc, void *diff_src, void *workspace, size_t workspace_size); }; inline void dropout_desc::restore(engine_ext &engine, float p, void *state, size_t state_size, unsigned long long seed) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) " "Interfaces Project does not support this API."); #else if (state) { std::int64_t required_state_size = engine.get_dropout_state_size(); if (state_size < required_state_size) { throw std::runtime_error("restore: state_size less than required state size."); } sycl::queue *q = engine.get_queue(); _imp->_p = p; _imp->_seed = seed; _imp->_state = state; _imp->_host_state = std::vector<std::uint8_t>(required_state_size); q->memcpy(_imp->_host_state.data(), _imp->_state, required_state_size).wait(); _imp->_rng_engine = oneapi::mkl::rng::load_state<rng_engine_t>( *q, _imp->_host_state.data()); } #endif } inline void dropout_desc::set(engine_ext &engine, float p, void *state, size_t state_size, unsigned long long seed) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) " "Interfaces Project does not support this API."); #else _imp->_p = p; if (state) { std::int64_t required_state_size = engine.get_dropout_state_size(); if (state_size < required_state_size) { throw std::runtime_error("set: no sufficient memory to save states."); } sycl::queue *q = engine.get_queue(); _imp->_seed = seed; _imp->_state = state; _imp->_host_state = std::vector<std::uint8_t>(required_state_size); _imp->_rng_engine = rng_engine_t(*q, seed); oneapi::mkl::rng::save_state(_imp->_rng_engine, _imp->_host_state.data()); q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size).wait(); } #endif } inline ::dnnl::memory::data_type memory_desc_ext::to_dnnl_data_type(dpct::library_data_t dt) { using dnnl_dt = ::dnnl::memory::data_type; switch (dt) { case dpct::library_data_t::real_half: return 
dnnl_dt::f16; case dpct::library_data_t::real_bfloat16: return dnnl_dt::bf16; case dpct::library_data_t::real_float: return dnnl_dt::f32; case dpct::library_data_t::real_int32: return dnnl_dt::s32; case dpct::library_data_t::real_int8: return dnnl_dt::s8; case dpct::library_data_t::real_uint8: return dnnl_dt::u8; case dpct::library_data_t::real_int8_4: return dnnl_dt::s8; case dpct::library_data_t::real_int8_32: return dnnl_dt::s8; case dpct::library_data_t::real_uint8_4: return dnnl_dt::u8; default: throw std::runtime_error("to_dnnl_data_type: unsupported data type."); } } inline dpct::library_data_t memory_desc_ext::to_dpct_library_data_t(::dnnl::memory::data_type dt, unsigned block_size) { using dpct_dt = dpct::library_data_t; using dnnl_dt = ::dnnl::memory::data_type; switch (dt) { case dnnl_dt::f16: return dpct_dt::real_half; case dnnl_dt::bf16: return dpct_dt::real_bfloat16; case dnnl_dt::f32: return dpct_dt::real_float; case dnnl_dt::s32: return dpct_dt::real_int32; case dnnl_dt::s8: if (block_size == 4) { return dpct_dt::real_int8_4; } else if (block_size == 32) { return dpct_dt::real_int8_32; } else { return dpct_dt::real_int8; } case dnnl_dt::u8: if (block_size == 4) { return dpct_dt::real_uint8_4; } else { return dpct_dt::real_uint8; } default: throw std::runtime_error("to_dpct_library_data_t: unsupported data type " "dnnl::memory::data_type::undef."); } } inline ::dnnl::memory::format_tag memory_desc_ext::to_dnnl_format_tag(dpct::library_data_t dt, memory_format_tag tag) { using dpct_dt = dpct::library_data_t; using dpct_tag = memory_format_tag; using dnnl_tag = ::dnnl::memory::format_tag; switch (tag) { case dpct_tag::nchw: return dnnl_tag::nchw; case dpct_tag::nhwc: return dnnl_tag::nhwc; default: if (dt == dpct_dt::real_int8_32) { return dnnl_tag::nChw32c; } else { return dnnl_tag::nChw4c; } } } inline void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt, int n, int c, int h, int w) { _desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt), to_dnnl_format_tag(dt, tag)); } inline void memory_desc_ext::set(dpct::library_data_t dt, int n, int c, int h, int w, int n_stride, int c_stride, int h_stride, int w_stride) { _desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt), {n_stride, c_stride, h_stride, w_stride}); } inline void memory_desc_ext::set(dpct::library_data_t dt, int ndims, const int dims[], const int strides[]) { _desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt), {strides, strides + ndims}); } inline void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt, int ndims, const int dims[]) { _desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt), to_dnnl_format_tag(dt, tag)); } inline void memory_desc_ext::set(rnn_memory_format_tag tag, dpct::library_data_t dt, int t, int n, int c) { if (tag == rnn_memory_format_tag::tnc) { _desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt), ::dnnl::memory::format_tag::tnc); } else if(tag == rnn_memory_format_tag::ntc) { _desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt), ::dnnl::memory::format_tag::ntc); } else { throw std::runtime_error("set: unsupported memory format tag."); } } inline void memory_desc_ext::get(dpct::library_data_t *dt, int *n, int *c, int *h, int *w, int *n_stride, int *c_stride, int *h_stride, int *w_stride) const { unsigned block_size = 1; auto dims = _desc.get_dims(); auto inner_blks = _desc.get_inner_blks(); auto strides = _desc.get_strides(); if (!inner_blks.empty()) { block_size = inner_blks[0]; 
} *dt = to_dpct_library_data_t(_desc.get_data_type(), block_size); *n = dims[0]; *c = dims[1]; *h = dims[2]; *w = dims[3]; *n_stride = strides[0] / block_size; *c_stride = strides[1] / block_size; *h_stride = strides[2] / block_size; *w_stride = strides[3] / block_size; } inline void memory_desc_ext::get(dpct::library_data_t *dt, memory_format_tag *tag, int *n, int *c, int *h, int *w) const { unsigned block_size = 1; *tag = memory_format_tag::nchw; auto dims = _desc.get_dims(); auto strides = _desc.get_strides(); auto inner_blks = _desc.get_inner_blks(); if (!inner_blks.empty()) { block_size = inner_blks[0]; *tag = memory_format_tag::nchw_blocked; } if (strides[1] == 1 && dims[1] != 1) { *tag = memory_format_tag::nhwc; } *dt = to_dpct_library_data_t(_desc.get_data_type(), block_size); *n = dims[0]; *c = dims[1]; *h = dims[2]; *w = dims[3]; } inline void memory_desc_ext::get(dpct::library_data_t *dt, rnn_memory_format_tag *tag, int *t, int *n, int *c) const { auto dims = _desc.get_dims(); auto strides = _desc.get_strides(); if (strides[0] >= strides[1]) { *tag = rnn_memory_format_tag::tnc; } else { *tag = rnn_memory_format_tag::ntc; } *dt = to_dpct_library_data_t(_desc.get_data_type(), 1); *t = dims[0]; *n = dims[1]; *c = dims[2]; } inline void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt, int *ndims, int dims[], int strides[]) const { unsigned block_size = 1; auto inner_blks = _desc.get_inner_blks(); auto adims = _desc.get_dims(); auto astrides = _desc.get_strides(); if (!inner_blks.empty()) { block_size = inner_blks[0]; } *dt = to_dpct_library_data_t(_desc.get_data_type(), block_size); *ndims = _desc.get_ndims(); for (int index = 0; index < requested_ndims; index++) { dims[index] = adims[index]; strides[index] = astrides[index] / block_size; } } inline void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt, memory_format_tag *tag, int *ndims, int dims[]) const { unsigned block_size = 1; *tag = memory_format_tag::nchw; auto inner_blks = _desc.get_inner_blks(); auto adims = _desc.get_dims(); auto astrides = _desc.get_strides(); if (!inner_blks.empty()) { block_size = inner_blks[0]; *tag = memory_format_tag::nchw_blocked; } if (astrides[1] == 1 && adims[1] != 1) { *tag = memory_format_tag::nhwc; } *dt = to_dpct_library_data_t(_desc.get_data_type(), block_size); *ndims = _desc.get_ndims(); for (int index = 0; index < requested_ndims; index++) { dims[index] = adims[index]; } } inline void engine_ext::get_rnn_configuration(const ::dnnl::memory::desc &desc, rnn_direction direction, rnn_mode mode, dpct::library_data_t dt, int hidden_size, ::dnnl::memory::data_type *dnnl_dt, ::dnnl::memory::format_tag *tag, int *projection_size, int *output_size, int *seq_length, int *batch_size, int *direction_num, int *gate_num) { if (!desc.is_zero()) { auto dims = desc.get_dims(); auto strides = desc.get_strides(); if (strides[0] >= strides[1]) { *tag = ::dnnl::memory::format_tag::tnc; *seq_length = dims[0]; *batch_size = dims[1]; } else { *tag = ::dnnl::memory::format_tag::ntc; *seq_length = dims[1]; *batch_size = dims[0]; } } if (direction == rnn_direction::bidirectional) { *direction_num = 2; } else { *direction_num = 1; } if (mode == rnn_mode::lstm) { *gate_num = 4; } else if (mode == rnn_mode::gru) { *gate_num = 3; } else { *gate_num = 1; } if (*projection_size != hidden_size) { *output_size = *projection_size; } else { *projection_size = 0; *output_size = hidden_size; } *dnnl_dt = memory_desc_ext::to_dnnl_data_type(dt); } inline void *engine_ext::allocate(const 
memory_desc_ext &data_desc, int count) const { size_t mem_size = data_desc.get_size(); void *mem = sycl::malloc_device(mem_size * count, *_q); return mem; } inline void engine_ext::transform_no_zero(const memory_desc_ext &desc, void *src, void *dst) { ::dnnl::memory::data_type dt = desc.get_desc().get_data_type(); size_t element_num = desc.get_element_num(); switch (dt) { case ::dnnl::memory::data_type::f32: transform_no_zero_with_type<float>(_q, src, dst, element_num); break; case ::dnnl::memory::data_type::f16: transform_no_zero_with_type<sycl::half>(_q, src, dst, element_num); break; case ::dnnl::memory::data_type::s32: transform_no_zero_with_type<int32_t>(_q, src, dst, element_num); break; case ::dnnl::memory::data_type::s8: transform_no_zero_with_type<int8_t>(_q, src, dst, element_num); break; case ::dnnl::memory::data_type::u8: transform_no_zero_with_type<uint8_t>(_q, src, dst, element_num); break; default: throw std::runtime_error("transform_no_zero: unsupported data type."); } } inline ::dnnl::memory::desc engine_ext::get_group_weight_desc(int group_count, const memory_desc_ext &weight_desc) { if (group_count == 1) { return weight_desc.get_desc(); } auto help_weight_desc = weight_desc.get_desc(); int ndims = help_weight_desc.get_ndims(); if (!help_weight_desc.get_inner_blks().empty()) { throw std::runtime_error("get_group_weight_desc: group convolution with " "blocked weight memory unimplemented."); } std::vector<int64_t> new_size; auto old_size = weight_desc.get_dims(); new_size.push_back(group_count); new_size.push_back(old_size[0] / group_count); for (int index = 1; index < old_size.size(); index++) { new_size.push_back(old_size[index]); } std::vector<int64_t> strides = help_weight_desc.get_strides(); ::dnnl::memory::format_tag tag; bool is_nhwc = (strides[1] == 1 && old_size[1] != 1); if (ndims == 4) { if (is_nhwc) { tag = ::dnnl::memory::format_tag::gohwi; } else { tag = ::dnnl::memory::format_tag::goihw; } } else if (ndims == 5) { if (is_nhwc) { tag = ::dnnl::memory::format_tag::godhwi; } else { tag = ::dnnl::memory::format_tag::goidhw; } } help_weight_desc = ::dnnl::memory::desc(new_size, weight_desc.get_desc().get_data_type(), tag); return help_weight_desc; } inline ::dnnl::memory::desc engine_ext::compress_spatial_dimensions_to_channel( const ::dnnl::memory::desc &desc) { int ndims = desc.get_ndims(); auto dims = desc.get_dims(); auto inner_blks = desc.get_inner_blks(); assert(ndims >= 4 && "ndims is at least 4."); std::vector<int64_t> compressed_dims(ndims); compressed_dims[0] = dims[0]; compressed_dims[1] = dims[1]; for (int index = 2; index < ndims; index++) { compressed_dims[1] = compressed_dims[1] * dims[index]; compressed_dims[index] = 1; } if (!inner_blks.empty() && inner_blks[0] == 4) { return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(), ::dnnl::memory::format_tag::nChw4c); } else if (!inner_blks.empty() && inner_blks[0] == 32) { return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(), ::dnnl::memory::format_tag::nChw32c); } std::vector<int64_t> strides(ndims, 1); strides[0] = compressed_dims[1]; return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(), strides); } inline ::dnnl::memory::desc engine_ext::get_bn_scale_bias_mean_var_desc(const ::dnnl::memory::desc &desc, batch_normalization_mode mode) { int ndims = desc.get_ndims(); auto dims = desc.get_dims(); assert(ndims >= 4 && "ndims is at least 4."); int channel_num = 1; if (mode == batch_normalization_mode::spatial) { channel_num = dims[1]; } else { for (int index = 1; index < 
ndims; index++) {
      channel_num = channel_num * dims[index];
    }
  }
  return ::dnnl::memory::desc({channel_num}, desc.get_data_type(),
                              ::dnnl::memory::format_tag::a);
}

inline ::dnnl::memory::desc
engine_ext::transfer_memory_desc_to_channel_major_format(
    const ::dnnl::memory::desc &desc) {
  if (!desc.get_inner_blks().empty()) {
    return desc;
  }
  int ndims = desc.get_ndims();
  auto dims = desc.get_dims();
  if (ndims == 4) {
    return ::dnnl::memory::desc(dims, desc.get_data_type(),
                                ::dnnl::memory::format_tag::nchw);
  }
  return ::dnnl::memory::desc(dims, desc.get_data_type(),
                              ::dnnl::memory::format_tag::ncdhw);
}

/// If alpha = 0 and beta = 1, the destination (dst = alpha * out +
/// beta * prior_dst) is unchanged. In that case this function returns true,
/// meaning the operation can exit directly.
inline bool engine_ext::scale_parameter_preprocess(
    const std::vector<output_argument_info> &args) {
  bool direct_exit = true;
  for (auto &arg : args) {
    if (arg._alpha == 0.f) {
      if (arg._beta != 1.f) {
        async_scale(arg._beta, arg._desc, arg._data);
      }
    } else {
      direct_exit = false;
    }
  }
  return direct_exit;
}

inline void engine_ext::derive_batch_normalization_memory_desc(
    memory_desc_ext &scale_bias_desc, memory_desc_ext &mean_var_desc,
    const memory_desc_ext &src_desc, batch_normalization_mode mode) {
  derive_batch_normalization_memory_desc(scale_bias_desc, src_desc, mode);
  derive_batch_normalization_memory_desc(mean_var_desc, src_desc, mode);
}

inline void engine_ext::derive_batch_normalization_memory_desc(
    memory_desc_ext &desc, const memory_desc_ext &src_desc,
    batch_normalization_mode mode) {
  int src_ndims = src_desc.get_desc().get_ndims();
  auto inner_blks = src_desc.get_desc().get_inner_blks();
  if (src_ndims != 4 && src_ndims != 5) {
    throw std::runtime_error("derive_batch_normalization_memory_desc: only 4d "
                             "and 5d memory descriptor supported.");
  }
  std::vector<int64_t> dims = src_desc.get_dims();
  dims[0] = 1;
  if (mode == batch_normalization_mode::spatial) {
    dims[2] = 1;
    dims[3] = 1;
    if (src_ndims == 5) {
      dims[4] = 1;
    }
  }
  auto data_type = src_desc.get_desc().get_data_type();
  if (data_type == ::dnnl::memory::data_type::f16) {
    data_type = ::dnnl::memory::data_type::f32;
  }
  if (!inner_blks.empty() && inner_blks[0] == 4) {
    desc.set_desc(::dnnl::memory::desc(dims, data_type,
                                       ::dnnl::memory::format_tag::nChw4c));
  } else if (!inner_blks.empty() && inner_blks[0] == 32) {
    desc.set_desc(::dnnl::memory::desc(dims, data_type,
                                       ::dnnl::memory::format_tag::nChw32c));
  } else {
    if (src_ndims == 4) {
      desc.set_desc(::dnnl::memory::desc(dims, data_type,
                                         ::dnnl::memory::format_tag::nchw));
    } else {
      desc.set_desc(::dnnl::memory::desc(dims, data_type,
                                         ::dnnl::memory::format_tag::ncdhw));
    }
  }
}

template <typename primitive_type>
sycl::event engine_ext::execute_primitive(
    const std::pair<detail::primitive_cache_key_type, primitive_type *>
        &primitive,
    std::unordered_map<int, ::dnnl::memory> *args,
    const std::vector<output_argument_info> &output_args,
    const std::vector<void *> &device_ptrs) {
  std::vector<void *> caches;
  int output_arg_num = output_args.size();
  for (int i = 0; i < output_arg_num; i++) {
    if (output_args[i]._beta != 0.f) {
      auto cache = allocate(output_args[i]._desc);
      caches.push_back(cache);
      args->insert(
          {output_args[i]._name,
           ::dnnl::memory(output_args[i]._desc.get_desc(), _eng, cache)});
    } else {
      args->insert(
          {output_args[i]._name,
           ::dnnl::memory(output_args[i]._desc.get_desc(), _eng,
                          output_args[i]._data)});
    }
  }

  auto e = ::dnnl::sycl_interop::execute(
*(static_cast<primitive_type *>(primitive.second)), _s, *args); _primitive_cache.put( primitive.first, primitive.second, [](::dnnl::primitive *p) { delete static_cast<primitive_type *>(p); }, e); int cache_index = 0; for (int i = 0; i < output_arg_num; i++) { if (output_args[i]._beta != 0.f) { e = async_sum(output_args[i]._alpha, output_args[i]._desc, caches[cache_index++], output_args[i]._beta, output_args[i]._desc, output_args[i]._data); } else { if (output_args[i]._alpha != 1.f) { e = async_scale(output_args[i]._alpha, output_args[i]._desc, output_args[i]._data); } } } caches.insert(caches.end(), device_ptrs.begin(), device_ptrs.end()); async_free(_q, e, args, caches); return e; } inline ::dnnl::memory::desc engine_ext::bn_reorder_memory_to_channel_major_format( bool is_input, ::dnnl::memory::desc &desc, void *src, void **cache, std::vector<void *> &caches) { ::dnnl::memory::desc result; result = transfer_memory_desc_to_channel_major_format(desc); if ((result != desc) || !src) { *cache = allocate(desc); if (is_input && src) { async_reorder(1.f, desc, src, 0.f, result, *cache); } caches.push_back(*cache); } return result; } inline sycl::event engine_ext::batch_normalization_backward_internal( batch_normalization_mode mode, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias, float beta_param, void *diff_scale, void *diff_bias, const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var) { if (scale_parameter_preprocess( {{alpha_data, beta_data, diff_src_desc, diff_src}, {alpha_param, beta_param, diff_scale_bias_desc, diff_scale}, {alpha_param, beta_param, diff_scale_bias_desc, diff_bias}})) { return sycl::event(); } std::vector<void *> caches; void *reordered_src = nullptr, *reordered_diff_dst = nullptr, *reordered_diff_src = nullptr, *reordered_scale = nullptr, *reordered_bias = nullptr, *reordered_diff_scale = nullptr, *reordered_diff_bias = nullptr, *reordered_saved_mean = nullptr, *reordered_saved_var = nullptr; ::dnnl::memory::desc help_src_desc = src_desc.get_desc(); ::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc(); ::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc(); ::dnnl::memory::desc help_diff_scale_bias_desc = diff_scale_bias_desc.get_desc(); ::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc(); ::dnnl::memory::desc actual_diff_src_desc = help_diff_src_desc; ::dnnl::memory::desc actual_diff_scale_bias_desc = help_diff_scale_bias_desc; if (mode == batch_normalization_mode::per_activation) { help_src_desc = bn_reorder_memory_to_channel_major_format(true, help_src_desc, src, &reordered_src, caches); help_diff_dst_desc = bn_reorder_memory_to_channel_major_format( true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches); help_diff_src_desc = bn_reorder_memory_to_channel_major_format( false, help_diff_src_desc, diff_src, &reordered_diff_src, caches); actual_diff_src_desc = help_diff_src_desc; help_diff_scale_bias_desc = bn_reorder_memory_to_channel_major_format( true, help_diff_scale_bias_desc, scale, &reordered_scale, caches); actual_diff_scale_bias_desc = help_diff_scale_bias_desc; if (bias) { bn_reorder_memory_to_channel_major_format(true, help_diff_scale_bias_desc, bias, &reordered_bias, caches); } bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc, 
diff_scale, &reordered_diff_scale, caches); bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc, diff_bias, &reordered_diff_bias, caches); help_mean_var_desc = bn_reorder_memory_to_channel_major_format( true, help_mean_var_desc, saved_mean, &reordered_saved_mean, caches); bn_reorder_memory_to_channel_major_format(true, help_mean_var_desc, saved_var, &reordered_saved_var, caches); help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc); help_diff_src_desc = compress_spatial_dimensions_to_channel(help_diff_src_desc); help_diff_dst_desc = compress_spatial_dimensions_to_channel(help_diff_dst_desc); } else { if ((help_src_desc != help_diff_dst_desc) || (help_src_desc != help_diff_src_desc) || (help_diff_dst_desc != help_diff_src_desc)) { help_src_desc = bn_reorder_memory_to_channel_major_format( true, help_src_desc, src, &reordered_src, caches); help_diff_dst_desc = bn_reorder_memory_to_channel_major_format( true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches); help_diff_src_desc = bn_reorder_memory_to_channel_major_format( false, help_diff_src_desc, diff_src, &reordered_diff_src, caches); actual_diff_src_desc = help_diff_src_desc; } } help_diff_scale_bias_desc = get_bn_scale_bias_mean_var_desc(help_diff_scale_bias_desc, mode); help_mean_var_desc = get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode); auto forward_primitive = create_primitive_desc<::dnnl::batch_normalization_forward>( ::dnnl::prop_kind::forward_training, help_src_desc, help_diff_dst_desc, epsilon, ::dnnl::normalization_flags::use_scale | ::dnnl::normalization_flags::use_shift); auto primitive = create_primitive<::dnnl::batch_normalization_backward>( ::dnnl::prop_kind::backward, help_diff_src_desc, help_diff_dst_desc, help_src_desc, epsilon, ::dnnl::normalization_flags::use_scale | ::dnnl::normalization_flags::use_shift, forward_primitive); void *dst_cache = nullptr; if (!saved_mean && !saved_var) { dst_cache = allocate(diff_dst_desc); if (!reordered_saved_mean) { reordered_saved_mean = allocate(mean_var_desc); caches.push_back(reordered_saved_mean); } if (!reordered_saved_var) { reordered_saved_var = allocate(mean_var_desc); caches.push_back(reordered_saved_var); } if (!bias) { _q->fill(reordered_bias, 0, diff_scale_bias_desc.get_size()); } batch_normalization_forward_internal( true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, diff_dst_desc, dst_cache, diff_scale_bias_desc, scale, bias ? bias : reordered_bias, mean_var_desc, reordered_saved_mean, reordered_saved_var, nullptr, nullptr); caches.push_back(dst_cache); } auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(help_src_desc, _eng, reordered_src ? reordered_src : src)}}, {DNNL_ARG_SCALE, {::dnnl::memory(help_diff_scale_bias_desc, _eng, reordered_scale ? reordered_scale : scale)}}, {DNNL_ARG_MEAN, {::dnnl::memory(help_mean_var_desc, _eng, reordered_saved_mean ? reordered_saved_mean : saved_mean)}}, {DNNL_ARG_VARIANCE, {::dnnl::memory(help_mean_var_desc, _eng, reordered_saved_var ? reordered_saved_var : saved_var)}}, {DNNL_ARG_DIFF_DST, {::dnnl::memory(help_diff_src_desc, _eng, reordered_diff_dst ? reordered_diff_dst : diff_dst)}}}; sycl::event e = execute_primitive( primitive, execution_args, {{alpha_data, beta_data, DNNL_ARG_DIFF_SRC, help_diff_src_desc, reordered_diff_src ? reordered_diff_src : diff_src}, {alpha_param, beta_param, DNNL_ARG_DIFF_SCALE, help_diff_scale_bias_desc, reordered_diff_scale ? 
reordered_diff_scale : diff_scale}, {alpha_param, beta_param, DNNL_ARG_DIFF_SHIFT, help_diff_scale_bias_desc, reordered_diff_bias ? reordered_diff_bias : diff_bias}}); if (actual_diff_src_desc != diff_src_desc.get_desc() && reordered_diff_src) { e = async_reorder(1.f, actual_diff_src_desc, reordered_diff_src, 0.f, diff_src_desc, diff_src); } if (actual_diff_scale_bias_desc != diff_scale_bias_desc.get_desc() && reordered_diff_scale && reordered_diff_bias) { async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_scale, 0.f, diff_scale_bias_desc, diff_scale); e = async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_bias, 0.f, diff_scale_bias_desc, diff_bias); } _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { for (auto ptr : caches) { sycl::free(ptr, *_q); } }); }); return e; } inline sycl::event engine_ext::batch_normalization_forward_internal( bool is_infer, batch_normalization_mode mode, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &scale_bias_desc, void *scale, void *bias, const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var, void *running_mean, void *running_var) { if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) { return sycl::event(); } std::vector<void *> caches; void *reordered_src = nullptr, *reordered_dst = nullptr, *reordered_scale = nullptr, *reordered_bias = nullptr, *reordered_saved_mean = nullptr, *reordered_saved_var = nullptr; ::dnnl::memory::desc help_src_desc = src_desc.get_desc(); ::dnnl::memory::desc help_dst_desc = dst_desc.get_desc(); ::dnnl::memory::desc help_scale_bias_desc = scale_bias_desc.get_desc(); ::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc(); ::dnnl::memory::desc actual_dst_desc = help_dst_desc; ::dnnl::memory::desc actual_mean_var_desc = help_mean_var_desc; if (mode == batch_normalization_mode::per_activation) { help_src_desc = bn_reorder_memory_to_channel_major_format(true, help_src_desc, src, &reordered_src, caches); help_dst_desc = bn_reorder_memory_to_channel_major_format( false, help_dst_desc, dst, &reordered_dst, caches); actual_dst_desc = help_dst_desc; help_scale_bias_desc = bn_reorder_memory_to_channel_major_format( true, help_scale_bias_desc, scale, &reordered_scale, caches); bn_reorder_memory_to_channel_major_format(true, help_scale_bias_desc, bias, &reordered_bias, caches); help_mean_var_desc = bn_reorder_memory_to_channel_major_format( is_infer, help_mean_var_desc, saved_mean, &reordered_saved_mean, caches); actual_mean_var_desc = help_mean_var_desc; bn_reorder_memory_to_channel_major_format(is_infer, help_mean_var_desc, saved_var, &reordered_saved_var, caches); help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc); help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc); } else { if (help_src_desc != help_dst_desc) { help_src_desc = bn_reorder_memory_to_channel_major_format( true, help_src_desc, src, &reordered_src, caches); help_dst_desc = bn_reorder_memory_to_channel_major_format( false, help_dst_desc, dst, &reordered_dst, caches); actual_dst_desc = help_dst_desc; } } help_scale_bias_desc = get_bn_scale_bias_mean_var_desc(help_scale_bias_desc, mode); help_mean_var_desc = get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode); ::dnnl::prop_kind kind; ::dnnl::normalization_flags flag = ::dnnl::normalization_flags::use_scale | ::dnnl::normalization_flags::use_shift; if (is_infer) { kind = 
::dnnl::prop_kind::forward_inference; flag = ::dnnl::normalization_flags::use_global_stats | flag; } else { kind = ::dnnl::prop_kind::forward_training; } auto primitive = create_primitive<::dnnl::batch_normalization_forward>( kind, help_src_desc, help_dst_desc, epsilon, flag); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(help_src_desc, _eng, reordered_src ? reordered_src : src)}}, {DNNL_ARG_SCALE, {::dnnl::memory(help_scale_bias_desc, _eng, reordered_scale ? reordered_scale : scale)}}, {DNNL_ARG_SHIFT, {::dnnl::memory(help_scale_bias_desc, _eng, reordered_bias ? reordered_bias : bias)}}, {DNNL_ARG_MEAN, {::dnnl::memory(help_mean_var_desc, _eng, reordered_saved_mean ? reordered_saved_mean : saved_mean)}}, {DNNL_ARG_VARIANCE, {::dnnl::memory(help_mean_var_desc, _eng, reordered_saved_var ? reordered_saved_var : saved_var)}}}; sycl::event e = execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, help_dst_desc, reordered_dst ? reordered_dst : dst}}); if (!is_infer && running_var) { auto src_ndim = src_desc.get_desc().get_ndims(); auto src_dims = src_desc.get_dims(); int element_num = src_dims[0]; if (mode == batch_normalization_mode::spatial) { for (int index = 2; index < src_ndim; index++) { element_num *= src_dims[index]; } } float unbias_factor = element_num / (element_num - 1.f); async_scale(1.f - factor, mean_var_desc, running_var); e = async_sum(factor * unbias_factor, mean_var_desc, reordered_saved_var ? reordered_saved_var : saved_var, 1.f, mean_var_desc, running_var); } if (!is_infer && running_mean) { e = async_sum(factor, mean_var_desc, reordered_saved_mean ? reordered_saved_mean : saved_mean, (1.f - factor), mean_var_desc, running_mean); } if (reordered_dst && (actual_dst_desc != dst_desc.get_desc())) { e = async_reorder(1.f, actual_dst_desc, reordered_dst, 0.f, dst_desc, dst); } if (!is_infer && reordered_saved_mean && reordered_saved_var && saved_mean && saved_var && (actual_mean_var_desc != mean_var_desc.get_desc())) { e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_mean, 0.f, mean_var_desc, saved_mean); e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_var, 0.f, mean_var_desc, saved_var); } _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { for (auto ptr : caches) { sycl::free(ptr, *_q); } }); }); return e; } inline sycl::event engine_ext::rnn_forward_internal( const rnn_desc &desc, ::dnnl::prop_kind kind, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c, size_t weight_size, void *weight, size_t workspace_size, void *workspace, size_t scratchpad_size, void *scratchpad, bool is_get_execution_args, size_t *weight_size_query, size_t *workspace_size_query, size_t *scratchpad_size_query) { ::dnnl::memory::data_type src_dt; ::dnnl::memory::format_tag src_format_tag; rnn_mode mode; rnn_bias_mode bias_mode; rnn_direction direction; dpct::library_data_t dt; int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0, layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0, seq_length = 1, batch_size = 1; std::vector<void *> data = {src, dst, src_iter, dst_iter, src_iter_c, dst_iter_c, weight, workspace, scratchpad}; std::vector<int> offset(6, 0); void *input_layer_cache = nullptr, *hidden_layer_cache = nullptr; sycl::event e; desc.get(&mode, &bias_mode, &direction, &dt, 
&input_size, &hidden_size, &projection_size, &layer_size); get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size, &src_dt, &src_format_tag, &projection_size, &output_size, &seq_length, &batch_size, &direction_num, &gate_num); if (direction == rnn_direction::bidirectional) { // Here to combine the oneDNN bidirectional_sum and // bidirectional_concat config, so call execute_rnn_forward_primitive // twice. if (layer_size > 1) { if (!is_get_execution_args) { input_layer_cache = allocate(src_desc); hidden_layer_cache = allocate(src_desc); _q->memcpy(input_layer_cache, src, src_desc.get_size()); } data[0] = input_layer_cache; data[1] = hidden_layer_cache; e = execute_rnn_forward_primitive( mode, kind, ::dnnl::rnn_direction::bidirectional_sum, bias_mode, src_dt, src_format_tag, seq_length, batch_size, output_size, output_size, 1, direction_num, hidden_size, gate_num, projection_size, data, offset, layer_size - 1, weight_size_query, workspace_size_query, scratchpad_size_query); data[0] = ((layer_size - 1) % 2 == 0) ? input_layer_cache : hidden_layer_cache; data[1] = dst; } e = execute_rnn_forward_primitive( mode, kind, ::dnnl::rnn_direction::bidirectional_concat, bias_mode, src_dt, src_format_tag, seq_length, batch_size, output_size, 2 * output_size, 1, direction_num, hidden_size, gate_num, projection_size, data, offset, 1, weight_size_query, workspace_size_query, scratchpad_size_query); } else { e = execute_rnn_forward_primitive( mode, kind, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode, src_dt, src_format_tag, seq_length, batch_size, output_size, output_size, layer_size, direction_num, hidden_size, gate_num, projection_size, data, offset, 1, weight_size_query, workspace_size_query, scratchpad_size_query); } if (is_get_execution_args) { return e; } if (input_layer_cache && hidden_layer_cache) { _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { sycl::free(input_layer_cache, *_q); sycl::free(hidden_layer_cache, *_q); }); }); } return e; } inline sycl::event engine_ext::execute_rnn_forward_primitive( rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c, int dst_c, int layer_size, int direction_num, int hidden_size, int gate_num, int projection_size, std::vector<void *> &data, std::vector<int> &offset, int iter_num, size_t *weight_size, size_t *workspace_size, size_t *scratchpad_size) { sycl::event e; ::dnnl::primitive *p = nullptr; detail::primitive_cache_key_type key; std::unordered_map<int, ::dnnl::memory> *execution_args; ::dnnl::memory::desc bias_desc( {layer_size, direction_num, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldgo); ::dnnl::memory::desc weight_layer_desc( {layer_size, direction_num, projection_size ? projection_size : hidden_size, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldigo); ::dnnl::memory::desc weight_iter_desc( {layer_size, direction_num, projection_size ? 
projection_size : hidden_size, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldigo); ::dnnl::memory::desc projection_desc; if (projection_size) { projection_desc = ::dnnl::memory::desc( {layer_size, direction_num, hidden_size, projection_size}, dt, ::dnnl::memory::format_tag::ldio); } if (weight_size) { *weight_size += (weight_layer_desc.get_size() + weight_iter_desc.get_size() + projection_desc.get_size() + bias_desc.get_size()) * iter_num; return e; } ::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag); ::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag); ::dnnl::memory::desc iter_desc( {layer_size, direction_num, batch_size, projection_size ? projection_size : hidden_size}, dt, ::dnnl::memory::format_tag::ldnc); ::dnnl::memory::desc iter_c_desc( {layer_size, direction_num, batch_size, hidden_size}, dt, ::dnnl::memory::format_tag::ldnc); ::dnnl::memory::desc workspace_desc; ::dnnl::memory::desc scratchpad_desc; ::dnnl::primitive_attr attr; attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user); if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) { auto pd = create_primitive_desc<::dnnl::vanilla_rnn_forward>( kind, mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu : ::dnnl::algorithm::eltwise_tanh, direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc, iter_desc, attr); workspace_desc = pd.workspace_desc(); scratchpad_desc = pd.scratchpad_desc(); if (workspace_size && scratchpad_size) { *workspace_size += workspace_desc.get_size() * iter_num; *scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size ? scratchpad_desc.get_size() : *scratchpad_size; } else { auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_forward>(pd); key = r.first; p = r.second; } } else if (mode == rnn_mode::gru) { auto pd = create_primitive_desc<::dnnl::gru_forward>( kind, direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc, iter_desc, attr); workspace_desc = pd.workspace_desc(); scratchpad_desc = pd.scratchpad_desc(); if (workspace_size && scratchpad_size) { *workspace_size += workspace_desc.get_size() * iter_num; *scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size ? scratchpad_desc.get_size() : *scratchpad_size; } else { auto r = create_primitive_with_pd<::dnnl::gru_forward>(pd); key = r.first; p = r.second; } } else if (mode == rnn_mode::lstm) { auto pd = create_primitive_desc<::dnnl::lstm_forward>( kind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc, weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc, dst_desc, iter_desc, iter_c_desc, attr); workspace_desc = pd.workspace_desc(); scratchpad_desc = pd.scratchpad_desc(); if (workspace_size && scratchpad_size) { *workspace_size += workspace_desc.get_size() * iter_num; *scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size ? 
scratchpad_desc.get_size() : *scratchpad_size; } else { auto r = create_primitive_with_pd<::dnnl::lstm_forward>(pd); key = r.first; p = r.second; } } for (int i = 0; i < iter_num; i++) { void *in_cache = data[0], *out_cache = data[1], *dst_iter_c_cache = nullptr, *dst_iter_cache = ((uint8_t *)(data[3]) + offset[1]); if (mode == rnn_mode::lstm) { dst_iter_c_cache = (uint8_t *)(data[4]) + offset[2]; } if (!workspace_size) { execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[0])}}, {DNNL_ARG_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[1])}}, {DNNL_ARG_SCRATCHPAD, {::dnnl::memory(scratchpad_desc, _eng, data[8])}}}; auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data, int &offset) { execution_args->insert( {arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data + offset)}}); offset += d.get_size(); }; insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[0]); insert_args(DNNL_ARG_DST_ITER, iter_desc, data[3], offset[1]); if (mode == rnn_mode::lstm) { insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]); insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[5], offset[3]); } insert_args(DNNL_ARG_WEIGHTS_LAYER, weight_layer_desc, data[6], offset[4]); insert_args(DNNL_ARG_WEIGHTS_ITER, weight_iter_desc, data[6], offset[4]); if (projection_size) { insert_args(DNNL_ARG_WEIGHTS_PROJECTION, projection_desc, data[6], offset[4]); } if (bias_mode == rnn_bias_mode::none) { _q->memset((uint8_t *)(data[6]) + offset[4], 0, bias_desc.get_size()); } insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[4]); if (kind == ::dnnl::prop_kind::forward_training) { insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[5]); } if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) { execute_primitive<::dnnl::vanilla_rnn_forward>( {key, static_cast<::dnnl::vanilla_rnn_forward *>(p)}, execution_args); } else if (mode == rnn_mode::gru) { execute_primitive<::dnnl::gru_forward>( {key, static_cast<::dnnl::gru_forward *>(p)}, execution_args); } else if (mode == rnn_mode::lstm) { execute_primitive<::dnnl::lstm_forward>( {key, static_cast<::dnnl::lstm_forward *>(p)}, execution_args); } if (i != iter_num - 1) { std::swap(data[0], data[1]); } } if (kind == ::dnnl::prop_kind::forward_training) { if (workspace_size) { *workspace_size += (src_desc.get_size() + dst_desc.get_size() + iter_desc.get_size()); if (mode == rnn_mode::lstm) { *workspace_size += iter_c_desc.get_size(); } } else { _q->memcpy((uint8_t *)(data[7]) + offset[5], in_cache, src_desc.get_size()); offset[5] += src_desc.get_size(); _q->memcpy((uint8_t *)(data[7]) + offset[5], out_cache, dst_desc.get_size()); offset[5] += dst_desc.get_size(); _q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_cache, iter_desc.get_size()); offset[5] += iter_desc.get_size(); if (mode == rnn_mode::lstm) { _q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_c_cache, iter_c_desc.get_size()); offset[5] += iter_c_desc.get_size(); } } } } return e; } inline sycl::event engine_ext::execute_rnn_backward_primitive( rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c, int dst_c, int layer_size, int direction_num, int hidden_size, int gate_num, int projection_size, std::vector<void *> &data, std::vector<int> &offset, int iter_num) { sycl::event e; ::dnnl::primitive *p = nullptr; detail::primitive_cache_key_type key; ::dnnl::prop_kind fkind = 
::dnnl::prop_kind::forward_training; ::dnnl::prop_kind bkind = ::dnnl::prop_kind::backward; ::dnnl::memory::desc bias_desc( {layer_size, direction_num, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldgo); ::dnnl::memory::desc weight_layer_desc( {layer_size, direction_num, projection_size ? projection_size : hidden_size, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldigo); ::dnnl::memory::desc weight_iter_desc( {layer_size, direction_num, projection_size ? projection_size : hidden_size, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldigo); ::dnnl::memory::desc diff_weight_layer_desc( {layer_size, direction_num, projection_size ? projection_size : hidden_size, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldgoi); ::dnnl::memory::desc diff_weight_iter_desc( {layer_size, direction_num, projection_size ? projection_size : hidden_size, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldgoi); ::dnnl::memory::desc projection_desc, diff_projection_desc; if (projection_size) { projection_desc = ::dnnl::memory::desc( {layer_size, direction_num, hidden_size, projection_size}, dt, ::dnnl::memory::format_tag::ldio); diff_projection_desc = ::dnnl::memory::desc( {layer_size, direction_num, hidden_size, projection_size}, dt, ::dnnl::memory::format_tag::ldoi); } ::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag); ::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag); ::dnnl::memory::desc iter_desc( {layer_size, direction_num, batch_size, projection_size ? projection_size : hidden_size}, dt, ::dnnl::memory::format_tag::ldnc); ::dnnl::memory::desc iter_c_desc( {layer_size, direction_num, batch_size, hidden_size}, dt, ::dnnl::memory::format_tag::ldnc); ::dnnl::memory::desc workspace_desc; ::dnnl::memory::desc scratchpad_desc; ::dnnl::primitive_attr attr; attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user); if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) { auto fpd = create_primitive_desc<::dnnl::vanilla_rnn_forward>( fkind, mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu : ::dnnl::algorithm::eltwise_tanh, direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc, iter_desc, attr); auto pd = create_primitive_desc<::dnnl::vanilla_rnn_backward>( bkind, mode == rnn_mode::vanilla_relu ? 
::dnnl::algorithm::eltwise_relu : ::dnnl::algorithm::eltwise_tanh, direction, src_desc, iter_desc, diff_weight_layer_desc, diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc, iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc, iter_desc, fpd, attr); workspace_desc = pd.workspace_desc(); scratchpad_desc = pd.scratchpad_desc(); auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_backward>(pd); key = r.first; p = r.second; } else if (mode == rnn_mode::gru) { auto fpd = create_primitive_desc<::dnnl::gru_forward>( fkind, direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc, iter_desc, attr); auto pd = create_primitive_desc<::dnnl::gru_backward>( bkind, direction, src_desc, iter_desc, diff_weight_layer_desc, diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc, iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc, iter_desc, fpd, attr); workspace_desc = pd.workspace_desc(); scratchpad_desc = pd.scratchpad_desc(); auto r = create_primitive_with_pd<::dnnl::gru_backward>(pd); key = r.first; p = r.second; } else if (mode == rnn_mode::lstm) { auto fpd = create_primitive_desc<::dnnl::lstm_forward>( fkind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc, weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc, dst_desc, iter_desc, iter_c_desc, attr); auto pd = create_primitive_desc<::dnnl::lstm_backward>( bkind, direction, src_desc, iter_desc, iter_c_desc, diff_weight_layer_desc, diff_weight_iter_desc, ::dnnl::memory::desc(), diff_projection_desc, bias_desc, dst_desc, iter_desc, iter_c_desc, src_desc, iter_desc, iter_c_desc, weight_layer_desc, weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc, dst_desc, iter_desc, iter_c_desc, fpd, attr); workspace_desc = pd.workspace_desc(); scratchpad_desc = pd.scratchpad_desc(); auto r = create_primitive_with_pd<::dnnl::lstm_backward>(pd); key = r.first; p = r.second; } for (int i = 0; i < iter_num; i++) { auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DIFF_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[8])}}, {DNNL_ARG_DIFF_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[9])}}, {DNNL_ARG_SCRATCHPAD, {::dnnl::memory(scratchpad_desc, _eng, data[15])}}}; auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data, int &offset) { offset += d.get_size(); execution_args->insert( {arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data - offset)}}); }; if (mode == rnn_mode::lstm) { insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[7], offset[0]); insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]); } insert_args(DNNL_ARG_DST_ITER, iter_desc, data[7], offset[0]); insert_args(DNNL_ARG_DST_LAYER, dst_desc, data[7], offset[0]); insert_args(DNNL_ARG_SRC_LAYER, src_desc, data[7], offset[0]); insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[0]); insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[1]); insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[3]); if (projection_size) { insert_args(DNNL_ARG_WEIGHTS_PROJECTION, diff_projection_desc, data[6], offset[3]); } insert_args(DNNL_ARG_WEIGHTS_ITER, diff_weight_iter_desc, data[6], offset[3]); insert_args(DNNL_ARG_WEIGHTS_LAYER, diff_weight_layer_desc, data[6], offset[3]); insert_args(DNNL_ARG_DIFF_SRC_ITER, iter_desc, data[10], offset[4]); insert_args(DNNL_ARG_DIFF_DST_ITER, iter_desc, data[11], offset[5]); if (mode == rnn_mode::lstm) { insert_args(DNNL_ARG_DIFF_SRC_ITER_C, iter_c_desc, data[12], offset[6]); 
insert_args(DNNL_ARG_DIFF_DST_ITER_C, iter_c_desc, data[13], offset[7]); } insert_args(DNNL_ARG_DIFF_BIAS, bias_desc, data[14], offset[8]); if (bias_mode == rnn_bias_mode::none) { _q->memset((uint8_t *)(data[14]) - offset[8], 0, bias_desc.get_size()); } if (projection_size) { insert_args(DNNL_ARG_DIFF_WEIGHTS_PROJECTION, projection_desc, data[14], offset[8]); } insert_args(DNNL_ARG_DIFF_WEIGHTS_ITER, weight_iter_desc, data[14], offset[8]); insert_args(DNNL_ARG_DIFF_WEIGHTS_LAYER, weight_layer_desc, data[14], offset[8]); if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) { e = execute_primitive<::dnnl::vanilla_rnn_backward>( {key, static_cast<::dnnl::vanilla_rnn_backward *>(p)}, execution_args); } else if (mode == rnn_mode::gru) { e = execute_primitive<::dnnl::gru_backward>( {key, static_cast<::dnnl::gru_backward *>(p)}, execution_args); } else if (mode == rnn_mode::lstm) { e = execute_primitive<::dnnl::lstm_backward>( {key, static_cast<::dnnl::lstm_backward *>(p)}, execution_args); } if (i != iter_num - 1) { std::swap(data[8], data[9]); } } return e; } #define GENERATE_RNN_PRIMITIVE_KEY(name) \ template <> \ inline std::string \ engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \ const ::dnnl::name::primitive_desc &pd) { \ std::stringstream ss; \ ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \ << (std::uint8_t)pd.get_cell_kind() << (std::uint8_t)pd.get_direction() \ << (std::uint8_t)pd.get_algorithm(); \ serialize_mem_desc(ss, pd.src_layer_desc()); \ serialize_mem_desc(ss, pd.src_iter_desc()); \ serialize_mem_desc(ss, pd.dst_layer_desc()); \ serialize_mem_desc(ss, pd.dst_iter_desc()); \ serialize_mem_desc(ss, pd.diff_src_layer_desc()); \ serialize_mem_desc(ss, pd.diff_src_iter_desc()); \ serialize_mem_desc(ss, pd.diff_dst_layer_desc()); \ serialize_mem_desc(ss, pd.diff_dst_iter_desc()); \ serialize_mem_desc(ss, pd.src_iter_c_desc()); \ serialize_mem_desc(ss, pd.dst_iter_c_desc()); \ serialize_mem_desc(ss, pd.diff_src_iter_c_desc()); \ serialize_mem_desc(ss, pd.diff_dst_iter_c_desc()); \ return ss.str(); \ } #define GENERATE_CONVOLUTION_PRIMITIVE_KEY(name, query_type) \ template <> \ inline std::string \ engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \ const ::dnnl::name::primitive_desc &pd) { \ std::stringstream ss; \ ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \ << (std::uint8_t)pd.get_algorithm() \ << (std::uint8_t)pd.get_primitive_attr().get_fpmath_mode() \ << (std::uint8_t)pd.get_group_size(); \ serialize_dims(ss, pd.get_strides()); \ serialize_dims(ss, pd.get_dilations()); \ serialize_dims(ss, pd.get_padding_l()); \ serialize_mem_desc(ss, pd.src_desc()); \ serialize_mem_desc(ss, pd.diff_src_desc()); \ serialize_mem_desc(ss, pd.dst_desc()); \ serialize_mem_desc(ss, pd.diff_dst_desc()); \ serialize_mem_desc(ss, pd.query_type()); \ serialize_mem_desc(ss, pd.weights_desc()); \ serialize_mem_desc(ss, pd.diff_weights_desc()); \ return ss.str(); \ } GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_forward) GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_backward) GENERATE_RNN_PRIMITIVE_KEY(lstm_forward) GENERATE_RNN_PRIMITIVE_KEY(lstm_backward) GENERATE_RNN_PRIMITIVE_KEY(gru_forward) GENERATE_RNN_PRIMITIVE_KEY(gru_backward) GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_forward, bias_desc) GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_data, scratchpad_desc) GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_weights, diff_bias_desc) template <typename primitive_desc_type> std::string 
engine_ext::generate_cache_key(const primitive_desc_type &pd) { std::stringstream ss; auto kind = pd.get_kind(); ss << (std::uint8_t)kind << (std::uint8_t)pd.get_prop_kind() << (std::uint8_t)pd.get_algorithm(); serialize_mem_desc(ss, pd.src_desc()); serialize_mem_desc(ss, pd.diff_src_desc()); serialize_mem_desc(ss, pd.dst_desc()); serialize_mem_desc(ss, pd.diff_dst_desc()); switch (kind) { case ::dnnl::primitive::kind::batch_normalization: ss << pd.get_epsilon() << (std::uint8_t)pd.get_flags(); case ::dnnl::primitive::kind::reduction: ss << pd.get_p(); break; case ::dnnl::primitive::kind::eltwise: ss << pd.get_alpha() << pd.get_beta(); case ::dnnl::primitive::kind::lrn: ss << pd.get_k(); break; case ::dnnl::primitive::kind::pooling: serialize_dims(ss, pd.get_strides()); serialize_dims(ss, pd.get_dilations()); serialize_dims(ss, pd.get_padding_l()); serialize_dims(ss, pd.get_kernel()); break; case ::dnnl::primitive::kind::softmax: ss << pd.get_axis(); break; default: break; } return ss.str(); } template <typename primitive_type, typename... args_type> std::pair<detail::primitive_cache_key_type, primitive_type *> engine_ext::create_primitive(args_type &&...args) { auto pd = create_primitive_desc<primitive_type>(std::forward<args_type>(args)...); return create_primitive_with_pd<primitive_type>(pd); } template <typename primitive_type> std::pair<detail::primitive_cache_key_type, primitive_type *> engine_ext::create_primitive_with_pd( const typename primitive_type::primitive_desc &pd) { detail::primitive_cache_key_type key = generate_cache_key(pd); primitive_type *p = (primitive_type *)_primitive_cache.get(key); if (!p) { p = new primitive_type(pd); } return {key, p}; } template <typename primitive_type, typename... args_type> typename primitive_type::primitive_desc engine_ext::create_primitive_desc(args_type &&...args) { return typename primitive_type::primitive_desc( _eng, std::forward<args_type>(args)...); } inline void engine_ext::fill(const memory_desc_ext &src_desc, void *src, const void *valuePtr) { async_fill(src_desc, src, valuePtr).wait(); } inline void engine_ext::reorder(float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { async_reorder(alpha, src_desc, src, beta, dst_desc, dst).wait(); } inline void engine_ext::scale(float alpha, const memory_desc_ext &src_desc, void *src) { async_scale(alpha, src_desc, src).wait(); } inline void engine_ext::sum(float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { async_sum(alpha, src_desc, src, beta, dst_desc, dst).wait(); } inline void engine_ext::activation_forward(activation_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { async_activation_forward(desc, alpha, src_desc, src, beta, dst_desc, dst) .wait(); } inline void engine_ext::activation_backward( activation_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src) { async_activation_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst, src_desc, src, beta, diff_src_desc, diff_src) .wait(); } inline void engine_ext::pooling_forward(pooling_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace) { 
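  // Synchronous wrapper: launch the asynchronous pooling primitive and wait
  // on the returned SYCL event before returning to the caller.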
async_pooling_forward(desc, alpha, src_desc, src, beta, dst_desc, dst, workspace).wait(); } inline void engine_ext::pooling_backward( pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src, ::dnnl::memory *workspace) { async_pooling_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst, src_desc, src, beta, diff_src_desc, diff_src, workspace) .wait(); } inline void engine_ext::softmax_forward(softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { async_softmax_forward(alg, mode, alpha, src_desc, src, beta, dst_desc, dst) .wait(); } inline void engine_ext::softmax_backward(softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_src_desc, void *diff_src) { async_softmax_backward(alg, mode, alpha, dst_desc, dst, diff_dst_desc, diff_dst, beta, diff_src_desc, diff_src) .wait(); } inline void engine_ext::lrn_forward(lrn_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace) { async_lrn_forward(desc, alpha, src_desc, src, beta, dst_desc, dst, workspace) .wait(); } inline void engine_ext::lrn_backward(lrn_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src, ::dnnl::memory *workspace) { async_lrn_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst, src_desc, src, beta, diff_src_desc, diff_src, workspace) .wait(); } inline sycl::event engine_ext::async_fill(const memory_desc_ext &src_desc, void *src, const void *valuePtr) { ::dnnl::memory::data_type dt = src_desc.get_desc().get_data_type(); unsigned mem_size = src_desc.get_size(); switch (dt) { case ::dnnl::memory::data_type::f32: return fill_with_type<float>(_q, src, valuePtr, mem_size); case ::dnnl::memory::data_type::f16: return fill_with_type<sycl::half>(_q, src, valuePtr, mem_size); case ::dnnl::memory::data_type::s32: return fill_with_type<int32_t>(_q, src, valuePtr, mem_size); case ::dnnl::memory::data_type::s8: return fill_with_type<int8_t>(_q, src, valuePtr, mem_size); case ::dnnl::memory::data_type::u8: return fill_with_type<uint8_t>(_q, src, valuePtr, mem_size); default: throw std::runtime_error("async_fill: unsupported data type."); } } inline sycl::event engine_ext::async_reorder(float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) { return sycl::event(); } auto pd = ::dnnl::reorder::primitive_desc(_eng, src_desc.get_desc(), _eng, dst_desc.get_desc()); auto primitive = create_primitive_with_pd<::dnnl::reorder>(pd); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}}; return execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, dst_desc, dst}}); } inline sycl::event engine_ext::async_scale(float alpha, const memory_desc_ext &src_desc, void *src) { if (alpha == 1.f) { return sycl::event(); } void *src_cache = 
allocate(src_desc); _q->memcpy(src_cache, src, src_desc.get_size()); auto primitive = create_primitive<::dnnl::eltwise_forward>( ::dnnl::prop_kind::forward_inference, ::dnnl::algorithm::eltwise_linear, src_desc.get_desc(), src_desc.get_desc(), alpha, 0.f); auto args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DST, ::dnnl::memory(src_desc.get_desc(), _eng, src)}, {DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src_cache)}}; return execute_primitive(primitive, args, {}, {src_cache}); } inline sycl::event engine_ext::async_sum(float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { if (alpha == 0.f && beta == 1.f) { return sycl::event(); } void *dst_cache = allocate(dst_desc); _q->memcpy(dst_cache, dst, dst_desc.get_size()); auto pd = create_primitive_desc<::dnnl::sum>( std::vector<float>{alpha, beta}, std::vector<::dnnl::memory::desc>{src_desc.get_desc(), dst_desc.get_desc()}); std::stringstream ss; ss << (std::uint8_t)pd.get_kind() << alpha << beta; serialize_mem_desc(ss, pd.src_desc(0)); serialize_mem_desc(ss, pd.src_desc(1)); detail::primitive_cache_key_type key = ss.str(); ::dnnl::sum *p = (::dnnl::sum *)_primitive_cache.get(key); if (!p) { p = new ::dnnl::sum(pd); } auto args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)}, {DNNL_ARG_MULTIPLE_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)}, {DNNL_ARG_MULTIPLE_SRC + 1, ::dnnl::memory(dst_desc.get_desc(), _eng, dst_cache)}}; return execute_primitive<::dnnl::sum>({key, p}, args, {}, {dst_cache}); } inline sycl::event engine_ext::async_binary(binary_op op, float alpha_0, const memory_desc_ext &src_desc_0, void *src_0, float alpha_1, const memory_desc_ext &src_desc_1, void *src_1, float beta, const memory_desc_ext &dst_desc, void *dst) { ::dnnl::algorithm onednn_algorithm; switch (op) { case binary_op::max: onednn_algorithm = ::dnnl::algorithm::binary_max; break; case binary_op::min: onednn_algorithm = ::dnnl::algorithm::binary_min; break; case binary_op::add: onednn_algorithm = ::dnnl::algorithm::binary_add; break; case binary_op::sub: onednn_algorithm = ::dnnl::algorithm::binary_sub; break; case binary_op::mul: onednn_algorithm = ::dnnl::algorithm::binary_mul; break; case binary_op::div: onednn_algorithm = ::dnnl::algorithm::binary_div; break; case binary_op::sqrt: onednn_algorithm = ::dnnl::algorithm::eltwise_sqrt; break; case binary_op::neg: onednn_algorithm = ::dnnl::algorithm::eltwise_linear; break; } if (onednn_algorithm == ::dnnl::algorithm::eltwise_sqrt || onednn_algorithm == ::dnnl::algorithm::eltwise_linear) { void *src_cache = nullptr, *dst_cache = nullptr; src_cache = allocate(src_desc_0); dst_cache = allocate(dst_desc); _q->memcpy(src_cache, src_0, src_desc_0.get_size()); _q->memcpy(dst_cache, dst, dst_desc.get_size()); async_scale(alpha_0, src_desc_0, src_cache); async_scale(beta, dst_desc, dst_cache); // Let the output = 1 - input to simulate the behavior of neg. 
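  // eltwise_linear with alpha = -1.f and beta = 1.f computes dst = -src + 1;
  // on the sqrt path the eltwise_sqrt algorithm ignores these two parameters.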
auto primitive = create_primitive<::dnnl::eltwise_forward>( ::dnnl::prop_kind::forward_inference, onednn_algorithm, src_desc_0.get_desc(), dst_desc.get_desc(), -1.f, 1.f); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(src_desc_0.get_desc(), _eng, src_cache)}}}; execute_primitive( primitive, execution_args, {{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}}); auto e = async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst); _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { sycl::free(src_cache, *_q); sycl::free(dst_cache, *_q); }); }); return e; } auto execution_args = new std::unordered_map<int, ::dnnl::memory>{}; void *src_0_cache = nullptr, *src_1_cache = nullptr, *dst_cache = nullptr; src_0_cache = allocate(src_desc_0); src_1_cache = allocate(src_desc_1); dst_cache = allocate(dst_desc); _q->memcpy(src_0_cache, src_0, src_desc_0.get_size()); _q->memcpy(src_1_cache, src_1, src_desc_1.get_size()); _q->memcpy(dst_cache, dst, dst_desc.get_size()); async_scale(alpha_0, src_desc_0, src_0_cache); async_scale(alpha_1, src_desc_1, src_1_cache); async_scale(beta, dst_desc, dst_cache); execution_args->insert({DNNL_ARG_SRC_0, ::dnnl::memory(src_desc_0.get_desc(), _eng, src_0_cache)}); execution_args->insert({DNNL_ARG_SRC_1, ::dnnl::memory(src_desc_1.get_desc(), _eng, src_1_cache)}); auto primitive = create_primitive<::dnnl::binary>( onednn_algorithm, src_desc_0.get_desc(), src_desc_1.get_desc(), dst_desc.get_desc()); auto e = execute_primitive(primitive, execution_args, {{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}}); async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst); _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { sycl::free(dst_cache, *_q); sycl::free(src_0_cache, *_q); sycl::free(src_1_cache, *_q); }); }); return e; } inline sycl::event engine_ext::async_reduction(reduction_op op, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { if (alpha == 0.f && beta == 1.f) { return sycl::event(); } float p = 2.f; ::dnnl::algorithm onednn_algorithm; void *cache = nullptr; switch (op) { case reduction_op::amax: cache = allocate(src_desc); activation_desc adesc; adesc.set_algorithm(::dnnl::algorithm::eltwise_abs); async_activation_forward(adesc, 1.f, src_desc, src, 0.f, src_desc, cache); onednn_algorithm = ::dnnl::algorithm::reduction_max; src = cache; break; case reduction_op::max: onednn_algorithm = ::dnnl::algorithm::reduction_max; break; case reduction_op::min: onednn_algorithm = ::dnnl::algorithm::reduction_min; break; case reduction_op::sum: onednn_algorithm = ::dnnl::algorithm::reduction_sum; break; case reduction_op::mean: onednn_algorithm = ::dnnl::algorithm::reduction_mean; break; case reduction_op::mul: onednn_algorithm = ::dnnl::algorithm::reduction_mul; break; case reduction_op::mul_no_zeros: cache = allocate(src_desc); transform_no_zero(src_desc, src, cache); onednn_algorithm = ::dnnl::algorithm::reduction_mul; src = cache; break; case reduction_op::norm1: p = 1.f; onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_power_p_sum; break; case reduction_op::norm2: onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_sum; break; } auto primitive = create_primitive<::dnnl::reduction>( onednn_algorithm, src_desc.get_desc(), dst_desc.get_desc(), p, 0.f); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)}}; if (cache) { return execute_primitive(primitive, 
execution_args, {{alpha, beta, DNNL_ARG_DST, dst_desc, dst}}, {cache}); } return execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, dst_desc, dst}}); } inline sycl::event engine_ext::async_activation_forward(activation_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) { return sycl::event(); } auto primitive = create_primitive<::dnnl::eltwise_forward>( ::dnnl::prop_kind::forward, desc.get_algorithm(), src_desc.get_desc(), dst_desc.get_desc(), desc.get_alpha(), desc.get_beta()); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}}; return execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, dst_desc, dst}}); } inline sycl::event engine_ext::async_activation_backward( activation_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src) { if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) { return sycl::event(); } ::dnnl::memory::desc data_desc = dst_desc.get_desc(); auto alg = desc.get_algorithm(); if ((alg == ::dnnl::algorithm::eltwise_clip) || (alg == ::dnnl::algorithm::eltwise_linear) || (alg == ::dnnl::algorithm::eltwise_swish)) { data_desc = src_desc.get_desc(); } auto primitive = create_primitive<::dnnl::eltwise_backward>( alg, diff_src_desc.get_desc(), diff_dst_desc.get_desc(), data_desc, desc.get_alpha(), desc.get_beta(), create_primitive_desc<::dnnl::eltwise_forward>( ::dnnl::prop_kind::forward, alg, src_desc.get_desc(), dst_desc.get_desc(), desc.get_alpha(), desc.get_beta())); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}}, {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}, {DNNL_ARG_DIFF_DST, {::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}}; return execute_primitive( primitive, execution_args, {{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}}); } inline sycl::event engine_ext::async_pooling_forward(pooling_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace) { if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) { return sycl::event(); } int pooling_dim = desc.get_stride().size(); std::vector<int64_t> dilation(pooling_dim, 0); auto primitive_desc = create_primitive_desc<::dnnl::pooling_forward>( ::dnnl::prop_kind::forward_training, desc.get_algorithm(), src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(), desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding()); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}}; ::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng); execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem}); if (workspace) { *workspace = ws_mem; } else { insert_workspace(src, ws_mem); } auto primitive = create_primitive_with_pd<::dnnl::pooling_forward>(primitive_desc); return execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, dst_desc, dst}}); } inline sycl::event engine_ext::async_pooling_backward( pooling_desc &desc, float alpha, const memory_desc_ext 
&dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src, ::dnnl::memory *workspace) { if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) { return sycl::event(); } int pooling_dim = desc.get_stride().size(); std::vector<int64_t> dilation(pooling_dim, 0); auto primitive = create_primitive<::dnnl::pooling_backward>( desc.get_algorithm(), diff_src_desc.get_desc(), diff_dst_desc.get_desc(), desc.get_stride(), desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding(), create_primitive_desc<::dnnl::pooling_forward>( ::dnnl::prop_kind::forward_training, desc.get_algorithm(), src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(), desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding())); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}}, {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}, {DNNL_ARG_DIFF_DST, {::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}}; if (workspace) { execution_args->insert({DNNL_ARG_WORKSPACE, *workspace}); } else { execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)}); } return execute_primitive( primitive, execution_args, {{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}}); } inline sycl::event engine_ext::async_softmax_forward(softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) { return sycl::event(); } ::dnnl::memory::desc help_src_desc = src_desc.get_desc(); ::dnnl::memory::desc help_dst_desc = dst_desc.get_desc(); if (mode == softmax_mode::instance) { help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc); help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc); } auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(help_src_desc, _eng, src)}}}; ::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate; if (alg == softmax_algorithm::log) { softmax_alg = ::dnnl::algorithm::softmax_log; } auto primitive = create_primitive<::dnnl::softmax_forward>( ::dnnl::prop_kind::forward, softmax_alg, help_src_desc, help_dst_desc, 1); return execute_primitive( primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, memory_desc_ext(help_dst_desc), dst}}); } inline sycl::event engine_ext::async_softmax_backward( softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_src_desc, void *diff_src) { if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) { return sycl::event(); } ::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc(); ::dnnl::memory::desc help_dst_desc = dst_desc.get_desc(); ::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc(); if (mode == softmax_mode::instance) { help_diff_src_desc = compress_spatial_dimensions_to_channel(help_diff_src_desc); help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc); help_diff_dst_desc = compress_spatial_dimensions_to_channel(help_diff_dst_desc); } auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DST, {::dnnl::memory(help_dst_desc, _eng, dst)}}, {DNNL_ARG_DIFF_DST, 
{::dnnl::memory(help_diff_dst_desc, _eng, diff_dst)}}}; ::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate; if (alg == softmax_algorithm::log) { softmax_alg = ::dnnl::algorithm::softmax_log; } auto primitive = create_primitive<::dnnl::softmax_backward>( softmax_alg, help_diff_src_desc, help_diff_dst_desc, help_dst_desc, 1, create_primitive_desc<::dnnl::softmax_forward>( ::dnnl::prop_kind::forward, softmax_alg, help_diff_src_desc, help_dst_desc, 1)); return execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DIFF_SRC, memory_desc_ext(help_diff_src_desc), diff_src}}); } inline sycl::event engine_ext::async_lrn_forward(lrn_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace) { if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) { return sycl::event(); } auto primitive_desc = create_primitive_desc<::dnnl::lrn_forward>( ::dnnl::prop_kind::forward_training, ::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(), dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(), desc.get_beta(), desc.get_k()); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}}; ::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng); execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem}); if (workspace) { *workspace = ws_mem; } else { insert_workspace(src, ws_mem); } auto primitive = create_primitive_with_pd<::dnnl::lrn_forward>(primitive_desc); return execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, dst_desc, dst}}); } inline sycl::event engine_ext::async_lrn_backward(lrn_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src, ::dnnl::memory *workspace) { if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) { return sycl::event(); } auto primitive = create_primitive<::dnnl::lrn_backward>( ::dnnl::algorithm::lrn_across_channels, diff_src_desc.get_desc(), diff_dst_desc.get_desc(), src_desc.get_desc(), desc.get_local_size(), desc.get_alpha(), desc.get_beta(), desc.get_k(), create_primitive_desc<::dnnl::lrn_forward>( ::dnnl::prop_kind::forward_training, ::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(), dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(), desc.get_beta(), desc.get_k())); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}}, {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}, {DNNL_ARG_DIFF_DST, {::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}}; if (workspace) { execution_args->insert({DNNL_ARG_WORKSPACE, *workspace}); } else { execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)}); } return execute_primitive( primitive, execution_args, {{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}}); } inline size_t engine_ext::get_batch_normalization_workspace_size( batch_normalization_ops ops, const memory_desc_ext &src_desc) { if(ops == batch_normalization_ops::none) { return 0; } return src_desc.get_size(); } inline sycl::event engine_ext::async_batch_normalization_forward_inference( batch_normalization_mode mode, float epsilon, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext 
&dst_desc, void *dst, const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias, void *mean, void *var) { return batch_normalization_forward_internal( true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst, scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc, mean, var, nullptr, nullptr); } inline sycl::event engine_ext::async_batch_normalization_forward_inference( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &scale_bias_desc, void *scale, void *bias, const memory_desc_ext &mean_var_desc, void *mean, void *var) { bool has_post_op = (ops != batch_normalization_ops::none); sycl::event e; std::vector<void *> caches; if (has_post_op) { void *dst_cache = allocate(dst_desc); caches.push_back(dst_cache); batch_normalization_forward_internal( true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, dst_desc, dst_cache, scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr, nullptr); if (ops == batch_normalization_ops::add_activation) { async_sum(1.f, summand_desc, summand, 1.f, dst_desc, dst_cache); } async_activation_forward(adesc, 1.f, dst_desc, dst_cache, 0.f, dst_desc, dst_cache); e = async_sum(alpha, dst_desc, dst_cache, beta, dst_desc, dst); _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { for (auto ptr : caches) { sycl::free(ptr, *_q); } }); }); return e; } return batch_normalization_forward_internal( true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst, scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr, nullptr); } inline sycl::event engine_ext::async_batch_normalization_forward_training( batch_normalization_mode mode, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias, void *running_mean, void *running_var, void *saved_mean, void *saved_var) { return batch_normalization_forward_internal( false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst, scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc, saved_mean, saved_var, running_mean, running_var); } inline sycl::event engine_ext::async_batch_normalization_forward_training( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &scale_bias_desc, void *scale, void *bias, const memory_desc_ext &mean_var_desc, void *running_mean, void *running_var, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace) { bool has_post_op = (ops != batch_normalization_ops::none); sycl::event e; if (has_post_op) { if(workspace_size < dst_desc.get_desc().get_size()) { throw std::runtime_error("async_batch_normalization_forward_training_ex: " "no sufficient workspace."); } batch_normalization_forward_internal( false, mode, epsilon, factor, 1.f, src_desc, src, 0.f, dst_desc, workspace, scale_bias_desc, scale, bias, mean_var_desc, saved_mean, saved_var, running_mean, running_var); if (ops == batch_normalization_ops::add_activation) { async_sum(1.f, summand_desc, summand, 1.f, 
dst_desc, workspace); } return async_activation_forward(adesc, alpha, dst_desc, workspace, beta, dst_desc, dst); } return batch_normalization_forward_internal( false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst, scale_bias_desc, scale, bias, mean_var_desc, saved_mean, saved_var, running_mean, running_var); } inline sycl::event engine_ext::async_batch_normalization_forward_training( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias, void *running_mean, void *running_var, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace) { return async_batch_normalization_forward_training( mode, ops, adesc, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst, summand_desc, summand, scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc, running_mean, running_var, saved_mean, saved_var, workspace_size, workspace); } inline sycl::event engine_ext::async_batch_normalization_backward( batch_normalization_mode mode, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale, float beta_param, void *diff_scale, void *diff_bias, void *saved_mean, void *saved_var) { return batch_normalization_backward_internal( mode, epsilon, alpha_data, src_desc, src, diff_dst_desc, diff_dst, beta_data, diff_src_desc, diff_src, alpha_param, diff_scale_bias_mean_var_desc, scale, nullptr, beta_param, diff_scale, diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var); } inline sycl::event engine_ext::async_batch_normalization_backward( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, const memory_desc_ext &diff_summand_desc, void *diff_summand, float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias, float beta_param, void *diff_scale, void *diff_bias, const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace) { std::vector<void *> caches; ::dnnl::memory::desc real_diff_dst_desc = diff_dst_desc.get_desc(); void *real_diff_dst = diff_dst; if (ops != batch_normalization_ops::none && workspace_size < dst_desc.get_desc().get_size()) { throw std::runtime_error("async_batch_normalization_backward_ex: " "no sufficient workspace."); } if (ops == batch_normalization_ops::add_activation) { void *diff_summand_cache = allocate(diff_summand_desc); async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc, diff_dst, dst_desc, workspace, 0.f, diff_summand_desc, diff_summand_cache); caches.push_back(diff_summand_cache); async_sum(alpha_data, diff_summand_desc, diff_summand_cache, beta_data, diff_summand_desc, diff_summand); real_diff_dst_desc = diff_summand_desc.get_desc(); real_diff_dst = diff_summand_cache; } else if (ops == batch_normalization_ops::activation) { void *diff_dst_cache = 
allocate(diff_dst_desc); caches.push_back(diff_dst_cache); async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc, diff_dst, dst_desc, workspace, 0.f, diff_dst_desc, diff_dst_cache); real_diff_dst = diff_dst_cache; } sycl::event e = batch_normalization_backward_internal( mode, epsilon, alpha_data, src_desc, src, real_diff_dst_desc, real_diff_dst, beta_data, diff_src_desc, diff_src, alpha_param, diff_scale_bias_desc, scale, bias, beta_param, diff_scale, diff_bias, mean_var_desc, saved_mean, saved_var); _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { for (auto ptr : caches) { sycl::free(ptr, *_q); } }); }); return e; } inline sycl::event engine_ext::async_batch_normalization_backward( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, const memory_desc_ext &diff_summand_desc, void *diff_summand, float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale, void *bias, float beta_param, void *diff_scale, void *diff_bias, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace) { return async_batch_normalization_backward( mode, ops, adesc, epsilon, alpha_data, src_desc, src, dst_desc, dst, diff_dst_desc, diff_dst, beta_data, diff_src_desc, diff_src, diff_summand_desc, diff_summand, alpha_param, diff_scale_bias_mean_var_desc, scale, bias, beta_param, diff_scale, diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var, workspace_size, workspace); } inline sycl::event engine_ext::async_convolution_forward(convolution_desc &desc, ::dnnl::algorithm alg, float alpha, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &weight_desc, void *weight, float beta, const memory_desc_ext &dst_desc, void *dst) { if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) { return sycl::event(); } auto help_weight_desc = get_group_weight_desc(desc.get_group_count(), weight_desc); ::dnnl::primitive_attr attr; attr.set_fpmath_mode(desc.get_math_mode()); auto origin_src_md = src_desc.get_desc(); auto origin_dst_md = dst_desc.get_desc(); auto origin_weight_md = help_weight_desc; auto src_md = transfer_memory_desc_to_format_tag_any(origin_src_md); auto dst_md = transfer_memory_desc_to_format_tag_any(origin_dst_md); auto weight_md = transfer_memory_desc_to_format_tag_any(origin_weight_md); auto primitive = create_primitive<::dnnl::convolution_forward>( ::dnnl::prop_kind::forward_training, alg, src_md, weight_md, dst_md, desc.get_stride(), desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr); ::dnnl::convolution_forward::primitive_desc pd = ::dnnl::convolution_forward::primitive_desc( const_cast<dnnl_primitive_desc_t>( primitive.second->get_primitive_desc())); auto optimal_src_md = pd.src_desc(); auto optimal_dst_md = pd.dst_desc(); auto optimal_weight_md = pd.weights_desc(); void *optimal_src = src, *optimal_dst = dst, *optimal_weight = weight; std::vector<void *> input_caches, output_caches; allocate_and_reorder_memory_to_optimal(origin_src_md, src, optimal_src_md, optimal_src, input_caches); allocate_and_reorder_memory_to_optimal(origin_dst_md, dst, optimal_dst_md, optimal_dst, output_caches); allocate_and_reorder_memory_to_optimal(origin_weight_md, weight, optimal_weight_md, optimal_weight, input_caches); auto 
execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(optimal_src_md, _eng, optimal_src)}}, {DNNL_ARG_WEIGHTS, {::dnnl::memory(optimal_weight_md, _eng, optimal_weight)}}}; auto e = execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, optimal_dst_md, optimal_dst}}, input_caches); if(origin_dst_md != optimal_dst_md){ e = async_reorder(1.f, optimal_dst_md, optimal_dst, 0.f, origin_dst_md, dst); } async_free(_q, e, nullptr, output_caches); return e; } inline sycl::event engine_ext::async_convolution_forward( convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc, float alpha_0, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &weight_desc, void *weight, float alpha_1, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &bias_desc, void *bias, const memory_desc_ext &dst_desc, void *dst) { int channel_num = bias_desc.get_element_num(); auto help_weight_desc = get_group_weight_desc(desc.get_group_count(), weight_desc); ::dnnl::memory::desc help_bias_desc = {{channel_num}, bias_desc.get_desc().get_data_type(), ::dnnl::memory::format_tag::a}; ::dnnl::primitive_attr attr; attr.set_fpmath_mode(desc.get_math_mode()); auto primitive = create_primitive<::dnnl::convolution_forward>( ::dnnl::prop_kind::forward_training, alg, src_desc.get_desc(), help_weight_desc, help_bias_desc, dst_desc.get_desc(), desc.get_stride(), desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}, {DNNL_ARG_BIAS, {::dnnl::memory(help_bias_desc, _eng, bias)}}}; void *cache = nullptr; if (alpha_0 != 1.f) { cache = allocate(help_weight_desc); _q->memcpy(cache, weight, weight_desc.get_size()); async_scale(alpha_0, help_weight_desc, cache); execution_args->insert( {DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, cache)}}); execute_primitive(primitive, execution_args, {{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}}, {cache}); } else { execution_args->insert( {DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}}); execute_primitive(primitive, execution_args, {{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}}); } async_sum(alpha_1, summand_desc, summand, 1.f, dst_desc, dst); return async_activation_forward(adesc, 1.f, dst_desc, dst, 0.f, dst_desc, dst); } inline sycl::event engine_ext::async_convolution_backward_data( convolution_desc &desc, ::dnnl::algorithm alg, float alpha, const memory_desc_ext &weight_desc, void *weight, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_src_desc, void *diff_src) { if (scale_parameter_preprocess({{alpha, beta, diff_dst_desc, diff_dst}})) { return sycl::event(); } auto help_weight_desc = get_group_weight_desc(desc.get_group_count(), weight_desc); ::dnnl::primitive_attr attr; attr.set_fpmath_mode(desc.get_math_mode()); auto forward_primitive = create_primitive_desc<::dnnl::convolution_forward>( ::dnnl::prop_kind::forward_training, ::dnnl::algorithm::convolution_auto, diff_src_desc.get_desc(), help_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(), desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr); auto primitive = create_primitive<::dnnl::convolution_backward_data>( ::dnnl::algorithm::convolution_auto, diff_src_desc.get_desc(), help_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(), desc.get_dilate(), desc.get_padding(), desc.get_padding(), forward_primitive, attr); 
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DIFF_DST, {::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}, {DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}}}; return execute_primitive( primitive, execution_args, {{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}}); } inline sycl::event engine_ext::async_convolution_backward_weight( convolution_desc &desc, ::dnnl::algorithm alg, float alpha, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_weight_desc, void *diff_weight) { if (scale_parameter_preprocess( {{alpha, beta, diff_weight_desc, diff_weight}})) { return sycl::event(); } auto help_diff_weight_desc = get_group_weight_desc(desc.get_group_count(), diff_weight_desc); ::dnnl::primitive_attr attr; attr.set_fpmath_mode(desc.get_math_mode()); auto forward_primitive = create_primitive_desc<::dnnl::convolution_forward>( ::dnnl::prop_kind::forward_training, ::dnnl::algorithm::convolution_auto, src_desc.get_desc(), help_diff_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(), desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr); auto primitive = create_primitive<::dnnl::convolution_backward_weights>( ::dnnl::algorithm::convolution_auto, src_desc.get_desc(), help_diff_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(), desc.get_dilate(), desc.get_padding(), desc.get_padding(), forward_primitive, attr); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}, {DNNL_ARG_DIFF_DST, {::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}}; return execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DIFF_WEIGHTS, help_diff_weight_desc, diff_weight}}); } inline sycl::event engine_ext::async_convolution_backward_bias( float alpha, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_bias_desc, void *diff_bias) { return async_reduction(reduction_op::sum, alpha, diff_dst_desc, diff_dst, beta, diff_bias_desc, diff_bias); } inline void engine_ext::rnn_get_weight_space_size(const rnn_desc &desc, size_t *weight_space_size) { *weight_space_size = 0; rnn_forward_internal(desc, ::dnnl::prop_kind::forward_inference, memory_desc_ext(), nullptr, memory_desc_ext(), nullptr, memory_desc_ext(), nullptr, nullptr, memory_desc_ext(), nullptr, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, true, weight_space_size, nullptr, nullptr); return; } inline void engine_ext::rnn_get_scratchpad_workspace_size( const rnn_desc &desc, ::dnnl::prop_kind kind, const memory_desc_ext &src_desc, size_t *scratchpad_size, size_t *workspace_size) { *workspace_size = 0; *scratchpad_size = 0; rnn_forward_internal(desc, kind, src_desc, nullptr, memory_desc_ext(), nullptr, memory_desc_ext(), nullptr, nullptr, memory_desc_ext(), nullptr, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, true, nullptr, workspace_size, scratchpad_size); return; } inline sycl::event engine_ext::async_rnn_forward( const rnn_desc &desc, ::dnnl::prop_kind kind, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c, size_t weight_size, void *weight, size_t scratchpad_size, void *scratchpad, size_t workspace_size, void *workspace) { return rnn_forward_internal( desc, kind, src_desc, src, dst_desc, 
dst, iter_desc, src_iter, dst_iter, iter_c_desc, src_iter_c, dst_iter_c, weight_size, weight, workspace_size, workspace, scratchpad_size, scratchpad, false, nullptr, nullptr, nullptr); } inline sycl::event engine_ext::async_rnn_backward( const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst, void *diff_dst, const memory_desc_ext &src_desc, void *src, void *diff_src, const memory_desc_ext &iter_desc, void *src_iter, void *diff_dst_iter, void *diff_src_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c, void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size, void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad, size_t workspace_size, void *workspace) { ::dnnl::memory::data_type src_dt; ::dnnl::memory::format_tag src_format_tag; rnn_mode mode; rnn_memory_format_tag format_tag; rnn_bias_mode bias_mode; rnn_direction direction; dpct::library_data_t dt; int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0, layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0, seq_length = 1, batch_size = 1; void *last_layer_cache = nullptr; void *hidden_layer_cache = nullptr; sycl::event e; std::vector<int> offset(9, 0); std::vector<void *> data = { src, dst, (uint8_t *)src_iter + iter_desc.get_size(), nullptr, (uint8_t *)src_iter_c + iter_c_desc.get_size(), nullptr, (uint8_t *)weight + weight_size, (uint8_t *)workspace + workspace_size, diff_src, diff_dst, (uint8_t *)diff_src_iter + iter_desc.get_size(), (uint8_t *)diff_dst_iter + iter_desc.get_size(), (uint8_t *)diff_src_iter_c + iter_c_desc.get_size(), (uint8_t *)diff_dst_iter_c + iter_c_desc.get_size(), (uint8_t *)diff_weight + weight_size, scratchpad}; desc.get(&mode, &bias_mode, &direction, &dt, &input_size, &hidden_size, &projection_size, &layer_size); get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size, &src_dt, &src_format_tag, &projection_size, &output_size, &seq_length, &batch_size, &direction_num, &gate_num); if (direction == rnn_direction::bidirectional) { if (layer_size > 1) { last_layer_cache = allocate(src_desc); hidden_layer_cache = allocate(src_desc); data[8] = last_layer_cache; } e = execute_rnn_backward_primitive( mode, ::dnnl::rnn_direction::bidirectional_concat, bias_mode, src_dt, src_format_tag, seq_length, batch_size, output_size, 2 * output_size, 1, direction_num, hidden_size, gate_num, projection_size, data, offset, 1); if (layer_size > 1) { data[8] = hidden_layer_cache; data[9] = last_layer_cache; e = execute_rnn_backward_primitive( mode, ::dnnl::rnn_direction::bidirectional_sum, bias_mode, src_dt, src_format_tag, seq_length, batch_size, output_size, output_size, 1, direction_num, hidden_size, gate_num, projection_size, data, offset, layer_size - 1); _q->memcpy(diff_src, ((layer_size - 1) % 2 == 0) ? 
last_layer_cache : hidden_layer_cache, src_desc.get_size()); } } else { e = execute_rnn_backward_primitive( mode, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode, src_dt, src_format_tag, seq_length, batch_size, output_size, output_size, layer_size, direction_num, hidden_size, gate_num, projection_size, data, offset, 1); } if (last_layer_cache && hidden_layer_cache) { _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { sycl::free(last_layer_cache, *_q); sycl::free(hidden_layer_cache, *_q); }); }); } return e; } inline size_t engine_ext::get_dropout_state_size(){ #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) " "Interfaces Project does not support this API."); #else sycl::queue q; if(_random_engine_state_size == -1) { if(_q){ q = *_q; } else { q = dpct::get_current_device().default_queue(); } auto rand_engine = rng_engine_t(q, 0); _random_engine_state_size = oneapi::mkl::rng::get_state_size(rand_engine); } return _random_engine_state_size; #endif } inline size_t engine_ext::get_dropout_workspace_size(const memory_desc_ext &src_desc) { return src_desc.get_size(); } inline sycl::event engine_ext::async_dropout_forward(dropout_desc &desc, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, void *workspace, size_t workspace_size) { if (workspace_size < src_desc.get_size()) { throw std::runtime_error("async_dropout_forward: no sufficient workspace."); } float p = desc.get_probability(); if (p == 1.f) { return _q->memset(dst, 0, dst_desc.get_size()); } else if (p == 0.f) { return async_reorder(1.f, src_desc, src, 0.f, dst_desc, dst); } float scale_factor = 1.f / (1.f - p); void *cache = workspace; memory_desc_ext rng_data_desc( ::dnnl::memory::desc(src_desc.get_dims(), ::dnnl::memory::data_type::s32, src_desc.get_strides())); if (src_desc.get_desc().get_data_type() != ::dnnl::memory::data_type::s32) { cache = allocate(rng_data_desc); } desc.generate(_q, _random_engine_state_size, rng_data_desc.get_element_num(), (std::int32_t *)cache); if (cache == workspace) { async_scale(scale_factor, src_desc, workspace); } else { async_reorder(scale_factor, rng_data_desc, cache, 0.f, src_desc, workspace); } auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC_0, ::dnnl::memory(src_desc.get_desc(), _eng, src)}, {DNNL_ARG_SRC_1, ::dnnl::memory(src_desc.get_desc(), _eng, workspace)}, {DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)}}; auto primitive = create_primitive<::dnnl::binary>( ::dnnl::algorithm::binary_mul, src_desc.get_desc(), src_desc.get_desc(), dst_desc.get_desc()); auto e = execute_primitive(primitive, execution_args); if (cache != workspace) { _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { sycl::free(cache, *_q); }); }); } return e; } inline sycl::event engine_ext::async_dropout_backward( dropout_desc &desc, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &diff_src_desc, void *diff_src, void *workspace, size_t workspace_size) { float p = desc.get_probability(); if (p == 1.f) { return _q->memset(diff_src, 0, diff_src_desc.get_size()); } else if (p == 0.f) { return async_reorder(1.f, diff_dst_desc, diff_dst, 0.f, diff_src_desc, diff_src); } auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC_0, ::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}, {DNNL_ARG_SRC_1, ::dnnl::memory(diff_dst_desc.get_desc(), _eng, workspace)}, {DNNL_ARG_DST, 
::dnnl::memory(diff_src_desc.get_desc(), _eng, diff_src)}}; auto primitive = create_primitive<::dnnl::binary>( ::dnnl::algorithm::binary_mul, diff_dst_desc.get_desc(), diff_dst_desc.get_desc(), diff_src_desc.get_desc()); return execute_primitive(primitive, execution_args); } } // namespace dnnl } // namespace dpct #endif // __DPCT_DNNL_UTILS_HPP__
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/lapack_utils.hpp
//==---- lapack_utils.hpp -------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//

#ifndef __DPCT_LAPACK_UTILS_HPP__
#define __DPCT_LAPACK_UTILS_HPP__

#include "memory.hpp"
#include "util.hpp"
#include "lib_common_utils.hpp"

#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>

namespace dpct {
namespace lapack {
/// Computes all eigenvalues and, optionally, eigenvectors of a real generalized
/// symmetric definite eigenproblem using a divide and conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1 or 2 or 3. Specifies the problem type to be solved.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrices A and B.
/// \param [in,out] a The symmetric matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b The symmetric matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] w Eigenvalues.
/// \param [in] scratchpad Scratchpad memory to be used by the routine
/// for storing intermediate results.
/// \param [in] scratchpad_size Size of scratchpad memory as a number of
/// floating point elements of type T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
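///
/// A minimal usage sketch (illustrative only): q is an in-order SYCL queue,
/// a_dev, b_dev, w_dev and scratchpad_dev are caller-managed float device
/// allocations, scratchpad_size counts elements of T as documented above, and
/// info_dev is a device int.
/// \code
/// int err = dpct::lapack::sygvd<float>(
///     q, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n,
///     a_dev, lda, b_dev, ldb, w_dev, scratchpad_dev, scratchpad_size,
///     info_dev);
/// \endcode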
template <typename T>
inline int sygvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz,
                 oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
                 T *w, T *scratchpad, int scratchpad_size, int *info) {
#ifdef DPCT_USM_LEVEL_NONE
  auto info_buf = get_buffer<int>(info);
  auto a_buffer = get_buffer<T>(a);
  auto b_buffer = get_buffer<T>(b);
  auto w_buffer = get_buffer<T>(w);
  auto scratchpad_buffer = get_buffer<T>(scratchpad);
  int info_val = 0;
  int ret_val = 0;
  try {
    oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a_buffer, lda,
                               b_buffer, ldb, w_buffer, scratchpad_buffer,
                               scratchpad_size);
  } catch (oneapi::mkl::lapack::exception const& e) {
    std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd"
              << std::endl
              << "reason: " << e.what() << std::endl
              << "info: " << e.info() << std::endl;
    info_val = static_cast<int>(e.info());
    ret_val = 1;
  } catch (sycl::exception const& e) {
    std::cerr << "Caught synchronous SYCL exception:" << std::endl
              << "reason: " << e.what() << std::endl;
    ret_val = 1;
  }
  queue.submit([&, info_val](sycl::handler &cgh) {
    auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh);
    cgh.single_task<dpct_kernel_name<class sygvd_set_info, T>>(
        [=]() { info_acc[0] = info_val; });
  });
  return ret_val;
#else
  try {
    oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a, lda, b, ldb, w,
                               scratchpad, scratchpad_size);
  } catch (oneapi::mkl::lapack::exception const& e) {
    std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd"
              << std::endl
              << "reason: " << e.what() << std::endl
              << "info: " << e.info() << std::endl;
    int info_val = static_cast<int>(e.info());
    queue.memcpy(info, &info_val, sizeof(int)).wait();
    return 1;
  } catch (sycl::exception const& e) {
    std::cerr << "Caught synchronous SYCL exception:" << std::endl
              << "reason: " << e.what() << std::endl;
    queue.memset(info, 0, sizeof(int)).wait();
    return 1;
  }
  queue.memset(info, 0, sizeof(int));
  return 0;
#endif
}

/// Computes all the eigenvalues, and optionally, the eigenvectors of a complex
/// generalized Hermitian positive-definite eigenproblem using a divide and
/// conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1 or 2 or 3. Specifies the problem type to be solved.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrices A and B.
/// \param [in,out] a The Hermitian matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b The Hermitian matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] w Eigenvalues.
/// \param [in] scratchpad Scratchpad memory to be used by the routine
/// for storing intermediate results.
/// \param [in] scratchpad_size Size of scratchpad memory as a number of
/// floating point elements of type T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
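///
/// A minimal usage sketch (illustrative only, assuming the complex data is
/// stored as sycl::float2): a_dev and b_dev are sycl::float2 device matrices,
/// w_dev is a float device array for the eigenvalues, scratchpad_dev and
/// scratchpad_size follow the element-count convention documented above, and
/// info_dev is a device int.
/// \code
/// int err = dpct::lapack::hegvd(
///     q, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n,
///     a_dev, lda, b_dev, ldb, w_dev, scratchpad_dev, scratchpad_size,
///     info_dev);
/// \endcode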
template <typename T, typename Tw> inline int hegvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb, Tw *w, T *scratchpad, int scratchpad_size, int *info) { using Ty = typename DataType<T>::T2; #ifdef DPCT_USM_LEVEL_NONE auto info_buf = get_buffer<int>(info); auto a_buffer = get_buffer<Ty>(a); auto b_buffer = get_buffer<Ty>(b); auto w_buffer = get_buffer<Tw>(w); auto scratchpad_buffer = get_buffer<Ty>(scratchpad); int info_val = 0; int ret_val = 0; try { oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, a_buffer, lda, b_buffer, ldb, w_buffer, scratchpad_buffer, scratchpad_size); } catch (oneapi::mkl::lapack::exception const& e) { std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd" << std::endl << "reason: " << e.what() << std::endl << "info: " << e.info() << std::endl; info_val = static_cast<int>(e.info()); ret_val = 1; } catch (sycl::exception const& e) { std::cerr << "Caught synchronous SYCL exception:" << std::endl << "reason: " << e.what() << std::endl; ret_val = 1; } queue.submit([&, info_val](sycl::handler &cgh) { auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh); cgh.single_task<dpct_kernel_name<class hegvd_set_info, T>>( [=]() { info_acc[0] = info_val; }); }); return ret_val; #else try { oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, (Ty *)a, lda, (Ty *)b, ldb, w, (Ty *)scratchpad, scratchpad_size); } catch (oneapi::mkl::lapack::exception const& e) { std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd" << std::endl << "reason: " << e.what() << std::endl << "info: " << e.info() << std::endl; int info_val = static_cast<int>(e.info()); queue.memcpy(info, &info_val, sizeof(int)).wait(); return 1; } catch (sycl::exception const& e) { std::cerr << "Caught synchronous SYCL exception:" << std::endl << "reason: " << e.what() << std::endl; queue.memset(info, 0, sizeof(int)).wait(); return 1; } queue.memset(info, 0, sizeof(int)); return 0; #endif } /// Computes the Cholesky factorizations of a batch of symmetric (or Hermitian, /// for complex data) positive-definite matrices. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] queue Device queue where calculations will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in,out] a Array of pointers to matrix A. /// \param [in] lda The leading dimension of matrix A. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. /// \param [in] group_size The batch size. 
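///
/// A minimal usage sketch (illustrative only): a_ptrs is an array of
/// group_size pointers to the individual n-by-n matrices (device accessible,
/// as required by the underlying oneMKL batch API) and info_dev is a device
/// array of group_size ints.
/// \code
/// int err = dpct::lapack::potrf_batch(
///     q, oneapi::mkl::uplo::lower, n, a_ptrs, lda, info_dev, group_size);
/// \endcode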
template <typename T> inline int potrf_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n, T *a[], int lda, int *info, int group_size) { #ifdef DPCT_USM_LEVEL_NONE throw std::runtime_error("this API is unsupported when USM level is none"); #else using Ty = typename DataType<T>::T2; struct matrix_info_t { oneapi::mkl::uplo uplo_info; std::int64_t n_info; std::int64_t lda_info; std::int64_t group_size_info; }; matrix_info_t *matrix_info = (matrix_info_t *)std::malloc(sizeof(matrix_info_t)); matrix_info->uplo_info = uplo; matrix_info->n_info = n; matrix_info->lda_info = lda; matrix_info->group_size_info = group_size; std::int64_t scratchpad_size = 0; sycl::event e; Ty *scratchpad = nullptr; try { scratchpad_size = oneapi::mkl::lapack::potrf_batch_scratchpad_size<Ty>( queue, &(matrix_info->uplo_info), &(matrix_info->n_info), &(matrix_info->lda_info), 1, &(matrix_info->group_size_info)); scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue); e = oneapi::mkl::lapack::potrf_batch( queue, &(matrix_info->uplo_info), &(matrix_info->n_info), (Ty **)a, &(matrix_info->lda_info), 1, &(matrix_info->group_size_info), scratchpad, scratchpad_size); } catch (oneapi::mkl::lapack::batch_error const &be) { std::cerr << "Unexpected exception caught during call to LAPACK API: " "potrf_batch_scratchpad_size/potrf_batch" << std::endl << "reason: " << be.what() << std::endl << "number: " << be.info() << std::endl; int i = 0; auto &ids = be.ids(); std::vector<int> info_vec(group_size); for (auto const &e : be.exceptions()) { try { std::rethrow_exception(e); } catch (oneapi::mkl::lapack::exception &e) { std::cerr << "Exception " << ids[i] << std::endl << "reason: " << e.what() << std::endl << "info: " << e.info() << std::endl; info_vec[i] = e.info(); i++; } } queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait(); std::free(matrix_info); if (scratchpad) sycl::free(scratchpad, queue); return 1; } catch (sycl::exception const &e) { std::cerr << "Caught synchronous SYCL exception:" << std::endl << "reason: " << e.what() << std::endl; queue.memset(info, 0, group_size * sizeof(int)).wait(); std::free(matrix_info); if (scratchpad) sycl::free(scratchpad, queue); return 1; } queue.submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { std::free(matrix_info); sycl::free(scratchpad, queue); }); }); queue.memset(info, 0, group_size * sizeof(int)); return 0; #endif } /// Solves a batch of systems of linear equations with a Cholesky-factored /// symmetric (Hermitian) positive-definite coefficient matrices. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] queue Device queue where calculations will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] nrhs The number of right-hand sides. /// \param [in,out] a Array of pointers to matrix A. /// \param [in] lda The leading dimension of matrix A. /// \param [in,out] b Array of pointers to matrix B. /// \param [in] ldb The leading dimension of matrix B. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. /// \param [in] group_size The batch size. 
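///
/// A minimal usage sketch (illustrative only), typically applied to matrices
/// already factored by potrf_batch: a_ptrs and b_ptrs are arrays of
/// group_size pointers to the factored matrices and the right-hand sides,
/// and info_dev is a device array of group_size ints.
/// \code
/// int err = dpct::lapack::potrs_batch(
///     q, oneapi::mkl::uplo::lower, n, nrhs, a_ptrs, lda, b_ptrs, ldb,
///     info_dev, group_size);
/// \endcode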
template <typename T> inline int potrs_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n, int nrhs, T *a[], int lda, T *b[], int ldb, int *info, int group_size) { #ifdef DPCT_USM_LEVEL_NONE throw std::runtime_error("this API is unsupported when USM level is none"); #else using Ty = typename DataType<T>::T2; struct matrix_info_t { oneapi::mkl::uplo uplo_info; std::int64_t n_info; std::int64_t nrhs_info; std::int64_t lda_info; std::int64_t ldb_info; std::int64_t group_size_info; }; matrix_info_t *matrix_info = (matrix_info_t *)std::malloc(sizeof(matrix_info_t)); matrix_info->uplo_info = uplo; matrix_info->n_info = n; matrix_info->nrhs_info = nrhs; matrix_info->lda_info = lda; matrix_info->ldb_info = ldb; matrix_info->group_size_info = group_size; std::int64_t scratchpad_size = 0; sycl::event e; Ty *scratchpad = nullptr; try { scratchpad_size = oneapi::mkl::lapack::potrs_batch_scratchpad_size<Ty>( queue, &(matrix_info->uplo_info), &(matrix_info->n_info), &(matrix_info->nrhs_info), &(matrix_info->lda_info), &(matrix_info->ldb_info), 1, &(matrix_info->group_size_info)); scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue); e = oneapi::mkl::lapack::potrs_batch( queue, &(matrix_info->uplo_info), &(matrix_info->n_info), &(matrix_info->nrhs_info), (Ty **)a, &(matrix_info->lda_info), (Ty **)b, &(matrix_info->ldb_info), 1, &(matrix_info->group_size_info), scratchpad, scratchpad_size); } catch (oneapi::mkl::lapack::batch_error const &be) { std::cerr << "Unexpected exception caught during call to LAPACK API: " "potrs_batch_scratchpad_size/potrs_batch" << std::endl << "reason: " << be.what() << std::endl << "number: " << be.info() << std::endl; int i = 0; auto &ids = be.ids(); std::vector<int> info_vec(group_size); for (auto const &e : be.exceptions()) { try { std::rethrow_exception(e); } catch (oneapi::mkl::lapack::exception &e) { std::cerr << "Exception " << ids[i] << std::endl << "reason: " << e.what() << std::endl << "info: " << e.info() << std::endl; info_vec[i] = e.info(); i++; } } queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait(); std::free(matrix_info); if (scratchpad) sycl::free(scratchpad, queue); return 1; } catch (sycl::exception const &e) { std::cerr << "Caught synchronous SYCL exception:" << std::endl << "reason: " << e.what() << std::endl; queue.memset(info, 0, group_size * sizeof(int)).wait(); std::free(matrix_info); if (scratchpad) sycl::free(scratchpad, queue); return 1; } queue.submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { std::free(matrix_info); sycl::free(scratchpad, queue); }); }); queue.memset(info, 0, group_size * sizeof(int)); return 0; #endif } namespace detail { template <template <typename> typename functor_t, typename... 
args_t> inline int lapack_shim(sycl::queue &q, library_data_t a_type, int *info, std::string const &lapack_api_name, args_t &&...args) { auto handle_lapack_exception = [&](const oneapi::mkl::lapack::exception &e) { std::cerr << "Unexpected exception caught during call to LAPACK API: " << lapack_api_name << std::endl << "reason: " << e.what() << std::endl << "info: " << e.info() << std::endl << "detail: " << e.detail() << std::endl; if (e.info() < std::numeric_limits<int>::min() || e.info() > std::numeric_limits<int>::max()) { throw std::runtime_error("e.info() exceeds the limit of int type"); } int info_val = static_cast<int>(e.info()); if (info) dpct::detail::dpct_memcpy(q, info, &info_val, sizeof(int), memcpy_direction::host_to_device) .wait(); return 1; }; try { switch (a_type) { case library_data_t::real_float: { functor_t<float>()(std::forward<args_t>(args)...); break; } case library_data_t::real_double: { functor_t<double>()(std::forward<args_t>(args)...); break; } case library_data_t::complex_float: { functor_t<std::complex<float>>()(std::forward<args_t>(args)...); break; } case library_data_t::complex_double: { functor_t<std::complex<double>>()(std::forward<args_t>(args)...); break; } default: throw std::runtime_error("the data type is unsupported"); } } catch (oneapi::mkl::lapack::batch_error const &be) { try { std::rethrow_exception(be.exceptions()[0]); } catch (oneapi::mkl::lapack::exception &e) { return handle_lapack_exception(e); } } catch (oneapi::mkl::lapack::exception const &e) { return handle_lapack_exception(e); } catch (sycl::exception const &e) { std::cerr << "Caught synchronous SYCL exception:" << std::endl << "reason: " << e.what() << std::endl; if (info) dpct::detail::dpct_memset(q, info, 0, sizeof(int)).wait(); return 1; } return 0; } template <typename T> class working_memory { public: working_memory(std::size_t element_number, const sycl::queue &q) : _q(q) { _ptr = dpct::detail::dpct_malloc(element_number * sizeof(T), _q); } auto get_memory() { return dpct::detail::get_memory(reinterpret_cast<T *>(_ptr)); } auto get_ptr() { return _ptr; } void set_event(sycl::event e) { _e = e; } ~working_memory() { if (_ptr) { dpct::async_dpct_free({_ptr}, {_e}, _q); } } private: void *_ptr = nullptr; sycl::event _e; sycl::queue _q; }; std::size_t byte_to_element_number(std::size_t size_in_byte, dpct::library_data_t element_type) { auto dv = std::lldiv( size_in_byte, dpct::detail::library_data_size[static_cast<unsigned int>(element_type)] / 8); if (dv.rem) { throw std::runtime_error( "size_in_byte is not divisible by the size of element (in bytes)"); } return dv.quot; } std::size_t element_number_to_byte(std::size_t size_in_element, dpct::library_data_t element_type) { auto dv = std::lldiv( dpct::detail::library_data_size[static_cast<unsigned int>(element_type)], 8); if (dv.rem) { throw std::runtime_error( "the size of element (in bits) is not divisible by 8"); } return size_in_element * dv.quot; } inline oneapi::mkl::jobsvd char2jobsvd(signed char job) { switch (job) { case 'A': return oneapi::mkl::jobsvd::vectors; case 'S': return oneapi::mkl::jobsvd::somevec; case 'O': return oneapi::mkl::jobsvd::vectorsina; case 'N': return oneapi::mkl::jobsvd::novec; default: throw std::runtime_error("the job type is unsupported"); } } template <typename T> struct getrf_scratchpad_size_impl { void operator()(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t &device_ws_size) { device_ws_size = oneapi::mkl::lapack::getrf_scratchpad_size<T>(q, 
m, n, lda); } }; template <typename T> struct getrf_impl { void operator()(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, std::int64_t *ipiv, void *device_ws, std::size_t device_ws_size, int *info) { auto ipiv_data = dpct::detail::get_memory(ipiv); auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); oneapi::mkl::lapack::getrf(q, m, n, a_data, lda, ipiv_data, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); } }; template <typename T> struct getrs_impl { void operator()(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n, std::int64_t nrhs, library_data_t a_type, void *a, std::int64_t lda, std::int64_t *ipiv, library_data_t b_type, void *b, std::int64_t ldb, int *info) { auto ipiv_data = dpct::detail::get_memory(ipiv); std::int64_t device_ws_size = oneapi::mkl::lapack::getrs_scratchpad_size<T>( q, trans, n, nrhs, lda, ldb); working_memory<T> device_ws(device_ws_size, q); auto device_ws_data = device_ws.get_memory(); auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b)); oneapi::mkl::lapack::getrs(q, trans, n, nrhs, a_data, lda, ipiv_data, b_data, ldb, device_ws_data, device_ws_size); sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int)); device_ws.set_event(e); } }; template <typename T> struct geqrf_scratchpad_size_impl { void operator()(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t &device_ws_size) { device_ws_size = oneapi::mkl::lapack::geqrf_scratchpad_size<T>(q, m, n, lda); } }; template <typename T> struct geqrf_impl { void operator()(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, library_data_t tau_type, void *tau, void *device_ws, std::size_t device_ws_size, int *info) { auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto tau_data = dpct::detail::get_memory(reinterpret_cast<T *>(tau)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); oneapi::mkl::lapack::geqrf(q, m, n, a_data, lda, tau_data, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); } }; template <typename T> struct getrfnp_impl { void operator()(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, std::int64_t *ipiv, void *device_ws, std::size_t device_ws_size, int *info) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else std::int64_t a_stride = m * lda; auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); oneapi::mkl::lapack::getrfnp_batch(q, m, n, a_data, lda, a_stride, 1, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); #endif } }; template <typename T> struct gesvd_scratchpad_size_impl { void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu, oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n, library_data_t a_type, std::int64_t lda, library_data_t u_type, std::int64_t ldu, library_data_t vt_type, std::int64_t ldvt, std::size_t &device_ws_size) { device_ws_size = oneapi::mkl::lapack::gesvd_scratchpad_size<T>( q, jobu, jobvt, m, n, lda, ldu, ldvt); } }; template <typename T> 
struct ElementType { using value_tpye = T; }; template <typename T> struct ElementType<std::complex<T>> { using value_tpye = T; }; template <typename T> struct gesvd_impl { void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu, oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, library_data_t s_type, void *s, library_data_t u_type, void *u, std::int64_t ldu, library_data_t vt_type, void *vt, std::int64_t ldvt, void *device_ws, std::size_t device_ws_size, int *info) { auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto s_data = dpct::detail::get_memory( reinterpret_cast<typename ElementType<T>::value_tpye *>(s)); auto u_data = dpct::detail::get_memory(reinterpret_cast<T *>(u)); auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); oneapi::mkl::lapack::gesvd(q, jobu, jobvt, m, n, a_data, lda, s_data, u_data, ldu, vt_data, ldvt, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); } }; template <typename T> struct gesvd_conj_impl : public gesvd_impl<T> { void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu, oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, library_data_t s_type, void *s, library_data_t u_type, void *u, std::int64_t ldu, library_data_t vt_type, void *vt, std::int64_t ldvt, void *device_ws, std::size_t device_ws_size, int *info) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using base = gesvd_impl<T>; base::operator()(q, jobu, jobvt, m, n, a_type, a, lda, s_type, s, u_type, u, ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info); auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt)); oneapi::mkl::blas::row_major::imatcopy(q, oneapi::mkl::transpose::conjtrans, n, n, T(1.0f), vt_data, ldvt, ldvt); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); #endif } }; template <typename T> struct potrf_scratchpad_size_impl { void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t &device_ws_size) { device_ws_size = oneapi::mkl::lapack::potrf_scratchpad_size<T>(q, uplo, n, lda); } }; template <typename T> struct potrf_impl { void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, void *device_ws, std::size_t device_ws_size, int *info) { auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); oneapi::mkl::lapack::potrf(q, uplo, n, a_data, lda, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); } }; template <typename T> struct potrs_impl { void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t nrhs, library_data_t a_type, void *a, std::int64_t lda, library_data_t b_type, void *b, std::int64_t ldb, int *info) { std::int64_t device_ws_size = oneapi::mkl::lapack::potrs_scratchpad_size<T>( q, uplo, n, nrhs, lda, ldb); working_memory<T> device_ws(device_ws_size, q); auto device_ws_data = device_ws.get_memory(); auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b)); oneapi::mkl::lapack::potrs(q, uplo, n, nrhs, a_data, lda, b_data, ldb, device_ws_data, 
device_ws_size); sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int)); device_ws.set_event(e); } }; template <typename T> struct value_type_trait { using value_type = T; }; template <typename T> struct value_type_trait<std::complex<T>> { using value_type = T; }; template <typename T> auto lamch_s() { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else if constexpr (std::is_same_v<T, float>) { return slamch("S"); } else if constexpr (std::is_same_v<T, double>) { return dlamch("S"); } throw std::runtime_error("the type is unsupported"); #endif } #define DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(FUNC, ...) \ do { \ if constexpr (std::is_floating_point_v<T>) { \ device_ws_size = oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \ } else { \ device_ws_size = oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \ } \ } while (0) #define DISPATCH_FLOAT_FOR_CALCULATION(FUNC, ...) \ do { \ if constexpr (std::is_floating_point_v<T>) { \ oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \ } else { \ oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \ } \ } while (0) template <typename T> struct syheevx_scratchpad_size_impl { void operator()(sycl::queue &q, oneapi::mkl::compz jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, void *vl, void *vu, std::int64_t il, std::int64_t iu, std::size_t &device_ws_size) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using value_t = typename value_type_trait<T>::value_type; auto vl_value = *reinterpret_cast<value_t *>(vl); auto vu_value = *reinterpret_cast<value_t *>(vu); auto abstol = 2 * lamch_s<value_t>(); DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evx_scratchpad_size<T>, q, jobz, range, uplo, n, lda, vl_value, vu_value, il, iu, abstol, lda); #endif } }; template <typename T> constexpr library_data_t get_library_data_t_from_type() { if constexpr (std::is_same_v<T, float>) { return library_data_t::real_float; } else if constexpr (std::is_same_v<T, double>) { return library_data_t::real_double; } else if constexpr (std::is_same_v<T, sycl::float2> || std::is_same_v<T, std::complex<float>>) { return library_data_t::complex_float; } else if constexpr (std::is_same_v<T, sycl::double2> || std::is_same_v<T, std::complex<double>>) { return library_data_t::complex_double; } throw std::runtime_error("the type is unsupported"); } template <typename T> struct syheevx_impl { void operator()(sycl::queue &q, oneapi::mkl::compz jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, void *vl, void *vu, std::int64_t il, std::int64_t iu, std::int64_t *m, library_data_t w_type, void *w, void *device_ws, std::size_t device_ws_size, int *info) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using value_t = typename value_type_trait<T>::value_type; working_memory<T> z(n * lda, q); working_memory<std::int64_t> m_device(1, q); auto z_data = z.get_memory(); auto m_device_data = m_device.get_memory(); auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); auto vl_value = *reinterpret_cast<value_t *>(vl); auto vu_value = *reinterpret_cast<value_t *>(vu); auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w)); auto 
abstol = 2 * lamch_s<value_t>(); DISPATCH_FLOAT_FOR_CALCULATION(evx, q, jobz, range, uplo, n, a_data, lda, vl_value, vu_value, il, iu, abstol, m_device_data, w_data, z_data, lda, device_ws_data, device_ws_size); dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T), memcpy_direction::device_to_device, q); dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t), memcpy_direction::device_to_host, q); sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int)); z.set_event(e); m_device.set_event(e); #endif } }; template <typename T> struct syhegvx_scratchpad_size_impl { void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, std::int64_t ldb, void *vl, void *vu, std::int64_t il, std::int64_t iu, std::size_t &device_ws_size) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using value_t = typename value_type_trait<T>::value_type; auto vl_value = *reinterpret_cast<value_t *>(vl); auto vu_value = *reinterpret_cast<value_t *>(vu); auto abstol = 2 * lamch_s<value_t>(); DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvx_scratchpad_size<T>, q, itype, jobz, range, uplo, n, lda, ldb, vl_value, vu_value, il, iu, abstol, lda); #endif } }; template <typename T> struct syhegvx_impl { void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, std::int64_t n, void *a, std::int64_t lda, void *b, std::int64_t ldb, void *vl, void *vu, std::int64_t il, std::int64_t iu, std::int64_t *m, void *w, void *device_ws, std::size_t device_ws_size, int *info) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using value_t = typename value_type_trait<T>::value_type; working_memory<T> z(n * lda, q); working_memory<std::int64_t> m_device(1, q); auto z_data = z.get_memory(); auto m_device_data = m_device.get_memory(); auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); auto vl_value = *reinterpret_cast<value_t *>(vl); auto vu_value = *reinterpret_cast<value_t *>(vu); auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w)); auto abstol = 2 * lamch_s<value_t>(); DISPATCH_FLOAT_FOR_CALCULATION(gvx, q, itype, jobz, range, uplo, n, a_data, lda, b_data, ldb, vl_value, vu_value, il, iu, abstol, m_device_data, w_data, z_data, lda, device_ws_data, device_ws_size); dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T), memcpy_direction::device_to_device, q); dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t), memcpy_direction::device_to_host, q); sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int)); z.set_event(e); m_device.set_event(e); #endif } }; template <typename T> struct syhegvd_scratchpad_size_impl { void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, std::int64_t ldb, std::size_t &device_ws_size) { DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvd_scratchpad_size<T>, q, itype, jobz, uplo, n, lda, ldb); } }; template <typename T> struct syhegvd_impl { void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, void 
*a, std::int64_t lda, void *b, std::int64_t ldb, void *w, void *device_ws, std::size_t device_ws_size, int *info) { using value_t = typename value_type_trait<T>::value_type; auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w)); DISPATCH_FLOAT_FOR_CALCULATION(gvd, q, itype, jobz, uplo, n, a_data, lda, b_data, ldb, w_data, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); } }; oneapi::mkl::compz job2compz(const oneapi::mkl::job &job) { oneapi::mkl::compz ret; if (job == oneapi::mkl::job::novec) { ret = oneapi::mkl::compz::novectors; } else if (job == oneapi::mkl::job::vec) { ret = oneapi::mkl::compz::vectors; } else { throw std::runtime_error("the job type is unsupported"); } return ret; } template <typename T> struct syheev_scratchpad_size_impl { void operator()(sycl::queue &q, oneapi::mkl::compz jobz, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, std::size_t &device_ws_size) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(ev_scratchpad_size<T>, q, jobz, uplo, n, lda); #endif } }; template <typename T> struct syheev_impl { void operator()(sycl::queue &q, oneapi::mkl::compz jobz, oneapi::mkl::uplo uplo, std::int64_t n, void *a, std::int64_t lda, void *w, void *device_ws, std::size_t device_ws_size, int *info) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using value_t = typename value_type_trait<T>::value_type; auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w)); DISPATCH_FLOAT_FOR_CALCULATION(ev, q, jobz, uplo, n, a_data, lda, w_data, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); #endif } }; template <typename T> struct syheevd_scratchpad_size_impl { void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t &device_ws_size) { DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evd_scratchpad_size<T>, q, jobz, uplo, n, lda); } }; template <typename T> struct syheevd_impl { void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, void *w, void *device_ws, std::size_t device_ws_size, int *info) { using value_t = typename value_type_trait<T>::value_type; auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w)); DISPATCH_FLOAT_FOR_CALCULATION(evd, q, jobz, uplo, n, a_data, lda, w_data, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); } }; #undef DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE #undef DISPATCH_FLOAT_FOR_CALCULATION template <typename T> struct trtri_scratchpad_size_impl { void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t 
&device_ws_size) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else device_ws_size = oneapi::mkl::lapack::trtri_scratchpad_size<T>(q, uplo, diag, n, lda); #endif } }; template <typename T> struct trtri_impl { void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, void *device_ws, std::size_t device_ws_size, int *info) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); oneapi::mkl::lapack::trtri(q, uplo, diag, n, a_data, lda, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); #endif } }; } // namespace detail /// Computes the size of workspace memory of getrf function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] m The number of rows in the matrix A. /// \param [in] n The number of columns in the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [out] device_ws_size The workspace size in bytes. /// \param [out] host_ws_size The host workspace size in bytes. Currently the /// value is always zero. inline int getrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t *device_ws_size, std::size_t *host_ws_size = nullptr) { if (host_ws_size) *host_ws_size = 0; std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::getrf_scratchpad_size_impl>( q, a_type, nullptr, "getrf_scratchpad_size", q, m, n, a_type, lda, device_ws_size_tmp); *device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type); return ret; } /// Computes the LU factorization of a general m-by-n matrix. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q The queue where the routine should be executed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] m The number of rows in the matrix A. /// \param [in] n The number of columns in the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A. Overwritten by L and U. The unit /// diagonal elements of L are not stored. /// \param [in] lda The leading dimension of the matrix A. /// \param [out] ipiv The pivot indices. If \p ipiv is nullptr, non-pivoting /// LU factorization is computed. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The workspace size in bytes. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. 
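///
/// A minimal workflow sketch (illustrative only): query the workspace size,
/// allocate it, then factorize. a_dev, ipiv_dev and info_dev are
/// caller-managed device allocations; passing a nullptr ipiv selects the
/// non-pivoting factorization as described above.
/// \code
/// std::size_t ws_bytes = 0;
/// dpct::lapack::getrf_scratchpad_size(
///     q, m, n, dpct::library_data_t::real_float, lda, &ws_bytes);
/// void *ws = sycl::malloc_device(ws_bytes, q);
/// dpct::lapack::getrf(q, m, n, dpct::library_data_t::real_float, a_dev, lda,
///                     ipiv_dev, ws, ws_bytes, info_dev);
/// \endcode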
inline int getrf(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, std::int64_t *ipiv, void *device_ws, std::size_t device_ws_size, int *info) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else std::size_t device_ws_size_in_element_number = detail::byte_to_element_number(device_ws_size, a_type); if (ipiv == nullptr) { return detail::lapack_shim<detail::getrfnp_impl>( q, a_type, info, "getrfnp_batch", q, m, n, a_type, a, lda, ipiv, device_ws, device_ws_size_in_element_number, info); } return detail::lapack_shim<detail::getrf_impl>( q, a_type, info, "getrf", q, m, n, a_type, a, lda, ipiv, device_ws, device_ws_size_in_element_number, info); #endif } /// Solves a system of linear equations with a LU-factored square coefficient /// matrix, with multiple right-hand sides. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q The queue where the routine should be executed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] trans Indicates the form of the linear equation. /// \param [in] n The order of the matrix A and the number of rows in matrix B. /// \param [in] nrhs The number of right hand sides. /// \param [in] a_type The data type of the matrix A. /// \param [in] a The input matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] ipiv The pivot indices. /// \param [in] b_type The data type of the matrix B. /// \param [in, out] b The matrix B, whose columns are the right-hand sides /// for the systems of equations. /// \param [in] ldb The leading dimension of the matrix B. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. inline int getrs(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n, std::int64_t nrhs, library_data_t a_type, void *a, std::int64_t lda, std::int64_t *ipiv, library_data_t b_type, void *b, std::int64_t ldb, int *info) { return detail::lapack_shim<detail::getrs_impl>( q, a_type, info, "getrs_scratchpad_size/getrs", q, trans, n, nrhs, a_type, a, lda, ipiv, b_type, b, ldb, info); } /// Computes the size of workspace memory of geqrf function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] m The number of rows in the matrix A. /// \param [in] n The number of columns in the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [out] device_ws_size The device workspace size in bytes. /// \param [out] host_ws_size The host workspace size in bytes. Currently the /// value is always zero. 
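///
/// A hypothetical sketch of the query-then-factorize pattern for QR; the queue
/// properties, sizes and dpct::library_data_t::real_double enumerator are
/// illustrative assumptions (geqrf itself is declared later in this header):
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr std::int64_t m = 8, n = 4, lda = 8;
/// double *a = sycl::malloc_device<double>(lda * n, q);   // filled elsewhere
/// double *tau = sycl::malloc_device<double>(n, q);        // elementary reflectors
/// int *info = sycl::malloc_shared<int>(1, q);
/// std::size_t ws_bytes = 0;
/// dpct::lapack::geqrf_scratchpad_size(q, m, n, dpct::library_data_t::real_double,
///                                     lda, &ws_bytes);
/// void *ws = sycl::malloc_device(ws_bytes, q);
/// dpct::lapack::geqrf(q, m, n, dpct::library_data_t::real_double, a, lda,
///                     dpct::library_data_t::real_double, tau, ws, ws_bytes, info);
/// q.wait();
/// \endcode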
inline int geqrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t *device_ws_size, std::size_t *host_ws_size = nullptr) { if (host_ws_size) *host_ws_size = 0; std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::geqrf_scratchpad_size_impl>( q, a_type, nullptr, "geqrf_scratchpad_size", q, m, n, a_type, lda, device_ws_size_tmp); *device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type); return ret; } /// Computes the QR factorization of a general m-by-n matrix. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q The queue where the routine should be executed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] m The number of rows in the matrix A. /// \param [in] n The number of columns in the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A. Overwritten by the factorization data. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] tau_type The data type of the array tau. /// \param [in] tau The array contains scalars that define elementary reflectors /// for the matrix Q in its decomposition in a product of elementary reflectors. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The workspace size in bytes. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. inline int geqrf(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, library_data_t tau_type, void *tau, void *device_ws, std::size_t device_ws_size, int *info) { std::size_t device_ws_size_in_element_number = detail::byte_to_element_number(device_ws_size, a_type); return detail::lapack_shim<detail::geqrf_impl>( q, a_type, info, "geqrf", q, m, n, a_type, a, lda, tau_type, tau, device_ws, device_ws_size_in_element_number, info); } /// Computes the size of workspace memory of gesvd function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S' /// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N' /// (representing jobsvd::novec). /// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S' /// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N' /// (representing jobsvd::novec). /// \param [in] m The number of rows in the matrix A. /// \param [in] n The number of columns in the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] u_type The data type of the matrix U. /// \param [in] ldu The leading dimension of the matrix U. /// \param [in] vt_type The data type of the matrix VT. /// \param [in] ldvt The leading dimension of the matrix VT. /// \param [out] device_ws_size The device workspace size in bytes. /// \param [out] host_ws_size The host workspace size in bytes. Currently the /// value is always zero. 
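///
/// A hypothetical sketch of the workspace query for a full SVD; the 'A'/'A'
/// job characters, matrix sizes and the real_float enumerator are illustrative
/// assumptions:
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr std::int64_t m = 6, n = 4, lda = 6, ldu = 6, ldvt = 4;
/// std::size_t ws_bytes = 0;
/// dpct::lapack::gesvd_scratchpad_size(q, 'A', 'A', m, n,
///                                     dpct::library_data_t::real_float, lda,
///                                     dpct::library_data_t::real_float, ldu,
///                                     dpct::library_data_t::real_float, ldvt,
///                                     &ws_bytes);
/// void *ws = sycl::malloc_device(ws_bytes, q);  // passed to the gesvd overload below
/// \endcode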
inline int gesvd_scratchpad_size(sycl::queue &q, signed char jobu, signed char jobvt, std::int64_t m, std::int64_t n, library_data_t a_type, std::int64_t lda, library_data_t u_type, std::int64_t ldu, library_data_t vt_type, std::int64_t ldvt, std::size_t *device_ws_size, std::size_t *host_ws_size = nullptr) { oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu); oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt); if (host_ws_size) *host_ws_size = 0; std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>( q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu_enum, jobvt_enum, m, n, a_type, lda, u_type, ldu, vt_type, ldvt, device_ws_size_tmp); *device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type); return ret; } /// Computes the size of workspace memory of gesvd function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::vec or job::novec /// \param [in] all_vec Only have effects when \param jobz is job::vec.If the /// value is zero, all m columns of U are returned in the matrix U, otherwise /// the first min( \param m, \param n ) columns of U (the left singular vectors) /// are returned in the matrix U. /// \param [in] m The number of rows in the matrix A. /// \param [in] n The number of columns in the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] u_type The data type of the matrix U. /// \param [in] ldu The leading dimension of the matrix U. /// \param [in] vt_type The data type of the matrix VT. /// \param [in] ldvt The leading dimension of the matrix VT. /// \param [out] device_ws_size The device workspace size as a number of /// elements of type \param a_type. /// \param [out] host_ws_size The host workspace size as a number of elements /// of type \param a_type. Currently the value is always zero. inline int gesvd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz, std::int64_t all_vec, std::int64_t m, std::int64_t n, library_data_t a_type, std::int64_t lda, library_data_t u_type, std::int64_t ldu, library_data_t vt_type, std::int64_t ldvt, int *device_ws_size, std::size_t *host_ws_size = nullptr) { if (host_ws_size) *host_ws_size = 0; oneapi::mkl::jobsvd jobu; oneapi::mkl::jobsvd jobvt; if (jobz == oneapi::mkl::job::vec) { if (all_vec) { jobu = jobvt = oneapi::mkl::jobsvd::somevec; } else { jobu = jobvt = oneapi::mkl::jobsvd::vectors; } } else if (jobz == oneapi::mkl::job::novec) { jobu = jobvt = oneapi::mkl::jobsvd::novec; } else { throw std::runtime_error("the job type is unsupported"); } std::size_t device_ws_size_64; int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>( q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu, jobvt, m, n, a_type, lda, u_type, ldu, vt_type, ldvt, device_ws_size_64); *device_ws_size = device_ws_size_64; return ret; } /// Computes the size of workspace memory of gesvd function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). 
/// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S' /// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N' /// (representing jobsvd::novec). /// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S' /// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N' /// (representing jobsvd::novec). /// \param [in] m The number of rows in the matrix A. /// \param [in] n The number of columns in the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A and it will be overwritten according /// to \p jobu and \p jobvt. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] s_type The data type of the matrix S. /// \param [out] s The output matrix S. /// \param [in] u_type The data type of the matrix U. /// \param [out] u The output matrix U. /// \param [in] ldu The leading dimension of the matrix U. /// \param [in] vt_type The data type of the matrix VT. /// \param [out] vt The output matrix VT. /// \param [in] ldvt The leading dimension of the matrix VT. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The workspace size in bytes. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. inline int gesvd(sycl::queue &q, signed char jobu, signed char jobvt, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, library_data_t s_type, void *s, library_data_t u_type, void *u, std::int64_t ldu, library_data_t vt_type, void *vt, std::int64_t ldvt, void *device_ws, std::size_t device_ws_size, int *info) { oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu); oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt); std::size_t device_ws_size_in_element_number = detail::byte_to_element_number(device_ws_size, a_type); return detail::lapack_shim<detail::gesvd_impl>( q, a_type, info, "gesvd", q, jobu_enum, jobvt_enum, m, n, a_type, a, lda, s_type, s, u_type, u, ldu, vt_type, vt, ldvt, device_ws, device_ws_size_in_element_number, info); } /// Computes the size of workspace memory of gesvd function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::vec or job::novec. /// \param [in] all_vec Only have effects when \param jobz is job::vec.If the /// value is zero, all m columns of U are returned in the matrix U, otherwise /// the first min( \param m, \param n ) columns of U (the left singular vectors) /// are returned in the matrix U. /// \param [in] m The number of rows in the matrix A. /// \param [in] n The number of columns in the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A and it will be overwritten according /// to \p jobu and \p jobvt. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] s_type The data type of the matrix S. /// \param [out] s The output matrix S. /// \param [in] u_type The data type of the matrix U. /// \param [out] u The output matrix U. /// \param [in] ldu The leading dimension of the matrix U. /// \param [in] vt_type The data type of the matrix VT. /// \param [out] vt The output matrix VT. /// \param [in] ldvt The leading dimension of the matrix VT. /// \param [in] device_ws The workspace. 
/// \param [in] device_ws_size The device workspace size as a number of /// elements of type \param a_type. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. inline int gesvd(sycl::queue &q, oneapi::mkl::job jobz, std::int64_t all_vec, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, library_data_t s_type, void *s, library_data_t u_type, void *u, std::int64_t ldu, library_data_t vt_type, void *vt, std::int64_t ldvt, void *device_ws, std::size_t device_ws_size, int *info) { oneapi::mkl::jobsvd jobu; oneapi::mkl::jobsvd jobvt; if (jobz == oneapi::mkl::job::vec) { if (all_vec) { jobu = jobvt = oneapi::mkl::jobsvd::somevec; } else { jobu = jobvt = oneapi::mkl::jobsvd::vectors; } } else if (jobz == oneapi::mkl::job::novec) { jobu = jobvt = oneapi::mkl::jobsvd::novec; } else { throw std::runtime_error("the job type is unsupported"); } detail::lapack_shim<detail::gesvd_conj_impl>( q, a_type, info, "gesvd", q, jobu, jobvt, m, n, a_type, a, lda, s_type, s, u_type, u, ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info); return 0; } /// Computes the size of workspace memory of potrf function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The number of columns in the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [out] device_ws_size The device workspace size in bytes. /// \param [out] host_ws_size The host workspace size in bytes. Currently the /// value is always zero. inline int potrf_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t *device_ws_size, std::size_t *host_ws_size = nullptr) { if (host_ws_size) *host_ws_size = 0; std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::potrf_scratchpad_size_impl>( q, a_type, nullptr, "potrf_scratchpad_size", q, uplo, n, a_type, lda, device_ws_size_tmp); *device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type); return ret; } /// Computes the Cholesky factorization of a symmetric (Hermitian) /// positive-definite matrix. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q The queue where the routine should be executed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The number of columns in the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A. Overwritten by the Cholesky factor U /// or L, as specified by \p uplo. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The workspace size in bytes. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. 
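///
/// A hypothetical sketch of a Cholesky factor-and-solve sequence using potrf
/// and the potrs routine declared later in this header; the queue properties,
/// sizes and single-precision data type are illustrative assumptions:
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr std::int64_t n = 4, nrhs = 1, lda = 4, ldb = 4;
/// float *a = sycl::malloc_device<float>(lda * n, q);    // SPD matrix, filled elsewhere
/// float *b = sycl::malloc_device<float>(ldb * nrhs, q); // right-hand side
/// int *info = sycl::malloc_shared<int>(1, q);
/// std::size_t ws_bytes = 0;
/// dpct::lapack::potrf_scratchpad_size(q, oneapi::mkl::uplo::lower, n,
///                                     dpct::library_data_t::real_float, lda,
///                                     &ws_bytes);
/// void *ws = sycl::malloc_device(ws_bytes, q);
/// dpct::lapack::potrf(q, oneapi::mkl::uplo::lower, n,
///                     dpct::library_data_t::real_float, a, lda, ws, ws_bytes, info);
/// dpct::lapack::potrs(q, oneapi::mkl::uplo::lower, n, nrhs,
///                     dpct::library_data_t::real_float, a, lda,
///                     dpct::library_data_t::real_float, b, ldb, info);
/// q.wait();
/// \endcode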
inline int potrf(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, void *device_ws, std::size_t device_ws_size, int *info) { std::size_t device_ws_size_in_element_number = detail::byte_to_element_number(device_ws_size, a_type); return detail::lapack_shim<detail::potrf_impl>( q, a_type, info, "potrf", q, uplo, n, a_type, a, lda, device_ws, device_ws_size_in_element_number, info); } /// Solves a system of linear equations with a Cholesky-factored symmetric /// (Hermitian) positive-definite coefficient matrix. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q The queue where the routine should be executed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A and the number of rows in matrix B. /// \param [in] nrhs The number of right hand sides. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A. Overwritten by the Cholesky factor U /// or L, as specified by \p uplo. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] b_type The data type of the matrix B. /// \param [in, out] b The matrix B, whose columns are the right-hand sides /// for the systems of equations. /// \param [in] ldb The leading dimension of the matrix B. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. inline int potrs(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t nrhs, library_data_t a_type, void *a, std::int64_t lda, library_data_t b_type, void *b, std::int64_t ldb, int *info) { return detail::lapack_shim<detail::potrs_impl>( q, a_type, info, "potrs_scratchpad_size/potrs", q, uplo, n, nrhs, a_type, a, lda, b_type, b, ldb, info); } /// Computes the size of workspace memory of syevx/heevx function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] range Must be rangev::all, rangev::values or uplo::indices. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] vl If range == rangev::values, the lower bound of the interval /// to be searched for eigenvalues /// \param [in] vu If range == rangev::values, the upper bound of the interval /// to be searched for eigenvalues /// \param [in] il If range == rangev::indices, the indices of the smallest /// eigenvalue to be returned. /// \param [in] iu If range == rangev::indices, the indices of the largest /// eigenvalue to be returned. /// \param [in] w_type The data type of the eigenvalues. /// \param [out] device_ws_size The device workspace size in bytes. /// \param [out] host_ws_size The host workspace size in bytes. Currently the /// value is always zero. 
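///
/// A hypothetical sketch of the workspace query when selecting eigenvalues by
/// index; vl/vu are only meaningful for rangev::values, valid pointers are
/// passed here purely for illustration:
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr std::int64_t n = 16, lda = 16, il = 1, iu = 4;
/// float vl = 0.f, vu = 0.f;
/// std::size_t ws_bytes = 0;
/// dpct::lapack::syheevx_scratchpad_size(q, oneapi::mkl::job::vec,
///                                       oneapi::mkl::rangev::indices,
///                                       oneapi::mkl::uplo::upper, n,
///                                       dpct::library_data_t::real_float, lda,
///                                       &vl, &vu, il, iu,
///                                       dpct::library_data_t::real_float,
///                                       &ws_bytes);
/// \endcode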
inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, std::int64_t lda, void *vl, void *vu, std::int64_t il, std::int64_t iu, library_data_t w_type, std::size_t *device_ws_size, std::size_t *host_ws_size = nullptr) { if (host_ws_size) *host_ws_size = 0; oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>( q, a_type, nullptr, "syevx_scratchpad_size/heevx_scratchpad_size", q, compz_jobz, range, uplo, n, lda, vl, vu, il, iu, device_ws_size_tmp); *device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type); return ret; } /// Computes selected eigenvalues and, optionally, eigenvectors of a /// symmetric/Hermitian matrix. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] range Must be rangev::all, rangev::values or uplo::indices. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is /// overwritten. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] vl If range == rangev::values, the lower bound of the interval /// to be searched for eigenvalues /// \param [in] vu If range == rangev::values, the upper bound of the interval /// to be searched for eigenvalues /// \param [in] il If range == rangev::indices, the indices of the smallest /// eigenvalue to be returned. /// \param [in] iu If range == rangev::indices, the indices of the largest /// eigenvalue to be returned. /// \param [out] m The total number of eigenvalues found. /// \param [in] w_type The data type of the eigenvalues. /// \param [out] w The eigenvalues of the matrix A in ascending order. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The workspace size in bytes. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, void *vl, void *vu, std::int64_t il, std::int64_t iu, std::int64_t *m, library_data_t w_type, void *w, void *device_ws, std::size_t device_ws_size, int *info) { std::size_t device_ws_size_in_element_number = detail::byte_to_element_number(device_ws_size, a_type); oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); int ret = detail::lapack_shim<detail::syheevx_impl>( q, a_type, info, "syevx/heevx", q, compz_jobz, range, uplo, n, a_type, a, lda, vl, vu, il, iu, m, w_type, w, device_ws, device_ws_size_in_element_number, info); q.wait(); return ret; } /// Computes the size of workspace memory of syevx/heevx function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. 
/// \param [in] range Must be rangev::all, rangev::values or uplo::indices. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] vl If range == rangev::values, the lower bound of the interval /// to be searched for eigenvalues /// \param [in] vu If range == rangev::values, the upper bound of the interval /// to be searched for eigenvalues /// \param [in] il If range == rangev::indices, the indices of the smallest /// eigenvalue to be returned. /// \param [in] iu If range == rangev::indices, the indices of the largest /// eigenvalue to be returned. /// \param [out] device_ws_size The device workspace size as a number of /// elements of type \tparam T. template <typename T, typename ValueT> inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n, int lda, ValueT vl, ValueT vu, int il, int iu, int *device_ws_size) { oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>( q, detail::get_library_data_t_from_type<T>(), nullptr, "syevx_scratchpad_size/heevx_scratchpad_size", q, compz_jobz, range, uplo, n, lda, &vl, &vu, il, iu, device_ws_size_tmp); *device_ws_size = (int)device_ws_size_tmp; return ret; } /// Computes selected eigenvalues and, optionally, eigenvectors of a /// symmetric/Hermitian matrix. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] range Must be rangev::all, rangev::values or uplo::indices. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is /// overwritten. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] vl If range == rangev::values, the lower bound of the interval /// to be searched for eigenvalues /// \param [in] vu If range == rangev::values, the upper bound of the interval /// to be searched for eigenvalues /// \param [in] il If range == rangev::indices, the indices of the smallest /// eigenvalue to be returned. /// \param [in] iu If range == rangev::indices, the indices of the largest /// eigenvalue to be returned. /// \param [out] m The total number of eigenvalues found. /// \param [out] w The eigenvalues of the matrix A in ascending order. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The device workspace size as a number of /// elements of type \tparam T. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. 
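///
/// A hypothetical sketch using the typed overloads with T = float; the queue
/// properties, sizes and matrix contents are illustrative assumptions:
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr int n = 16, lda = 16, il = 1, iu = 4;
/// float *a = sycl::malloc_device<float>(lda * n, q); // symmetric, filled elsewhere
/// float *w = sycl::malloc_device<float>(n, q);       // eigenvalues
/// int *info = sycl::malloc_shared<int>(1, q);
/// int ws_elems = 0, found = 0;
/// dpct::lapack::syheevx_scratchpad_size<float>(q, oneapi::mkl::job::vec,
///     oneapi::mkl::rangev::indices, oneapi::mkl::uplo::upper, n, lda,
///     0.f, 0.f, il, iu, &ws_elems);
/// float *ws = sycl::malloc_device<float>(ws_elems, q);
/// dpct::lapack::syheevx(q, oneapi::mkl::job::vec, oneapi::mkl::rangev::indices,
///     oneapi::mkl::uplo::upper, n, a, lda, 0.f, 0.f, il, iu, &found, w,
///     ws, ws_elems, info);
/// \endcode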
template <typename T, typename ValueT> inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n, T *a, int lda, ValueT vl, ValueT vu, int il, int iu, int *m, ValueT *w, T *device_ws, int device_ws_size, int *info) { oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); std::int64_t m64; int ret = detail::lapack_shim<detail::syheevx_impl>( q, detail::get_library_data_t_from_type<T>(), info, "syevx/heevx", q, compz_jobz, range, uplo, n, detail::get_library_data_t_from_type<T>(), a, lda, &vl, &vu, il, iu, &m64, detail::get_library_data_t_from_type<ValueT>(), w, device_ws, device_ws_size, info); q.wait(); *m = (int)m64; return ret; } /// Computes the size of workspace memory of sygvx/hegvx function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] itype Must be 1, 2 or 3. /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] range Must be rangev::all, rangev::values or uplo::indices. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] ldb The leading dimension of the matrix B. /// \param [in] vl If range == rangev::values, the lower bound of the interval /// to be searched for eigenvalues /// \param [in] vu If range == rangev::values, the upper bound of the interval /// to be searched for eigenvalues /// \param [in] il If range == rangev::indices, the indices of the smallest /// eigenvalue to be returned. /// \param [in] iu If range == rangev::indices, the indices of the largest /// eigenvalue to be returned. /// \param [in] device_ws_size The device workspace size as a number of /// elements of type \tparam T. template <typename T, typename ValueT> inline int syhegvx_scratchpad_size(sycl::queue &q, int itype, oneapi::mkl::job jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n, int lda, int ldb, ValueT vl, ValueT vu, int il, int iu, int *device_ws_size) { oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::syhegvx_scratchpad_size_impl>( q, detail::get_library_data_t_from_type<T>(), nullptr, "sygvx_scratchpad_size/hegvx_scratchpad_size", q, itype, compz_jobz, range, uplo, n, lda, ldb, &vl, &vu, il, iu, device_ws_size_tmp); *device_ws_size = (int)device_ws_size_tmp; return ret; } /// Computes selected eigenvalues and, optionally, eigenvectors of a real /// generalized symmetric/Hermitian definite eigenproblem. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] itype Must be 1, 2 or 3. /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] range Must be rangev::all, rangev::values or uplo::indices. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is /// overwritten. /// \param [in] lda The leading dimension of the matrix A. /// \param [in, out] b The input matrix B. /// \param [in] ldb The leading dimension of the matrix B. 
/// \param [in] vl If range == rangev::values, the lower bound of the interval /// to be searched for eigenvalues /// \param [in] vu If range == rangev::values, the upper bound of the interval /// to be searched for eigenvalues /// \param [in] il If range == rangev::indices, the indices of the smallest /// eigenvalue to be returned. /// \param [in] iu If range == rangev::indices, the indices of the largest /// eigenvalue to be returned. /// \param [out] m The total number of eigenvalues found. /// \param [out] w The eigenvalues of the matrix A in ascending order. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The device workspace size as a number of /// elements of type \tparam T. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. template <typename T, typename ValueT> inline int syhegvx(sycl::queue &q, int itype, oneapi::mkl::job jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb, ValueT vl, ValueT vu, int il, int iu, int *m, ValueT *w, T *device_ws, int device_ws_size, int *info) { oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); std::int64_t m64; int ret = detail::lapack_shim<detail::syhegvx_impl>( q, detail::get_library_data_t_from_type<T>(), info, "sygvx/hegvx", q, itype, compz_jobz, range, uplo, n, a, lda, b, ldb, &vl, &vu, il, iu, &m64, w, device_ws, device_ws_size, info); q.wait(); *m = (int)m64; return ret; } /// Computes the size of workspace memory of sygvd/hegvd function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] itype Must be 1, 2 or 3. /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] ldb The leading dimension of the matrix B. /// \param [in] device_ws_size The device workspace size as a number of /// elements of type \tparam T. template <typename T> inline int syhegvd_scratchpad_size(sycl::queue &q, int itype, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, int n, int lda, int ldb, int *device_ws_size) { std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::syhegvd_scratchpad_size_impl>( q, detail::get_library_data_t_from_type<T>(), nullptr, "sygvd_scratchpad_size/hegvd_scratchpad_size", q, itype, jobz, uplo, n, lda, ldb, device_ws_size_tmp); *device_ws_size = (int)device_ws_size_tmp; return ret; } /// Computes all eigenvalues and, optionally, eigenvectors of a real generalized /// symmetric/Hermitian definite eigenproblem using a divide and conquer method. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] itype Must be 1, 2 or 3. /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in, out] a The input matrix A. On exit, it is overwritten by eigenvectors. /// \param [in] lda The leading dimension of the matrix A. /// \param [in, out] b The input matrix B. 
/// \param [in] ldb The leading dimension of the matrix B. /// \param [out] w The eigenvalues of the matrix A in ascending order. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The device workspace size as a number of /// elements of type \tparam T. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. template <typename T, typename ValueT> inline int syhegvd(sycl::queue &q, int itype, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb, ValueT *w, T *device_ws, int device_ws_size, int *info) { return detail::lapack_shim<detail::syhegvd_impl>( q, detail::get_library_data_t_from_type<T>(), info, "sygvd/hegvd", q, itype, jobz, uplo, n, a, lda, b, ldb, w, device_ws, device_ws_size, info); } /// Computes the size of workspace memory of syev/heev function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [out] device_ws_size The device workspace size as a number of /// elements of type \tparam T. template <typename T> inline int syheev_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, int n, int lda, int *device_ws_size) { std::size_t device_ws_size_tmp; oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); int ret = detail::lapack_shim<detail::syheev_scratchpad_size_impl>( q, detail::get_library_data_t_from_type<T>(), nullptr, "syev_scratchpad_size/heev_scratchpad_size", q, compz_jobz, uplo, n, lda, device_ws_size_tmp); *device_ws_size = (int)device_ws_size_tmp; return ret; } /// Computes all eigenvalues and, optionally, eigenvectors of a real symmetric /// or Hermitian matrix. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in, out] a The input matrix A. On exit, it is overwritten by /// eigenvectors. /// \param [in] lda The leading dimension of the matrix A. /// \param [out] w The eigenvalues of the matrix A in ascending order. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The device workspace size as a number of /// elements of type \tparam T. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. template <typename T, typename ValueT> inline int syheev(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, int n, T *a, int lda, ValueT *w, T *device_ws, int device_ws_size, int *info) { oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); return detail::lapack_shim<detail::syheev_impl>( q, detail::get_library_data_t_from_type<T>(), info, "syev/heev", q, compz_jobz, uplo, n, a, lda, w, device_ws, device_ws_size, info); } /// Computes the size of workspace memory of syevd/heevd function. 
/// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] w_type The data type of the eigenvalues. /// \param [out] device_ws_size The device workspace in bytes. /// \param [out] host_ws_size The host workspace size in bytes. Currently the /// value is always zero. inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, std::int64_t lda, library_data_t w_type, std::size_t *device_ws_size, std::size_t *host_ws_size = nullptr) { if (host_ws_size) *host_ws_size = 0; std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>( q, a_type, nullptr, "syevd_scratchpad_size/heevd_scratchpad_size", q, jobz, uplo, n, a_type, lda, device_ws_size_tmp); *device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type); return ret; } /// Computes all eigenvalues and, optionally, all eigenvectors of a real /// symmetric or Hermitian matrix using divide and conquer algorithm. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A. On exit, it is overwritten by /// eigenvectors. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] w_type The data type of the eigenvalues. /// \param [out] w The eigenvalues of the matrix A in ascending order. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The workspace size in bytes. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, library_data_t w_type, void *w, void *device_ws, std::size_t device_ws_size, int *info) { std::size_t device_ws_size_in_element_number = detail::byte_to_element_number(device_ws_size, a_type); return detail::lapack_shim<detail::syheevd_impl>( q, a_type, info, "syevd/heevd", q, jobz, uplo, n, a_type, a, lda, w, device_ws, device_ws_size_in_element_number, info); } /// Computes the size of workspace memory of syevd/heevd function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. 
/// \param [in] w_type The data type of the eigenvalues. /// \param [out] device_ws_size The device workspace size as a number of /// elements of type \tparam T. template <typename T> inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, int *device_ws_size) { std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>( q, detail::get_library_data_t_from_type<T>(), nullptr, "syevd_scratchpad_size/heevd_scratchpad_size", q, jobz, uplo, n, detail::get_library_data_t_from_type<T>(), lda, device_ws_size_tmp); *device_ws_size = (int)device_ws_size_tmp; return ret; } /// Computes all eigenvalues and, optionally, all eigenvectors of a real /// symmetric or Hermitian matrix using divide and conquer algorithm. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A. On exit, it is overwritten by /// eigenvectors. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] w_type The data type of the eigenvalues. /// \param [out] w The eigenvalues of the matrix A in ascending order. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The workspace size as a number of /// elements of type \tparam T. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. template <typename T, typename ValueT> inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, T *a, std::int64_t lda, ValueT *w, T *device_ws, int device_ws_size, int *info) { return detail::lapack_shim<detail::syheevd_impl>( q, detail::get_library_data_t_from_type<T>(), info, "syevd/heevd", q, jobz, uplo, n, detail::get_library_data_t_from_type<T>(), a, lda, w, device_ws, device_ws_size, info); } /// Computes the size of workspace memory of trtri function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] diag Must be diag::nonunit or diag::unit. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [out] device_ws_size The device workspace in bytes. /// \param [out] host_ws_size The host workspace size in bytes. Currently the /// value is always zero. 
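///
/// A hypothetical sketch of a triangular inversion (the trtri routine below is
/// USM-only); the queue properties, sizes and double-precision data type are
/// illustrative assumptions:
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr std::int64_t n = 4, lda = 4;
/// double *a = sycl::malloc_device<double>(lda * n, q); // upper triangular, filled elsewhere
/// int *info = sycl::malloc_shared<int>(1, q);
/// std::size_t ws_bytes = 0;
/// dpct::lapack::trtri_scratchpad_size(q, oneapi::mkl::uplo::upper,
///                                     oneapi::mkl::diag::nonunit, n,
///                                     dpct::library_data_t::real_double, lda,
///                                     &ws_bytes);
/// void *ws = sycl::malloc_device(ws_bytes, q);
/// dpct::lapack::trtri(q, oneapi::mkl::uplo::upper, oneapi::mkl::diag::nonunit,
///                     n, dpct::library_data_t::real_double, a, lda, ws,
///                     ws_bytes, info);
/// q.wait();
/// \endcode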
inline int trtri_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t *device_ws_size, std::size_t *host_ws_size = nullptr) { if (host_ws_size) *host_ws_size = 0; std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::trtri_scratchpad_size_impl>( q, a_type, nullptr, "trtri_scratchpad_size", q, uplo, diag, n, a_type, lda, device_ws_size_tmp); *device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type); return ret; } /// Computes the inverse of a triangular matrix. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] diag Must be diag::nonunit or diag::unit. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A. On exit, it is overwritten by /// the inverse matrix of A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The workspace size in bytes. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. inline int trtri(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, void *device_ws, std::size_t device_ws_size, int *info) { #ifdef DPCT_USM_LEVEL_NONE throw std::runtime_error("this API is unsupported when USM level is none"); #else std::size_t device_ws_size_in_element_number = detail::byte_to_element_number(device_ws_size, a_type); return detail::lapack_shim<detail::trtri_impl>( q, a_type, info, "trtri", q, uplo, diag, n, a_type, a, lda, device_ws, device_ws_size_in_element_number, info); #endif } } // namespace lapack } // namespace dpct #endif // __DPCT_LAPACK_UTILS_HPP__
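//
// Note on error handling common to the routines above: each computation writes
// a LAPACK-style status to the user-provided `info` location (0 on success, or
// the value from the caught synchronous exception). A hypothetical sketch of
// reading it back, assuming `info` was allocated with sycl::malloc_shared:
//
//   q.wait();                 // ensure the routine and the info write finished
//   if (*info != 0) {
//     std::cerr << "LAPACK routine reported info = " << *info << "\n";
//   }
//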
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/fft_utils.hpp
//==---- fft_utils.hpp ----------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_FFT_UTILS_HPP__ #define __DPCT_FFT_UTILS_HPP__ #include <sycl/sycl.hpp> #include <oneapi/mkl.hpp> #include <optional> #include <utility> #include "lib_common_utils.hpp" namespace dpct { namespace fft { /// An enumeration type to describe the FFT direction is forward or backward. enum fft_direction : int { forward = 0, backward }; /// An enumeration type to describe the types of FFT input and output data. enum fft_type : int { real_float_to_complex_float = 0, complex_float_to_real_float, real_double_to_complex_double, complex_double_to_real_double, complex_float_to_complex_float, complex_double_to_complex_double, }; /// A class to perform FFT calculation. class fft_engine { public: /// Default constructor. fft_engine() {} /// Commit the configuration to calculate n-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] dim Dimension number of the data. /// \param [in] n Pointer to an array containing each dimension's size. /// \param [in] inembed Pointer to an array containing each dimension's size /// of the embedded input data. /// \param [in] istride Stride size of the input data. /// \param [in] idist Distance between the two batches of the input data. /// \param [in] input_type Input data type. /// \param [in] onembed Pointer to an array containing each dimension's size /// of the embedded output data. /// \param [in] ostride Stride size of the output data. /// \param [in] odist Distance between the two batches of the output data. /// \param [in] output_type Output data type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] scratchpad_size The workspace size required for this FFT. /// If this value is used to allocate memory, \p direction_and_placement need /// to be specified explicitly to get correct result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. void commit(sycl::queue *exec_queue, int dim, long long *n, long long *inembed, long long istride, long long idist, library_data_t input_type, long long *onembed, long long ostride, long long odist, library_data_t output_type, long long batch, size_t *scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { _q = exec_queue; init<long long>(dim, n, inembed, istride, idist, input_type, onembed, ostride, odist, output_type, batch, direction_and_placement); if (scratchpad_size) { if (_is_estimate_call) *scratchpad_size = _workspace_estimate_bytes; else *scratchpad_size = _workspace_bytes; } } /// Commit the configuration to calculate n-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] dim Dimension number of the data. /// \param [in] n Pointer to an array containing each dimension's size. /// \param [in] inembed Pointer to an array containing each dimension's size /// of the embedded input data. 
/// \param [in] istride Stride size of the input data. /// \param [in] idist Distance between the two batches of the input data. /// \param [in] input_type Input data type. /// \param [in] onembed Pointer to an array containing each dimension's size /// of the embedded output data. /// \param [in] ostride Stride size of the output data. /// \param [in] odist Distance between the two batches of the output data. /// \param [in] output_type Output data type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] scratchpad_size The workspace size required for this FFT. /// If this value is used to allocate memory, \p direction_and_placement need /// to be specified explicitly to get correct result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed, int istride, int idist, library_data_t input_type, int *onembed, int ostride, int odist, library_data_t output_type, int batch, size_t *scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { _q = exec_queue; init<int>(dim, n, inembed, istride, idist, input_type, onembed, ostride, odist, output_type, batch, direction_and_placement); if (scratchpad_size) { if (_is_estimate_call) *scratchpad_size = _workspace_estimate_bytes; else *scratchpad_size = _workspace_bytes; } } /// Commit the configuration to calculate n-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] dim Dimension number of the data. /// \param [in] n Pointer to an array containing each dimension's size. /// \param [in] inembed Pointer to an array containing each dimension's size /// of the embedded input data. /// \param [in] istride Stride size of the input data. /// \param [in] idist Distance between the two batches of the input data. /// \param [in] onembed Pointer to an array containing each dimension's size /// of the embedded output data. /// \param [in] ostride Stride size of the output data. /// \param [in] odist Distance between the two batches of the output data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] scratchpad_size The workspace size required for this FFT. /// If this value is used to allocate memory, \p direction_and_placement need /// to be specified explicitly to get correct result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. 
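///
/// A hypothetical sketch of a batched 2-D single-precision complex-to-complex
/// commit; the sizes, strides, batch count and contiguous layout values are
/// illustrative assumptions:
/// \code
/// sycl::queue q;
/// dpct::fft::fft_engine *plan = dpct::fft::fft_engine::create();
/// long long n[2] = {64, 64};
/// long long inembed[2] = {64, 64}, onembed[2] = {64, 64};
/// size_t ws_bytes = 0;
/// plan->commit(&q, 2, n, inembed, /*istride=*/1, /*idist=*/64 * 64,
///              onembed, /*ostride=*/1, /*odist=*/64 * 64,
///              dpct::fft::fft_type::complex_float_to_complex_float,
///              /*batch=*/8, &ws_bytes);
/// \endcode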
void commit(sycl::queue *exec_queue, int dim, long long *n, long long *inembed, long long istride, long long idist, long long *onembed, long long ostride, long long odist, fft_type type, long long batch, size_t *scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { commit(exec_queue, dim, n, inembed, istride, idist, fft_type_to_data_type(type).first, onembed, ostride, odist, fft_type_to_data_type(type).second, batch, scratchpad_size, direction_and_placement); } /// Commit the configuration to calculate n-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] dim Dimension number of the data. /// \param [in] n Pointer to an array containing each dimension's size. /// \param [in] inembed Pointer to an array containing each dimension's size /// of the embedded input data. /// \param [in] istride Stride size of the input data. /// \param [in] idist Distance between the two batches of the input data. /// \param [in] onembed Pointer to an array containing each dimension's size /// of the embedded output data. /// \param [in] ostride Stride size of the output data. /// \param [in] odist Distance between the two batches of the output data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] scratchpad_size The workspace size required for this FFT. /// If this value is used to allocate memory, \p direction_and_placement need /// to be specified explicitly to get correct result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, fft_type type, int batch, size_t *scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { commit(exec_queue, dim, n, inembed, istride, idist, fft_type_to_data_type(type).first, onembed, ostride, odist, fft_type_to_data_type(type).second, batch, scratchpad_size, direction_and_placement); } /// Commit the configuration to calculate 1-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] n1 The size of the dimension of the data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] scratchpad_size The workspace size required for this FFT. /// If this value is used to allocate memory, \p direction_and_placement need /// to be specified explicitly to get correct result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. 
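///
/// A hypothetical sketch of a 1-D commit that also fixes the direction and
/// in-place placement up front; the transform length and batch count are
/// illustrative assumptions:
/// \code
/// sycl::queue q;
/// dpct::fft::fft_engine *plan = dpct::fft::fft_engine::create();
/// size_t ws_bytes = 0;
/// plan->commit(&q, /*n1=*/1024,
///              dpct::fft::fft_type::complex_float_to_complex_float,
///              /*batch=*/1, &ws_bytes,
///              std::make_pair(dpct::fft::fft_direction::forward,
///                             /*is_inplace=*/true));
/// \endcode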
void commit(sycl::queue *exec_queue, int n1, fft_type type, int batch, size_t *scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { _q = exec_queue; _n.resize(1); _n[0] = n1; std::tie(_input_type, _output_type) = fft_type_to_data_type(type); _dim = 1; _batch = batch; _is_basic = true; if (direction_and_placement.has_value()) { _is_user_specified_dir_and_placement = true; _direction = direction_and_placement->first; _is_inplace = direction_and_placement->second; } config_and_commit_basic(); if (scratchpad_size) { if (_is_estimate_call) *scratchpad_size = _workspace_estimate_bytes; else *scratchpad_size = _workspace_bytes; } } /// Commit the configuration to calculate 2-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] n2 The size of the 2nd dimension (outermost) of the data. /// \param [in] n1 The size of the 1st dimension (innermost) of the data. /// \param [in] type The FFT type. /// \param [out] scratchpad_size The workspace size required for this FFT. /// If this value is used to allocate memory, \p direction_and_placement need /// to be specified explicitly to get correct result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. void commit(sycl::queue *exec_queue, int n2, int n1, fft_type type, size_t *scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { _q = exec_queue; _n.resize(2); _n[0] = n2; _n[1] = n1; std::tie(_input_type, _output_type) = fft_type_to_data_type(type); _dim = 2; _is_basic = true; if (direction_and_placement.has_value()) { _is_user_specified_dir_and_placement = true; _direction = direction_and_placement->first; _is_inplace = direction_and_placement->second; } config_and_commit_basic(); if (scratchpad_size) { if (_is_estimate_call) *scratchpad_size = _workspace_estimate_bytes; else *scratchpad_size = _workspace_bytes; } } /// Commit the configuration to calculate 3-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] n3 The size of the 3rd dimension (outermost) of the data. /// \param [in] n2 The size of the 2nd dimension of the data. /// \param [in] n1 The size of the 1st dimension (innermost) of the data. /// \param [in] type The FFT type. /// \param [out] scratchpad_size The workspace size required for this FFT. /// If this value is used to allocate memory, \p direction_and_placement need /// to be specified explicitly to get correct result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. 
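///
/// A hypothetical sketch of a 3-D real-to-complex commit; the dimension sizes
/// are illustrative assumptions, and the reported scratchpad size could be used
/// to pre-allocate an external workspace:
/// \code
/// sycl::queue q;
/// dpct::fft::fft_engine *plan = dpct::fft::fft_engine::create();
/// size_t ws_bytes = 0;
/// plan->commit(&q, /*n3=*/32, /*n2=*/32, /*n1=*/32,
///              dpct::fft::fft_type::real_float_to_complex_float, &ws_bytes);
/// \endcode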
void commit(sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type, size_t *scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { _q = exec_queue; _n.resize(3); _n[0] = n3; _n[1] = n2; _n[2] = n1; std::tie(_input_type, _output_type) = fft_type_to_data_type(type); _dim = 3; _is_basic = true; if (direction_and_placement.has_value()) { _is_user_specified_dir_and_placement = true; _direction = direction_and_placement->first; _is_inplace = direction_and_placement->second; } config_and_commit_basic(); if (scratchpad_size) { if (_is_estimate_call) *scratchpad_size = _workspace_estimate_bytes; else *scratchpad_size = _workspace_bytes; } } /// Create the class for calculate 1-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] n1 The size of the dimension of the data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. static fft_engine * create(sycl::queue *exec_queue, int n1, fft_type type, int batch, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = new fft_engine(); engine->commit(exec_queue, n1, type, batch, nullptr, direction_and_placement); return engine; } /// Create the class for calculate 2-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] n2 The size of the 2nd dimension (outermost) of the data. /// \param [in] n1 The size of the 1st dimension (innermost) of the data. /// \param [in] type The FFT type. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. static fft_engine * create(sycl::queue *exec_queue, int n2, int n1, fft_type type, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = new fft_engine(); engine->commit(exec_queue, n2, n1, type, nullptr, direction_and_placement); return engine; } /// Create the class for calculate 3-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] n3 The size of the 3rd dimension (outermost) of the data. /// \param [in] n2 The size of the 2nd dimension of the data. /// \param [in] n1 The size of the 1st dimension (innermost) of the data. /// \param [in] type The FFT type. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. 
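/// Example (illustrative; \p q is a placeholder queue):
/// fft_engine *engine = fft_engine::create(&q, 64, 64, 64,
///                                         fft_type::real_float_to_complex_float);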
static fft_engine * create(sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = new fft_engine(); engine->commit(exec_queue, n3, n2, n1, type, nullptr, direction_and_placement); return engine; } /// Create the class for calculate n-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] dim Dimension number of the data. /// \param [in] n Pointer to an array containing each dimension's size. /// \param [in] inembed Pointer to an array containing each dimension's size /// of the embedded input data. /// \param [in] istride Stride size of the input data. /// \param [in] idist Distance between the two batches of the input data. /// \param [in] onembed Pointer to an array containing each dimension's size /// of the embedded output data. /// \param [in] ostride Stride size of the output data. /// \param [in] odist Distance between the two batches of the output data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. static fft_engine * create(sycl::queue *exec_queue, int dim, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, fft_type type, int batch, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = new fft_engine(); engine->commit(exec_queue, dim, n, inembed, istride, idist, onembed, ostride, odist, type, batch, nullptr, direction_and_placement); return engine; } /// Create the class for calculate FFT without commit any config. static fft_engine *create() { fft_engine *engine = new fft_engine(); return engine; } /// Destroy the class for calculate FFT. /// \param [in] engine Pointer returned from fft_engine::craete. static void destroy(fft_engine *engine) { delete engine; } #ifdef __INTEL_MKL__ /// Estimates the workspace size for calculating n-D FFT. /// \param [in] dim Dimension number of the data. /// \param [in] n Pointer to an array containing each dimension's size. /// \param [in] inembed Pointer to an array containing each dimension's size /// of the embedded input data. /// \param [in] istride Stride size of the input data. /// \param [in] idist Distance between the two batches of the input data. /// \param [in] onembed Pointer to an array containing each dimension's size /// of the embedded output data. /// \param [in] ostride Stride size of the output data. /// \param [in] odist Distance between the two batches of the output data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] estimated_scratchpad_size The estimated workspace size /// required for this FFT. If this value is used to allocate memory, /// \p direction_and_placement need to be specified explicitly to get correct /// result. /// \param [in] direction_and_placement Explicitly specify the FFT /// direction and placement info. If it is not set, forward direction(if /// current FFT is complex-to-complex) and out-of-place (false) are set by default. 
static void estimate_size(int dim, long long *n, long long *inembed, long long istride, long long idist, long long *onembed, long long ostride, long long odist, fft_type type, long long batch, size_t *estimated_scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = fft_engine::create(); engine->_is_estimate_call = true; engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist, fft_type_to_data_type(type).first, onembed, ostride, odist, fft_type_to_data_type(type).second, batch, estimated_scratchpad_size, direction_and_placement); fft_engine::destroy(engine); } /// Estimates the workspace size for calculating n-D FFT. /// \param [in] dim Dimension number of the data. /// \param [in] n Pointer to an array containing each dimension's size. /// \param [in] inembed Pointer to an array containing each dimension's size /// of the embedded input data. /// \param [in] istride Stride size of the input data. /// \param [in] idist Distance between the two batches of the input data. /// \param [in] onembed Pointer to an array containing each dimension's size /// of the embedded output data. /// \param [in] ostride Stride size of the output data. /// \param [in] odist Distance between the two batches of the output data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] estimated_scratchpad_size The estimated workspace size /// required for this FFT. If this value is used to allocate memory, /// \p direction_and_placement need to be specified explicitly to get correct /// result. /// \param [in] direction_and_placement Explicitly specify the FFT /// direction and placement info. If it is not set, forward direction(if /// current FFT is complex-to-complex) and out-of-place (false) are set by default. static void estimate_size(int dim, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, fft_type type, int batch, size_t *estimated_scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = fft_engine::create(); engine->_is_estimate_call = true; engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist, fft_type_to_data_type(type).first, onembed, ostride, odist, fft_type_to_data_type(type).second, batch, estimated_scratchpad_size, direction_and_placement); fft_engine::destroy(engine); } /// Estimates the workspace size for calculating 1-D FFT. /// \param [in] n1 The size of the dimension of the data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] estimated_scratchpad_size The estimated workspace size /// required for this FFT. If this value is used to allocate memory, /// \p direction_and_placement need to be specified explicitly to get correct /// result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If it is not set, forward direction(if current FFT is /// complex-to-complex) and out-of-place (false) are set by default. 
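/// Example (illustrative): size_t bytes = 0;
/// fft_engine::estimate_size(1024, fft_type::real_float_to_complex_float, 8, &bytes);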
static void estimate_size(int n1, fft_type type, int batch, size_t *estimated_scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = fft_engine::create(); engine->_is_estimate_call = true; engine->commit(&dpct::get_default_queue(), n1, type, batch, estimated_scratchpad_size, direction_and_placement); fft_engine::destroy(engine); } /// Estimates the workspace size for calculating 2-D FFT. /// \param [in] n2 The size of the 2nd dimension (outermost) of the data. /// \param [in] n1 The size of the 1st dimension (innermost) of the data. /// \param [in] type The FFT type. /// \param [out] estimated_scratchpad_size The estimated workspace size /// required for this FFT. If this value is used to allocate memory, /// \p direction_and_placement need to be specified explicitly to get correct /// result. /// \param [in] direction_and_placement Explicitly specify the FFT /// direction and placement info. If it is not set, forward direction(if /// current FFT is complex-to-complex) and out-of-place (false) are set by default. static void estimate_size(int n2, int n1, fft_type type, size_t *estimated_scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = fft_engine::create(); engine->_is_estimate_call = true; engine->commit(&dpct::get_default_queue(), n2, n1, type, estimated_scratchpad_size, direction_and_placement); fft_engine::destroy(engine); } /// Estimates the workspace size for calculating 3-D FFT. /// \param [in] n3 The size of the 3rd dimension (outermost) of the data. /// \param [in] n2 The size of the 2nd dimension of the data. /// \param [in] n1 The size of the 1st dimension (innermost) of the data. /// \param [in] type The FFT type. /// \param [out] estimated_scratchpad_size The estimated workspace size /// required for this FFT. If this value is used to allocate memory, /// \p direction_and_placement need to be specified explicitly to get correct /// result. /// \param [in] direction_and_placement Explicitly specify the FFT /// direction and placement info. If it is not set, forward direction(if /// current FFT is complex-to-complex) and out-of-place (false) are set by default. static void estimate_size(int n3, int n2, int n1, fft_type type, size_t *estimated_scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = fft_engine::create(); engine->_is_estimate_call = true; engine->commit(&dpct::get_default_queue(), n3, n2, n1, type, estimated_scratchpad_size, direction_and_placement); fft_engine::destroy(engine); } #endif /// Execute the FFT calculation. /// \param [in] input Pointer to the input data. /// \param [out] output Pointer to the output data. /// \param [in] direction The FFT direction. 
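/// Example (illustrative; \p in and \p out are placeholder sycl::float2 pointers
/// for a complex_float_to_complex_float plan):
/// engine->compute(in, out, fft_direction::forward);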
template <typename input_t, typename output_t> void compute(input_t *input, output_t *output, fft_direction direction) { if (_input_type == library_data_t::complex_float && _output_type == library_data_t::complex_float) { compute_complex<float, oneapi::mkl::dft::precision::SINGLE>( (float *)input, (float *)output, direction); } else if (_input_type == library_data_t::complex_double && _output_type == library_data_t::complex_double) { compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>( (double *)input, (double *)output, direction); } else if (_input_type == library_data_t::real_float && _output_type == library_data_t::complex_float) { _direction = direction; compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input, (float *)output); } else if (_input_type == library_data_t::complex_float && _output_type == library_data_t::real_float) { _direction = direction; compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input, (float *)output); } else if (_input_type == library_data_t::real_double && _output_type == library_data_t::complex_double) { _direction = direction; compute_real<double, oneapi::mkl::dft::precision::DOUBLE>( (double *)input, (double *)output); } else if (_input_type == library_data_t::complex_double && _output_type == library_data_t::real_double) { _direction = direction; compute_real<double, oneapi::mkl::dft::precision::DOUBLE>( (double *)input, (double *)output); } } template <> void compute(float *input, sycl::float2 *output, fft_direction direction) { _direction = direction; compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input, (float *)output); } template <> void compute(sycl::float2 *input, float *output, fft_direction direction) { _direction = direction; compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input, (float *)output); } template <> void compute(double *input, sycl::double2 *output, fft_direction direction) { _direction = direction; compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input, (double *)output); } template <> void compute(sycl::double2 *input, double *output, fft_direction direction) { _direction = direction; compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input, (double *)output); } template <> void compute(sycl::float2 *input, sycl::float2 *output, fft_direction direction) { compute_complex<float, oneapi::mkl::dft::precision::SINGLE>( (float *)input, (float *)output, direction); } template <> void compute(sycl::double2 *input, sycl::double2 *output, fft_direction direction) { compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>( (double *)input, (double *)output, direction); } /// Setting the user's SYCL queue for calculation. /// \param [in] q Pointer to the SYCL queue. void set_queue(sycl::queue *q) { _q = q; } #ifdef __INTEL_MKL__ /// Setting whether to use external or internal workspace. /// \param [in] flag True means using internal workspace. False means using /// external workspace. void use_internal_workspace(bool flag = true) { _use_external_workspace = !flag; } /// Specify the external workspace. /// \param [in] ptr Pointer to the workspace. 
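/// Example (illustrative; \p dev_ptr is a placeholder device allocation): call
/// engine->use_internal_workspace(false) before commit, then
/// engine->set_workspace(dev_ptr) once the plan is committed.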
void set_workspace(void *ptr) { if (!_use_external_workspace) { return; } if (_input_type == library_data_t::complex_float && _output_type == library_data_t::complex_float) { if (_q->get_device().is_gpu()) { auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr)); _desc_sc->set_workspace(data); } } else if (_input_type == library_data_t::complex_double && _output_type == library_data_t::complex_double) { if (_q->get_device().is_gpu()) { auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr)); _desc_dc->set_workspace(data); } } else if ((_input_type == library_data_t::real_float && _output_type == library_data_t::complex_float) || (_input_type == library_data_t::complex_float && _output_type == library_data_t::real_float)) { if (_q->get_device().is_gpu()) { auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr)); _desc_sr->set_workspace(data); } } else if ((_input_type == library_data_t::real_double && _output_type == library_data_t::complex_double) || (_input_type == library_data_t::complex_double && _output_type == library_data_t::real_double)) { if (_q->get_device().is_gpu()) { auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr)); _desc_dr->set_workspace(data); } } else { throw sycl::exception(sycl::make_error_code(sycl::errc::invalid), "invalid fft type"); } } #endif /// Get the workspace size. /// \param [out] scratchpad_size Workspace size in bytes. void get_workspace_size(size_t *scratchpad_size) { if (scratchpad_size) { *scratchpad_size = _workspace_bytes; } } private: static std::pair<library_data_t, library_data_t> fft_type_to_data_type(fft_type type) { switch (type) { case fft_type::real_float_to_complex_float: { return std::make_pair(library_data_t::real_float, library_data_t::complex_float); } case fft_type::complex_float_to_real_float: { return std::make_pair(library_data_t::complex_float, library_data_t::real_float); } case fft_type::real_double_to_complex_double: { return std::make_pair(library_data_t::real_double, library_data_t::complex_double); } case fft_type::complex_double_to_real_double: { return std::make_pair(library_data_t::complex_double, library_data_t::real_double); } case fft_type::complex_float_to_complex_float: { return std::make_pair(library_data_t::complex_float, library_data_t::complex_float); } case fft_type::complex_double_to_complex_double: { return std::make_pair(library_data_t::complex_double, library_data_t::complex_double); } } } void config_and_commit_basic() { if (_input_type == library_data_t::complex_float && _output_type == library_data_t::complex_float) { _desc_sc = std::make_shared< oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::COMPLEX>>(_n); std::int64_t distance = 1; for (auto i : _n) distance = distance * i; _fwd_dist = distance; _bwd_dist = distance; _desc_sc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, distance); _desc_sc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, distance); _desc_sc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, _batch); #ifdef __INTEL_MKL__ if (_is_user_specified_dir_and_placement && _is_inplace) _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); else _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); if (_use_external_workspace) { if (_q->get_device().is_gpu()) { _desc_sc->set_value( oneapi::mkl::dft::config_param::WORKSPACE, oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL); } 
} if (_is_estimate_call) { if (_q->get_device().is_gpu()) { _desc_sc->get_value( oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES, &_workspace_estimate_bytes); } } else { _desc_sc->commit(*_q); if (_q->get_device().is_gpu()) { _desc_sc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES, &_workspace_bytes); } } #else if (_is_user_specified_dir_and_placement && _is_inplace) _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); else _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); _desc_sc->commit(*_q); #endif } else if (_input_type == library_data_t::complex_double && _output_type == library_data_t::complex_double) { _desc_dc = std::make_shared< oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::COMPLEX>>(_n); std::int64_t distance = 1; for (auto i : _n) distance = distance * i; _fwd_dist = distance; _bwd_dist = distance; _desc_dc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, distance); _desc_dc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, distance); _desc_dc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, _batch); #ifdef __INTEL_MKL__ if (_is_user_specified_dir_and_placement && _is_inplace) _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); else _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); if (_use_external_workspace) { if (_q->get_device().is_gpu()) { _desc_dc->set_value( oneapi::mkl::dft::config_param::WORKSPACE, oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL); } } if (_is_estimate_call) { if (_q->get_device().is_gpu()) { _desc_dc->get_value( oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES, &_workspace_estimate_bytes); } } else { _desc_dc->commit(*_q); if (_q->get_device().is_gpu()) { _desc_dc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES, &_workspace_bytes); } } #else if (_is_user_specified_dir_and_placement && _is_inplace) _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); else _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); _desc_dc->commit(*_q); #endif } else if ((_input_type == library_data_t::real_float && _output_type == library_data_t::complex_float) || (_input_type == library_data_t::complex_float && _output_type == library_data_t::real_float)) { _desc_sr = std::make_shared<oneapi::mkl::dft::descriptor< oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>>( _n); if (_input_type == library_data_t::real_float && _output_type == library_data_t::complex_float) _direction = fft_direction::forward; else _direction = fft_direction::backward; _desc_sr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, _batch); #ifdef __INTEL_MKL__ if (_is_user_specified_dir_and_placement && _is_inplace) { _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); set_stride_and_distance_basic<true>(_desc_sr); } else { _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); set_stride_and_distance_basic<false>(_desc_sr); } if (_use_external_workspace) { if (_q->get_device().is_gpu()) { _desc_sr->set_value( oneapi::mkl::dft::config_param::WORKSPACE, oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL); } } if (_is_estimate_call) { if 
(_q->get_device().is_gpu()) { _desc_sr->get_value( oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES, &_workspace_estimate_bytes); } } else { _desc_sr->commit(*_q); if (_q->get_device().is_gpu()) { _desc_sr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES, &_workspace_bytes); } } #else if (_is_user_specified_dir_and_placement && _is_inplace) { _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); set_stride_and_distance_basic<true>(_desc_sr); } else { _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); set_stride_and_distance_basic<false>(_desc_sr); } _desc_sr->commit(*_q); #endif } else if ((_input_type == library_data_t::real_double && _output_type == library_data_t::complex_double) || (_input_type == library_data_t::complex_double && _output_type == library_data_t::real_double)) { _desc_dr = std::make_shared<oneapi::mkl::dft::descriptor< oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>>( _n); if (_input_type == library_data_t::real_double && _output_type == library_data_t::complex_double) _direction = fft_direction::forward; else _direction = fft_direction::backward; _desc_dr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, _batch); #ifdef __INTEL_MKL__ if (_is_user_specified_dir_and_placement && _is_inplace) { _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); set_stride_and_distance_basic<true>(_desc_dr); } else { _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); set_stride_and_distance_basic<false>(_desc_dr); } if (_use_external_workspace) { if (_q->get_device().is_gpu()) { _desc_dr->set_value( oneapi::mkl::dft::config_param::WORKSPACE, oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL); } } if (_is_estimate_call) { if (_q->get_device().is_gpu()) { _desc_dr->get_value( oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES, &_workspace_estimate_bytes); } } else { _desc_dr->commit(*_q); if (_q->get_device().is_gpu()) { _desc_dr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES, &_workspace_bytes); } } #else if (_is_user_specified_dir_and_placement && _is_inplace) { _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); set_stride_and_distance_basic<true>(_desc_dr); } else { _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); set_stride_and_distance_basic<false>(_desc_dr); } _desc_dr->commit(*_q); #endif } else { throw sycl::exception(sycl::make_error_code(sycl::errc::invalid), "invalid fft type"); } } void config_and_commit_advanced() { #ifdef __INTEL_MKL__ #define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE) \ { \ DESC = std::make_shared<oneapi::mkl::dft::descriptor< \ oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>( \ _n); \ set_stride_advanced(DESC); \ DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \ DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \ DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, \ _batch); \ if (_is_user_specified_dir_and_placement && _is_inplace) \ DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \ DFTI_CONFIG_VALUE::DFTI_INPLACE); \ else \ DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \ DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); \ if (_use_external_workspace) { \ 
DESC->set_value(oneapi::mkl::dft::config_param::WORKSPACE, \ oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL); \ } \ if (_is_estimate_call) { \ if (_q->get_device().is_gpu()) { \ DESC->get_value( \ oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES, \ &_workspace_estimate_bytes); \ } \ } else { \ DESC->commit(*_q); \ if (_is_estimate_call) { \ DESC->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES, \ &_workspace_bytes); \ } \ } \ } #else #define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE) \ { \ DESC = std::make_shared<oneapi::mkl::dft::descriptor< \ oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>( \ _n); \ set_stride_advanced(DESC); \ DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \ DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \ DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, \ _batch); \ if (_is_user_specified_dir_and_placement && _is_inplace) \ DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \ oneapi::mkl::dft::config_value::INPLACE); \ else \ DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \ oneapi::mkl::dft::config_value::NOT_INPLACE); \ DESC->commit(*_q); \ } #endif if (_input_type == library_data_t::complex_float && _output_type == library_data_t::complex_float) { CONFIG_AND_COMMIT(_desc_sc, SINGLE, COMPLEX, float); } else if (_input_type == library_data_t::complex_double && _output_type == library_data_t::complex_double) { CONFIG_AND_COMMIT(_desc_dc, DOUBLE, COMPLEX, double); } else if ((_input_type == library_data_t::real_float && _output_type == library_data_t::complex_float) || (_input_type == library_data_t::complex_float && _output_type == library_data_t::real_float)) { CONFIG_AND_COMMIT(_desc_sr, SINGLE, REAL, float); } else if ((_input_type == library_data_t::real_double && _output_type == library_data_t::complex_double) || (_input_type == library_data_t::complex_double && _output_type == library_data_t::real_double)) { CONFIG_AND_COMMIT(_desc_dr, DOUBLE, REAL, double); } else { throw sycl::exception(sycl::make_error_code(sycl::errc::invalid), "invalid fft type"); } #undef CONFIG_AND_COMMIT } template <typename T> void init(int dim, T *n, T *inembed, T istride, T idist, library_data_t input_type, T *onembed, T ostride, T odist, library_data_t output_type, T batch, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement) { if (direction_and_placement.has_value()) { _is_user_specified_dir_and_placement = true; _direction = direction_and_placement->first; _is_inplace = direction_and_placement->second; } _n.resize(dim); _inembed.resize(dim); _onembed.resize(dim); _input_type = input_type; _output_type = output_type; for (int i = 0; i < dim; i++) { _n[i] = n[i]; } if (inembed && onembed) { for (int i = 0; i < dim; i++) { _inembed[i] = inembed[i]; _onembed[i] = onembed[i]; } _istride = istride; _ostride = ostride; if ((_input_type == library_data_t::real_float && _output_type == library_data_t::complex_float) || (_input_type == library_data_t::real_double && _output_type == library_data_t::complex_double)) { _fwd_dist = idist; _bwd_dist = odist; } else if ((_output_type == library_data_t::real_float && _input_type == library_data_t::complex_float) || (_output_type == library_data_t::real_double && _input_type == library_data_t::complex_double)) { _fwd_dist = odist; _bwd_dist = idist; } else { if (_is_user_specified_dir_and_placement && (_direction == fft_direction::backward)) { _fwd_dist = odist; _bwd_dist = idist; } else 
{ _fwd_dist = idist; _bwd_dist = odist; } } } else { _is_basic = true; } _batch = batch; _dim = dim; if (_is_basic) config_and_commit_basic(); else config_and_commit_advanced(); } template <class Desc_t> void set_stride_advanced(std::shared_ptr<Desc_t> desc) { if (_dim == 1) { std::int64_t input_stride[2] = {0, _istride}; std::int64_t output_stride[2] = {0, _ostride}; desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, input_stride); desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, output_stride); } else if (_dim == 2) { std::int64_t input_stride[3] = {0, _inembed[1] * _istride, _istride}; std::int64_t output_stride[3] = {0, _onembed[1] * _ostride, _ostride}; desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, input_stride); desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, output_stride); } else if (_dim == 3) { std::int64_t input_stride[4] = {0, _inembed[2] * _inembed[1] * _istride, _inembed[2] * _istride, _istride}; std::int64_t output_stride[4] = {0, _onembed[2] * _onembed[1] * _ostride, _onembed[2] * _ostride, _ostride}; desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, input_stride); desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, output_stride); } } template <class Desc_t> void swap_distance(std::shared_ptr<Desc_t> desc) { desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _bwd_dist); desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _fwd_dist); std::int64_t temp = _bwd_dist; _bwd_dist = _fwd_dist; _fwd_dist = temp; } template <bool Is_inplace, class Desc_t> void set_stride_and_distance_basic(std::shared_ptr<Desc_t> desc) { std::int64_t forward_distance = 0; std::int64_t backward_distance = 0; #define SET_STRIDE \ { \ if (_direction == fft_direction::forward) { \ desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \ real_stride); \ desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \ complex_stride); \ } else { \ desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \ complex_stride); \ desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \ real_stride); \ } \ } if (_dim == 1) { if constexpr (Is_inplace) { std::int64_t real_stride[2] = {0, 1}; std::int64_t complex_stride[2] = {0, 1}; SET_STRIDE; forward_distance = 2 * (_n[0] / 2 + 1); backward_distance = _n[0] / 2 + 1; } else { std::int64_t real_stride[2] = {0, 1}; std::int64_t complex_stride[2] = {0, 1}; SET_STRIDE; forward_distance = _n[0]; backward_distance = _n[0] / 2 + 1; } } else if (_dim == 2) { if constexpr (Is_inplace) { std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1}; std::int64_t real_stride[3] = {0, 2 * (_n[1] / 2 + 1), 1}; SET_STRIDE; forward_distance = _n[0] * 2 * (_n[1] / 2 + 1); backward_distance = _n[0] * (_n[1] / 2 + 1); } else { std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1}; std::int64_t real_stride[3] = {0, _n[1], 1}; SET_STRIDE; forward_distance = _n[0] * _n[1]; backward_distance = _n[0] * (_n[1] / 2 + 1); } } else if (_dim == 3) { if constexpr (Is_inplace) { std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1), _n[2] / 2 + 1, 1}; std::int64_t real_stride[4] = {0, _n[1] * 2 * (_n[2] / 2 + 1), 2 * (_n[2] / 2 + 1), 1}; SET_STRIDE; forward_distance = _n[0] * _n[1] * 2 * (_n[2] / 2 + 1); backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1); } else { std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1), _n[2] / 2 + 1, 1}; std::int64_t real_stride[4] = {0, _n[1] * _n[2], _n[2], 1}; SET_STRIDE; forward_distance = _n[0] * _n[1] * _n[2]; 
backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1); } } #undef SET_STRIDE desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, forward_distance); desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, backward_distance); } #define COMPUTE(DESC) \ { \ if (_is_inplace) { \ auto data_input = \ dpct::detail::get_memory(reinterpret_cast<T *>(input)); \ if (_direction == fft_direction::forward) { \ oneapi::mkl::dft::compute_forward(*DESC, data_input); \ } else { \ oneapi::mkl::dft::compute_backward(*DESC, data_input); \ } \ } else { \ auto data_input = \ dpct::detail::get_memory(reinterpret_cast<T *>(input)); \ auto data_output = \ dpct::detail::get_memory(reinterpret_cast<T *>(output)); \ if (_direction == fft_direction::forward) { \ oneapi::mkl::dft::compute_forward(*DESC, data_input, data_output); \ } else { \ oneapi::mkl::dft::compute_backward(*DESC, data_input, data_output); \ } \ } \ } template <class T, oneapi::mkl::dft::precision Precision> void compute_complex(T *input, T *output, fft_direction direction) { bool is_this_compute_inplace = input == output; if (!_is_user_specified_dir_and_placement) { // The complex domain descriptor need different config values if the // FFT direction or placement is different. // Here we check the conditions, and new config values are set and // re-committed if needed. if (direction != _direction || is_this_compute_inplace != _is_inplace) { if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) { if (direction != _direction) { swap_distance(_desc_sc); _direction = direction; } if (is_this_compute_inplace != _is_inplace) { _is_inplace = is_this_compute_inplace; #ifdef __INTEL_MKL__ if (_is_inplace) { _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); } else { _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); } #else if (_is_inplace) { _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); } else { _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); } #endif } _desc_sc->commit(*_q); } else { if (direction != _direction) { swap_distance(_desc_dc); _direction = direction; } if (is_this_compute_inplace != _is_inplace) { _is_inplace = is_this_compute_inplace; #ifdef __INTEL_MKL__ if (_is_inplace) { _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); } else { _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); } #else if (_is_inplace) { _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); } else { _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); } #endif } _desc_dc->commit(*_q); } } } if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) { COMPUTE(_desc_sc); } else { COMPUTE(_desc_dc); } } template <class T, oneapi::mkl::dft::precision Precision> void compute_real(T *input, T *output) { bool is_this_compute_inplace = input == output; if (!_is_user_specified_dir_and_placement) { // The real domain descriptor need different config values if the // FFT placement is different. // Here we check the condition, and new config values are set and // re-committed if needed. 
if (is_this_compute_inplace != _is_inplace) { if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) { _is_inplace = is_this_compute_inplace; if (_is_inplace) { #ifdef __INTEL_MKL__ _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); #else _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); #endif if (_is_basic) set_stride_and_distance_basic<true>(_desc_sr); } else { #ifdef __INTEL_MKL__ _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); #else _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); #endif if (_is_basic) set_stride_and_distance_basic<false>(_desc_sr); } _desc_sr->commit(*_q); } else { _is_inplace = is_this_compute_inplace; if (_is_inplace) { #ifdef __INTEL_MKL__ _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); #else _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); #endif if (_is_basic) set_stride_and_distance_basic<true>(_desc_dr); } else { #ifdef __INTEL_MKL__ _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); #else _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); #endif if (_is_basic) set_stride_and_distance_basic<false>(_desc_dr); } _desc_dr->commit(*_q); } } } if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) { COMPUTE(_desc_sr); } else { COMPUTE(_desc_dr); } } #undef COMPUTE private: sycl::queue *_q = nullptr; int _dim; std::vector<std::int64_t> _n; std::vector<std::int64_t> _inembed; std::int64_t _istride; std::int64_t _fwd_dist; library_data_t _input_type; std::vector<std::int64_t> _onembed; std::int64_t _ostride; std::int64_t _bwd_dist; library_data_t _output_type; std::int64_t _batch = 1; bool _is_basic = false; bool _is_inplace = false; fft_direction _direction = fft_direction::forward; bool _is_user_specified_dir_and_placement = false; bool _use_external_workspace = false; void *_external_workspace_ptr = nullptr; size_t _workspace_bytes = 0; bool _is_estimate_call = false; size_t _workspace_estimate_bytes = 0; std::shared_ptr<oneapi::mkl::dft::descriptor< oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>> _desc_sr; std::shared_ptr<oneapi::mkl::dft::descriptor< oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>> _desc_dr; std::shared_ptr<oneapi::mkl::dft::descriptor< oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::COMPLEX>> _desc_sc; std::shared_ptr<oneapi::mkl::dft::descriptor< oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::COMPLEX>> _desc_dc; }; using fft_engine_ptr = fft_engine *; } // namespace fft } // namespace dpct #endif // __DPCT_FFT_UTILS_HPP__
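A minimal usage sketch for the fft_engine API above (not part of the header): it assumes the header is reachable as "dpct/fft_utils.hpp", a USM-capable device, and placeholder sizes, and runs a 1-D in-place single-precision complex-to-complex transform.

#include <sycl/sycl.hpp>
#include "dpct/fft_utils.hpp" // assumed include path

void run_c2c_fft_sketch() {
  sycl::queue &q = dpct::get_default_queue();
  constexpr int n1 = 1024; // signal length (placeholder)
  constexpr int batch = 4; // number of transforms (placeholder)
  // Device buffer holding `batch` contiguous signals of length n1.
  sycl::float2 *data = sycl::malloc_device<sycl::float2>(n1 * batch, q);
  // ... fill `data` with input samples ...

  // Create and commit a 1-D single-precision complex-to-complex plan.
  dpct::fft::fft_engine *plan = dpct::fft::fft_engine::create(
      &q, n1, dpct::fft::fft_type::complex_float_to_complex_float, batch);

  // Passing the same pointer for input and output selects the in-place
  // configuration inside compute_complex before the transform is run.
  plan->compute(data, data, dpct::fft::fft_direction::forward);
  q.wait();

  dpct::fft::fft_engine::destroy(plan);
  sycl::free(data, q);
}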
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/lib_common_utils.hpp
//==---- lib_common_utils.hpp ---------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_LIB_COMMON_UTILS_HPP__ #define __DPCT_LIB_COMMON_UTILS_HPP__ #include <sycl/sycl.hpp> #include <oneapi/mkl.hpp> #include "memory.hpp" #include "util.hpp" namespace dpct { namespace detail { template <typename T> inline auto get_memory(T *x) { #ifdef DPCT_USM_LEVEL_NONE return dpct::get_buffer<std::remove_cv_t<T>>(x); #else return x; #endif } template <typename T> inline typename DataType<T>::T2 get_value(const T *s, sycl::queue &q) { using Ty = typename DataType<T>::T2; Ty s_h; detail::dpct_memcpy(q, (void *)&s_h, (void *)s, sizeof(T), automatic).wait(); return s_h; } } enum class version_field : int { major, minor, update, patch }; /// Returns the requested field of Intel(R) oneAPI Math Kernel Library version. /// \param field The version information field (major, minor, update or patch). /// \param result The result value. inline void mkl_get_version(version_field field, int *result) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else MKLVersion version; mkl_get_version(&version); if (version_field::major == field) { *result = version.MajorVersion; } else if (version_field::minor == field) { *result = version.MinorVersion; } else if (version_field::update == field) { *result = version.UpdateVersion; } else if (version_field::patch == field) { *result = 0; } else { throw std::runtime_error("unknown field"); } #endif } enum class library_data_t : unsigned char { real_float = 0, complex_float, real_double, complex_double, real_half, complex_half, real_bfloat16, complex_bfloat16, real_int4, complex_int4, real_uint4, complex_uint4, real_int8, complex_int8, real_uint8, complex_uint8, real_int16, complex_int16, real_uint16, complex_uint16, real_int32, complex_int32, real_uint32, complex_uint32, real_int64, complex_int64, real_uint64, complex_uint64, real_int8_4, real_int8_32, real_uint8_4, library_data_t_size }; namespace detail { template <typename ArgT> inline constexpr std::uint64_t get_type_combination_id(ArgT Val) { static_assert((unsigned char)library_data_t::library_data_t_size <= std::numeric_limits<unsigned char>::max() && "library_data_t size exceeds limit."); static_assert(std::is_same_v<ArgT, library_data_t>, "Unsupported ArgT"); return (std::uint64_t)Val; } template <typename FirstT, typename... RestT> inline constexpr std::uint64_t get_type_combination_id(FirstT FirstVal, RestT... RestVal) { static_assert((std::uint8_t)library_data_t::library_data_t_size <= std::numeric_limits<unsigned char>::max() && "library_data_t size exceeds limit."); static_assert(sizeof...(RestT) <= 8 && "Too many parameters"); static_assert(std::is_same_v<FirstT, library_data_t>, "Unsupported FirstT"); return get_type_combination_id(RestVal...) 
<< 8 | ((std::uint64_t)FirstVal); } inline constexpr std::size_t library_data_size[] = { 8 * sizeof(float), // real_float 8 * sizeof(std::complex<float>), // complex_float 8 * sizeof(double), // real_double 8 * sizeof(std::complex<double>), // complex_double 8 * sizeof(sycl::half), // real_half 8 * sizeof(std::complex<sycl::half>), // complex_half 16, // real_bfloat16 16 * 2, // complex_bfloat16 4, // real_int4 4 * 2, // complex_int4 4, // real_uint4 4 * 2, // complex_uint4 8, // real_int8 8 * 2, // complex_int8 8, // real_uint8 8 * 2, // complex_uint8 16, // real_int16 16 * 2, // complex_int16 16, // real_uint16 16 * 2, // complex_uint16 32, // real_int32 32 * 2, // complex_int32 32, // real_uint32 32 * 2, // complex_uint32 64, // real_int64 64 * 2, // complex_int64 64, // real_uint64 64 * 2, // complex_uint64 8, // real_int8_4 8, // real_int8_32 8 // real_uint8_4 }; } // namespace detail } // namespace dpct #endif // __DPCT_LIB_COMMON_UTILS_HPP__
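A short sketch (not part of the header) of the version query and the type-combination helper above; the include path and function names are illustrative assumptions.

#include "dpct/lib_common_utils.hpp" // assumed include path

// Query the oneMKL version fields. Note that mkl_get_version throws when the
// header is built against the oneMKL Interfaces Project (__INTEL_MKL__ unset).
inline void query_mkl_version_sketch() {
  int major = 0, minor = 0, update = 0;
  dpct::mkl_get_version(dpct::version_field::major, &major);
  dpct::mkl_get_version(dpct::version_field::minor, &minor);
  dpct::mkl_get_version(dpct::version_field::update, &update);
}

// Each library_data_t value occupies one byte of the 64-bit combination id,
// so a pair of types can be compared (or switched on) as a single integer.
// detail:: is an internal namespace; it is used here only to illustrate the encoding.
inline bool is_r2c_single_sketch(dpct::library_data_t in, dpct::library_data_t out) {
  using dpct::library_data_t;
  return dpct::detail::get_type_combination_id(in, out) ==
         dpct::detail::get_type_combination_id(library_data_t::real_float,
                                               library_data_t::complex_float);
}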
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/sparse_utils.hpp
//==---- sparse_utils.hpp -------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_SPARSE_UTILS_HPP__ #define __DPCT_SPARSE_UTILS_HPP__ #include "lib_common_utils.hpp" #include <oneapi/mkl.hpp> #include <sycl/sycl.hpp> namespace dpct { namespace sparse { /// Describes properties of a sparse matrix. /// The properties are matrix type, diag, uplo and index base. class matrix_info { public: /// Matrix types are: /// ge: General matrix /// sy: Symmetric matrix /// he: Hermitian matrix /// tr: Triangular matrix enum class matrix_type : int { ge = 0, sy, he, tr }; auto get_matrix_type() const { return _matrix_type; } auto get_diag() const { return _diag; } auto get_uplo() const { return _uplo; } auto get_index_base() const { return _index_base; } void set_matrix_type(matrix_type mt) { _matrix_type = mt; } void set_diag(oneapi::mkl::diag d) { _diag = d; } void set_uplo(oneapi::mkl::uplo u) { _uplo = u; } void set_index_base(oneapi::mkl::index_base ib) { _index_base = ib; } private: matrix_type _matrix_type = matrix_type::ge; oneapi::mkl::diag _diag = oneapi::mkl::diag::nonunit; oneapi::mkl::uplo _uplo = oneapi::mkl::uplo::upper; oneapi::mkl::index_base _index_base = oneapi::mkl::index_base::zero; }; /// Computes a CSR format sparse matrix-dense vector product. /// y = alpha * op(A) * x + beta * y /// \param [in] queue The queue where the routine should be executed. It must /// have the in_order property when using the USM mode. /// \param [in] trans The operation applied to the matrix A. /// \param [in] num_rows Number of rows of the matrix A. /// \param [in] num_cols Number of columns of the matrix A. /// \param [in] alpha Scaling factor for the matrix A. /// \param [in] info Matrix info of the matrix A. /// \param [in] val An array containing the non-zero elements of the matrix A. /// \param [in] row_ptr An array of length \p num_rows + 1. /// \param [in] col_ind An array containing the column indices in index-based /// numbering. /// \param [in] x Data of the vector x. /// \param [in] beta Scaling factor for the vector x. /// \param [in, out] y Data of the vector y. 
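/// Example (illustrative; placeholder names, general matrix, single precision):
/// csrmv(q, oneapi::mkl::transpose::nontrans, num_rows, num_cols, &alpha, info,
///       a_val, a_row_ptr, a_col_ind, x, &beta, y);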
template <typename T> void csrmv(sycl::queue &queue, oneapi::mkl::transpose trans, int num_rows, int num_cols, const T *alpha, const std::shared_ptr<matrix_info> info, const T *val, const int *row_ptr, const int *col_ind, const T *x, const T *beta, T *y) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using Ty = typename dpct::DataType<T>::T2; auto alpha_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue); auto beta_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue); oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle = new oneapi::mkl::sparse::matrix_handle_t; oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle); auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr)); auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind)); auto data_val = dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val))); oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, num_rows, num_cols, info->get_index_base(), data_row_ptr, data_col_ind, data_val); auto data_x = dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(x))); auto data_y = dpct::detail::get_memory(reinterpret_cast<Ty *>(y)); switch (info->get_matrix_type()) { case matrix_info::matrix_type::ge: { oneapi::mkl::sparse::optimize_gemv(queue, trans, *sparse_matrix_handle); oneapi::mkl::sparse::gemv(queue, trans, alpha_value, *sparse_matrix_handle, data_x, beta_value, data_y); break; } case matrix_info::matrix_type::sy: { oneapi::mkl::sparse::symv(queue, info->get_uplo(), alpha_value, *sparse_matrix_handle, data_x, beta_value, data_y); break; } case matrix_info::matrix_type::tr: { oneapi::mkl::sparse::optimize_trmv(queue, info->get_uplo(), trans, info->get_diag(), *sparse_matrix_handle); oneapi::mkl::sparse::trmv(queue, info->get_uplo(), trans, info->get_diag(), alpha_value, *sparse_matrix_handle, data_x, beta_value, data_y); break; } default: throw std::runtime_error( "the spmv does not support matrix_info::matrix_type::he"); } sycl::event e = oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle); queue.submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { delete sparse_matrix_handle; }); }); #endif } /// Computes a CSR format sparse matrix-dense matrix product. /// C = alpha * op(A) * B + beta * C /// \param [in] queue The queue where the routine should be executed. It must /// have the in_order property when using the USM mode. /// \param [in] trans The operation applied to the matrix A. /// \param [in] sparse_rows Number of rows of the matrix A. /// \param [in] dense_cols Number of columns of the matrix B or C. /// \param [in] sparse_cols Number of columns of the matrix A. /// \param [in] alpha Scaling factor for the matrix A. /// \param [in] info Matrix info of the matrix A. /// \param [in] val An array containing the non-zero elements of the matrix A. /// \param [in] row_ptr An array of length \p num_rows + 1. /// \param [in] col_ind An array containing the column indices in index-based /// numbering. /// \param [in] b Data of the matrix B. /// \param [in] ldb Leading dimension of the matrix B. /// \param [in] beta Scaling factor for the matrix B. /// \param [in, out] c Data of the matrix C. /// \param [in] ldc Leading dimension of the matrix C. 
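/// Example (illustrative; placeholder names), computing
/// C = alpha * A * B + beta * C for a general CSR matrix A:
/// csrmm(q, oneapi::mkl::transpose::nontrans, sparse_rows, dense_cols,
///       sparse_cols, &alpha, info, a_val, a_row_ptr, a_col_ind, b, ldb,
///       &beta, c, ldc);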
template <typename T> void csrmm(sycl::queue &queue, oneapi::mkl::transpose trans, int sparse_rows, int dense_cols, int sparse_cols, const T *alpha, const std::shared_ptr<matrix_info> info, const T *val, const int *row_ptr, const int *col_ind, const T *b, int ldb, const T *beta, T *c, int ldc) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using Ty = typename dpct::DataType<T>::T2; auto alpha_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue); auto beta_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue); oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle = new oneapi::mkl::sparse::matrix_handle_t; oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle); auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr)); auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind)); auto data_val = dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val))); oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, sparse_rows, sparse_cols, info->get_index_base(), data_row_ptr, data_col_ind, data_val); auto data_b = dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(b))); auto data_c = dpct::detail::get_memory(reinterpret_cast<Ty *>(c)); switch (info->get_matrix_type()) { case matrix_info::matrix_type::ge: { oneapi::mkl::sparse::gemm(queue, oneapi::mkl::layout::row_major, trans, oneapi::mkl::transpose::nontrans, alpha_value, *sparse_matrix_handle, data_b, dense_cols, ldb, beta_value, data_c, ldc); break; } default: throw std::runtime_error( "the csrmm does not support matrix_info::matrix_type::sy, " "matrix_info::matrix_type::tr and matrix_info::matrix_type::he"); } sycl::event e = oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle); queue.submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { delete sparse_matrix_handle; }); }); #endif } #ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this. /// Saving the optimization information for solving a system of linear /// equations. class optimize_info { public: /// Constructor optimize_info() { oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle); } /// Destructor ~optimize_info() { oneapi::mkl::sparse::release_matrix_handle(get_default_queue(), &_matrix_handle, _deps) .wait(); } /// Add dependency for the destructor. /// \param [in] e The event which the destructor depends on. void add_dependency(sycl::event e) { _deps.push_back(e); } /// Get the internal saved matrix handle. /// \return Returns the matrix handle. oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept { return _matrix_handle; } private: oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr; std::vector<sycl::event> _deps; }; #endif #ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this. /// Performs internal optimizations for solving a system of linear equations for /// a CSR format sparse matrix. /// \param [in] queue The queue where the routine should be executed. It must /// have the in_order property when using the USM mode. /// \param [in] trans The operation applied to the sparse matrix. /// \param [in] row_col Number of rows of the sparse matrix. /// \param [in] info Matrix info of the sparse matrix. /// \param [in] val An array containing the non-zero elements of the sparse matrix. /// \param [in] row_ptr An array of length \p num_rows + 1. 
/// \param [in] col_ind An array containing the column indices in index-based /// numbering. /// \param [out] optimize_info The result of the optimizations. template <typename T> void optimize_csrsv(sycl::queue &queue, oneapi::mkl::transpose trans, int row_col, const std::shared_ptr<matrix_info> info, const T *val, const int *row_ptr, const int *col_ind, std::shared_ptr<optimize_info> optimize_info) { using Ty = typename dpct::DataType<T>::T2; auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr)); auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind)); auto data_val = dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val))); oneapi::mkl::sparse::set_csr_data(queue, optimize_info->get_matrix_handle(), row_col, row_col, info->get_index_base(), data_row_ptr, data_col_ind, data_val); if (info->get_matrix_type() != matrix_info::matrix_type::tr) return; #ifndef DPCT_USM_LEVEL_NONE sycl::event e; e = #endif oneapi::mkl::sparse::optimize_trsv(queue, info->get_uplo(), trans, info->get_diag(), optimize_info->get_matrix_handle()); #ifndef DPCT_USM_LEVEL_NONE optimize_info->add_dependency(e); #endif } #endif class sparse_matrix_desc; using sparse_matrix_desc_t = std::shared_ptr<sparse_matrix_desc>; /// Structure for describe a dense vector class dense_vector_desc { public: dense_vector_desc(std::int64_t ele_num, void *value, library_data_t value_type) : _ele_num(ele_num), _value(value), _value_type(value_type) {} void get_desc(std::int64_t *ele_num, const void **value, library_data_t *value_type) const noexcept { *ele_num = _ele_num; *value = _value; *value_type = _value_type; } void get_desc(std::int64_t *ele_num, void **value, library_data_t *value_type) const noexcept { get_desc(ele_num, const_cast<const void **>(value), value_type); } void *get_value() const noexcept { return _value; } void set_value(void *value) { _value = value; } private: std::int64_t _ele_num; void *_value; library_data_t _value_type; }; /// Structure for describe a dense matrix class dense_matrix_desc { public: dense_matrix_desc(std::int64_t row_num, std::int64_t col_num, std::int64_t leading_dim, void *value, library_data_t value_type, oneapi::mkl::layout layout) : _row_num(row_num), _col_num(col_num), _leading_dim(leading_dim), _value(value), _value_type(value_type), _layout(layout) {} void get_desc(std::int64_t *row_num, std::int64_t *col_num, std::int64_t *leading_dim, void **value, library_data_t *value_type, oneapi::mkl::layout *layout) const noexcept { *row_num = _row_num; *col_num = _col_num; *leading_dim = _leading_dim; *value = _value; *value_type = _value_type; *layout = _layout; } void *get_value() const noexcept { return _value; } void set_value(void *value) { _value = value; } std::int64_t get_col_num() const noexcept { return _col_num; } std::int64_t get_leading_dim() const noexcept { return _leading_dim; } oneapi::mkl::layout get_layout() const noexcept { return _layout; } private: std::int64_t _row_num; std::int64_t _col_num; std::int64_t _leading_dim; void *_value; library_data_t _value_type; oneapi::mkl::layout _layout; }; /// Sparse matrix data format enum matrix_format : int { csr = 1, }; /// Sparse matrix attribute enum matrix_attribute : int { uplo = 0, diag }; #ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this. /// Structure for describe a sparse matrix class sparse_matrix_desc { public: /// Constructor /// \param [out] desc The descriptor to be created /// \param [in] row_num Number of rows of the sparse matrix. 
  /// \param [in] col_num Number of columns of the sparse matrix.
  /// \param [in] nnz Non-zero elements in the sparse matrix.
  /// \param [in] row_ptr An array of length \p row_num + 1.
  /// \param [in] col_ind An array containing the column indices in index-based
  /// numbering.
  /// \param [in] value An array containing the non-zero elements of the sparse matrix.
  /// \param [in] row_ptr_type Data type of the \p row_ptr .
  /// \param [in] col_ind_type Data type of the \p col_ind .
  /// \param [in] base Indicates how input arrays are indexed.
  /// \param [in] value_type Data type of the \p value .
  /// \param [in] data_format The matrix data format.
  sparse_matrix_desc(std::int64_t row_num, std::int64_t col_num,
                     std::int64_t nnz, void *row_ptr, void *col_ind,
                     void *value, library_data_t row_ptr_type,
                     library_data_t col_ind_type, oneapi::mkl::index_base base,
                     library_data_t value_type, matrix_format data_format)
      : _row_num(row_num), _col_num(col_num), _nnz(nnz), _row_ptr(row_ptr),
        _col_ind(col_ind), _value(value), _row_ptr_type(row_ptr_type),
        _col_ind_type(col_ind_type), _base(base), _value_type(value_type),
        _data_format(data_format) {
    if (_data_format != matrix_format::csr) {
      throw std::runtime_error("the sparse matrix data format is unsupported");
    }
    oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle);
    construct();
  }
  /// Destructor
  ~sparse_matrix_desc() {
    oneapi::mkl::sparse::release_matrix_handle(get_default_queue(),
                                               &_matrix_handle, _deps)
        .wait();
  }
  /// Add a dependency for the destructor.
  /// \param [in] e The event which the destructor depends on.
  void add_dependency(sycl::event e) { _deps.push_back(e); }
  /// Get the internal saved matrix handle.
  /// \return Returns the matrix handle.
  oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept {
    return _matrix_handle;
  }
  /// Get the values saved in the descriptor.
  /// \param [out] row_num Number of rows of the sparse matrix.
  /// \param [out] col_num Number of columns of the sparse matrix.
  /// \param [out] nnz Non-zero elements in the sparse matrix.
  /// \param [out] row_ptr An array of length \p row_num + 1.
  /// \param [out] col_ind An array containing the column indices in index-based
  /// numbering.
  /// \param [out] value An array containing the non-zero elements of the sparse matrix.
  /// \param [out] row_ptr_type Data type of the \p row_ptr .
  /// \param [out] col_ind_type Data type of the \p col_ind .
  /// \param [out] base Indicates how input arrays are indexed.
  /// \param [out] value_type Data type of the \p value .
void get_desc(int64_t *row_num, int64_t *col_num, int64_t *nnz, void **row_ptr, void **col_ind, void **value, library_data_t *row_ptr_type, library_data_t *col_ind_type, oneapi::mkl::index_base *base, library_data_t *value_type) const noexcept { *row_num = _row_num; *col_num = _col_num; *nnz = _nnz; *row_ptr = _row_ptr; *col_ind = _col_ind; *value = _value; *row_ptr_type = _row_ptr_type; *col_ind_type = _col_ind_type; *base = _base; *value_type = _value_type; } /// Get the sparse matrix data format of this descriptor /// \param [out] format The matrix data format result void get_format(matrix_format *data_format) const noexcept { *data_format = _data_format; } /// Get the index base of this descriptor /// \param [out] base The index base result void get_base(oneapi::mkl::index_base *base) const noexcept { *base = _base; } /// Get the value pointer of this descriptor /// \param [out] value The value pointer result void get_value(void **value) const noexcept { *value = _value; } /// Set the value pointer of this descriptor /// \param [in] value The input value pointer void set_value(void *value) { // Assume the new data is different from the old data _value = value; construct(); } /// Get the size of the sparse matrix /// \param [out] row_num Number of rows of the sparse matrix. /// \param [out] col_num Number of colums of the sparse matrix. /// \param [out] nnz Non-zero elements in the sparse matrix. void get_size(int64_t *row_num, int64_t *col_num, int64_t *nnz) const noexcept { *row_num = _row_num; *col_num = _col_num; *nnz = _nnz; } /// Set the sparse matrix attribute /// \param [in] attribute The attribute type /// \param [in] data The attribute value /// \param [in] data_size The data size of the attribute value void set_attribute(matrix_attribute attribute, const void *data, size_t data_size) { if (attribute == matrix_attribute::diag) { const oneapi::mkl::diag *diag_ptr = reinterpret_cast<const oneapi::mkl::diag *>(data); if (*diag_ptr == oneapi::mkl::diag::unit) { _diag = oneapi::mkl::diag::unit; } else if (*diag_ptr == oneapi::mkl::diag::nonunit) { _diag = oneapi::mkl::diag::nonunit; } else { throw std::runtime_error("unsupported diag value"); } } else if (attribute == matrix_attribute::uplo) { const oneapi::mkl::uplo *uplo_ptr = reinterpret_cast<const oneapi::mkl::uplo *>(data); if (*uplo_ptr == oneapi::mkl::uplo::upper) { _uplo = oneapi::mkl::uplo::upper; } else if (*uplo_ptr == oneapi::mkl::uplo::lower) { _uplo = oneapi::mkl::uplo::lower; } else { throw std::runtime_error("unsupported uplo value"); } } else { throw std::runtime_error("unsupported attribute"); } } /// Get the sparse matrix attribute /// \param [out] attribute The attribute type /// \param [out] data The attribute value /// \param [out] data_size The data size of the attribute value void get_attribute(matrix_attribute attribute, void *data, size_t data_size) const { if (attribute == matrix_attribute::diag) { oneapi::mkl::diag *diag_ptr = reinterpret_cast<oneapi::mkl::diag *>(data); if (_diag.has_value()) { *diag_ptr = _diag.value(); } else { *diag_ptr = oneapi::mkl::diag::nonunit; } } else if (attribute == matrix_attribute::uplo) { oneapi::mkl::uplo *uplo_ptr = reinterpret_cast<oneapi::mkl::uplo *>(data); if (_uplo.has_value()) { *uplo_ptr = _uplo.value(); } else { *uplo_ptr = oneapi::mkl::uplo::lower; } } else { throw std::runtime_error("unsupported attribute"); } } /// Set the pointers for describing the sparse matrix /// \param [in] row_ptr An array of length \p row_num + 1. 
/// \param [in] col_ind An array containing the column indices in index-based /// numbering. /// \param [in] value An array containing the non-zero elements of the sparse matrix. void set_pointers(void *row_ptr, void *col_ind, void *value) { // Assume the new data is different from the old data _row_ptr = row_ptr; _col_ind = col_ind; _value = value; construct(); } /// Get the diag attribute /// \return diag value std::optional<oneapi::mkl::diag> get_diag() const noexcept { return _diag; } /// Get the uplo attribute /// \return uplo value std::optional<oneapi::mkl::uplo> get_uplo() const noexcept { return _uplo; } private: template <typename index_t, typename value_t> void set_data() { auto data_row_ptr = dpct::detail::get_memory(reinterpret_cast<index_t *>(_row_ptr)); auto data_col_ind = dpct::detail::get_memory(reinterpret_cast<index_t *>(_col_ind)); auto data_value = dpct::detail::get_memory(reinterpret_cast<value_t *>(_value)); oneapi::mkl::sparse::set_csr_data(get_default_queue(), _matrix_handle, _row_num, _col_num, _base, data_row_ptr, data_col_ind, data_value); get_default_queue().wait(); } void construct() { std::uint64_t key = dpct::detail::get_type_combination_id( _row_ptr_type, _col_ind_type, _value_type); switch (key) { case dpct::detail::get_type_combination_id(library_data_t::real_int32, library_data_t::real_int32, library_data_t::real_float): { set_data<std::int32_t, float>(); break; } case dpct::detail::get_type_combination_id(library_data_t::real_int32, library_data_t::real_int32, library_data_t::real_double): { set_data<std::int32_t, double>(); break; } case dpct::detail::get_type_combination_id(library_data_t::real_int32, library_data_t::real_int32, library_data_t::complex_float): { set_data<std::int32_t, std::complex<float>>(); break; } case dpct::detail::get_type_combination_id( library_data_t::real_int32, library_data_t::real_int32, library_data_t::complex_double): { set_data<std::int32_t, std::complex<double>>(); break; } case dpct::detail::get_type_combination_id(library_data_t::real_int64, library_data_t::real_int64, library_data_t::real_float): { set_data<std::int64_t, float>(); break; } case dpct::detail::get_type_combination_id(library_data_t::real_int64, library_data_t::real_int64, library_data_t::real_double): { set_data<std::int64_t, double>(); break; } case dpct::detail::get_type_combination_id(library_data_t::real_int64, library_data_t::real_int64, library_data_t::complex_float): { set_data<std::int64_t, std::complex<float>>(); break; } case dpct::detail::get_type_combination_id( library_data_t::real_int64, library_data_t::real_int64, library_data_t::complex_double): { set_data<std::int64_t, std::complex<double>>(); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } std::int64_t _row_num; std::int64_t _col_num; std::int64_t _nnz; void *_row_ptr; void *_col_ind; void *_value; library_data_t _row_ptr_type; library_data_t _col_ind_type; oneapi::mkl::index_base _base; library_data_t _value_type; oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr; std::vector<sycl::event> _deps; matrix_format _data_format; std::optional<oneapi::mkl::uplo> _uplo; std::optional<oneapi::mkl::diag> _diag; }; namespace detail { #ifdef DPCT_USM_LEVEL_NONE #define SPARSE_CALL(X) \ do { \ X; \ } while (0) #else #define SPARSE_CALL(X) \ do { \ sycl::event e = X; \ a->add_dependency(e); \ } while (0) #endif template <typename Ty> inline void spmv_impl(sycl::queue queue, oneapi::mkl::transpose trans, const void *alpha, 
sparse_matrix_desc_t a, std::shared_ptr<dense_vector_desc> x, const void *beta, std::shared_ptr<dense_vector_desc> y) { auto alpha_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue); auto beta_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue); auto data_x = dpct::detail::get_memory(reinterpret_cast<Ty *>(x->get_value())); auto data_y = dpct::detail::get_memory(reinterpret_cast<Ty *>(y->get_value())); if (a->get_diag().has_value() && a->get_uplo().has_value()) { oneapi::mkl::sparse::optimize_trmv(queue, a->get_uplo().value(), trans, a->get_diag().value(), a->get_matrix_handle()); SPARSE_CALL(oneapi::mkl::sparse::trmv( queue, a->get_uplo().value(), trans, a->get_diag().value(), alpha_value, a->get_matrix_handle(), data_x, beta_value, data_y)); } else { oneapi::mkl::sparse::optimize_gemv(queue, trans, a->get_matrix_handle()); SPARSE_CALL(oneapi::mkl::sparse::gemv(queue, trans, alpha_value, a->get_matrix_handle(), data_x, beta_value, data_y)); } } template <typename Ty> inline void spmm_impl(sycl::queue queue, oneapi::mkl::transpose trans_a, oneapi::mkl::transpose trans_b, const void *alpha, sparse_matrix_desc_t a, std::shared_ptr<dense_matrix_desc> b, const void *beta, std::shared_ptr<dense_matrix_desc> c) { auto alpha_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue); auto beta_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue); auto data_b = dpct::detail::get_memory(reinterpret_cast<Ty *>(b->get_value())); auto data_c = dpct::detail::get_memory(reinterpret_cast<Ty *>(c->get_value())); SPARSE_CALL(oneapi::mkl::sparse::gemm( queue, b->get_layout(), trans_a, trans_b, alpha_value, a->get_matrix_handle(), data_b, b->get_col_num(), b->get_leading_dim(), beta_value, data_c, c->get_leading_dim())); } #undef SPARSE_CALL } // namespace detail /// Computes a sparse matrix-dense vector product: y = alpha * op(a) * x + beta * y. /// \param [in] queue The queue where the routine should be executed. It must /// have the in_order property when using the USM mode. /// \param [in] trans Specifies operation on input matrix. /// \param [in] alpha Specifies the scalar alpha. /// \param [in] a Specifies the sparse matrix a. /// \param [in] x Specifies the dense vector x. /// \param [in] beta Specifies the scalar beta. /// \param [in, out] y Specifies the dense vector y. /// \param [in] data_type Specifies the data type of \param a, \param x and \param y . inline void spmv(sycl::queue queue, oneapi::mkl::transpose trans, const void *alpha, sparse_matrix_desc_t a, std::shared_ptr<dense_vector_desc> x, const void *beta, std::shared_ptr<dense_vector_desc> y, library_data_t data_type) { switch (data_type) { case library_data_t::real_float: { detail::spmv_impl<float>(queue, trans, alpha, a, x, beta, y); break; } case library_data_t::real_double: { detail::spmv_impl<double>(queue, trans, alpha, a, x, beta, y); break; } case library_data_t::complex_float: { detail::spmv_impl<std::complex<float>>(queue, trans, alpha, a, x, beta, y); break; } case library_data_t::complex_double: { detail::spmv_impl<std::complex<double>>(queue, trans, alpha, a, x, beta, y); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } /// Computes a sparse matrix-dense matrix product: c = alpha * op(a) * op(b) + beta * c. /// \param [in] queue The queue where the routine should be executed. It must /// have the in_order property when using the USM mode. 
/// \param [in] trans_a Specifies operation on input matrix a.
/// \param [in] trans_b Specifies operation on input matrix b.
/// \param [in] alpha Specifies the scalar alpha.
/// \param [in] a Specifies the sparse matrix a.
/// \param [in] b Specifies the dense matrix b.
/// \param [in] beta Specifies the scalar beta.
/// \param [in, out] c Specifies the dense matrix c.
/// \param [in] data_type Specifies the data type of \p a , \p b and \p c .
inline void spmm(sycl::queue queue, oneapi::mkl::transpose trans_a,
                 oneapi::mkl::transpose trans_b, const void *alpha,
                 sparse_matrix_desc_t a, std::shared_ptr<dense_matrix_desc> b,
                 const void *beta, std::shared_ptr<dense_matrix_desc> c,
                 library_data_t data_type) {
  if (b->get_layout() != c->get_layout())
    throw std::runtime_error("the layouts of b and c are different");
  switch (data_type) {
  case library_data_t::real_float: {
    detail::spmm_impl<float>(queue, trans_a, trans_b, alpha, a, b, beta, c);
    break;
  }
  case library_data_t::real_double: {
    detail::spmm_impl<double>(queue, trans_a, trans_b, alpha, a, b, beta, c);
    break;
  }
  case library_data_t::complex_float: {
    detail::spmm_impl<std::complex<float>>(queue, trans_a, trans_b, alpha, a,
                                           b, beta, c);
    break;
  }
  case library_data_t::complex_double: {
    detail::spmm_impl<std::complex<double>>(queue, trans_a, trans_b, alpha, a,
                                            b, beta, c);
    break;
  }
  default:
    throw std::runtime_error("the combination of data types is unsupported");
  }
}
#endif
} // namespace sparse
} // namespace dpct

#endif // __DPCT_SPARSE_UTILS_HPP__
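// Illustrative usage sketch (an editorial example, not part of the original
// header): the snippet shows how the spmv wrapper above might be driven for a
// single-precision CSR matrix. The variable names (rows, cols, nnz, row_ptr,
// col_ind, val, x, y) are hypothetical; the device arrays are assumed to be
// allocated and filled elsewhere, and __INTEL_MKL__ is assumed to be defined.
//
//   float alpha = 1.0f, beta = 0.0f;
//   sycl::queue &q = dpct::get_default_queue();
//   auto A = std::make_shared<dpct::sparse::sparse_matrix_desc>(
//       rows, cols, nnz, row_ptr, col_ind, val,
//       dpct::library_data_t::real_int32, dpct::library_data_t::real_int32,
//       oneapi::mkl::index_base::zero, dpct::library_data_t::real_float,
//       dpct::sparse::matrix_format::csr);
//   auto x_desc = std::make_shared<dpct::sparse::dense_vector_desc>(
//       cols, x, dpct::library_data_t::real_float);
//   auto y_desc = std::make_shared<dpct::sparse::dense_vector_desc>(
//       rows, y, dpct::library_data_t::real_float);
//   // y = alpha * A * x + beta * y
//   dpct::sparse::spmv(q, oneapi::mkl::transpose::nontrans, &alpha, A, x_desc,
//                      &beta, y_desc, dpct::library_data_t::real_float);
//   q.wait();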
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/device.hpp
//==---- device.hpp -------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_DEVICE_HPP__ #define __DPCT_DEVICE_HPP__ #include <sycl/sycl.hpp> #include <algorithm> #include <array> #include <cstring> #include <iostream> #include <mutex> #include <set> #include <sstream> #include <map> #include <vector> #include <thread> #if defined(__linux__) #include <unistd.h> #include <sys/syscall.h> #endif #if defined(_WIN64) #define NOMINMAX #include <windows.h> #endif namespace dpct { /// SYCL default exception handler inline auto exception_handler = [](sycl::exception_list exceptions) { for (std::exception_ptr const &e : exceptions) { try { std::rethrow_exception(e); } catch (sycl::exception const &e) { std::cerr << "Caught asynchronous SYCL exception:" << std::endl << e.what() << std::endl << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; } } }; typedef sycl::event *event_ptr; typedef sycl::queue *queue_ptr; typedef char *device_ptr; /// Destroy \p event pointed memory. /// /// \param event Pointer to the sycl::event address. static void destroy_event(event_ptr event) { delete event; } class device_info { public: // get interface const char *get_name() const { return _name; } char *get_name() { return _name; } template <typename WorkItemSizesTy = sycl::id<3>, std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> || std::is_same_v<WorkItemSizesTy, int *>, int> = 0> auto get_max_work_item_sizes() const { if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>) return _max_work_item_sizes; else return _max_work_item_sizes_i; } template <typename WorkItemSizesTy = sycl::id<3>, std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> || std::is_same_v<WorkItemSizesTy, int *>, int> = 0> auto get_max_work_item_sizes() { if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>) return _max_work_item_sizes; else return _max_work_item_sizes_i; } bool get_host_unified_memory() const { return _host_unified_memory; } int get_major_version() const { return _major; } int get_minor_version() const { return _minor; } int get_integrated() const { return _integrated; } int get_max_clock_frequency() const { return _frequency; } int get_max_compute_units() const { return _max_compute_units; } int get_max_work_group_size() const { return _max_work_group_size; } int get_max_sub_group_size() const { return _max_sub_group_size; } int get_max_work_items_per_compute_unit() const { return _max_work_items_per_compute_unit; } int get_max_register_size_per_work_group() const { return _max_register_size_per_work_group; } template <typename NDRangeSizeTy = size_t *, std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> || std::is_same_v<NDRangeSizeTy, int *>, int> = 0> auto get_max_nd_range_size() const { if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>) return _max_nd_range_size; else return _max_nd_range_size_i; } template <typename NDRangeSizeTy = size_t *, std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> || std::is_same_v<NDRangeSizeTy, int *>, int> = 0> auto get_max_nd_range_size() { if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>) return _max_nd_range_size; else return _max_nd_range_size_i; } size_t get_global_mem_size() const { return _global_mem_size; } size_t get_local_mem_size() const { return 
_local_mem_size; } /// Returns the maximum clock rate of device's global memory in kHz. If /// compiler does not support this API then returns default value 3200000 kHz. unsigned int get_memory_clock_rate() const { return _memory_clock_rate; } /// Returns the maximum bus width between device and memory in bits. If /// compiler does not support this API then returns default value 64 bits. unsigned int get_memory_bus_width() const { return _memory_bus_width; } uint32_t get_device_id() const { return _device_id; } std::array<unsigned char, 16> get_uuid() const { return _uuid; } // set interface void set_name(const char* name) { size_t length = strlen(name); if (length < 256) { std::memcpy(_name, name, length + 1); } else { std::memcpy(_name, name, 255); _name[255] = '\0'; } } void set_max_work_item_sizes(const sycl::id<3> max_work_item_sizes) { _max_work_item_sizes = max_work_item_sizes; for (int i = 0; i < 3; ++i) _max_work_item_sizes_i[i] = max_work_item_sizes[i]; } void set_host_unified_memory(bool host_unified_memory) { _host_unified_memory = host_unified_memory; } void set_major_version(int major) { _major = major; } void set_minor_version(int minor) { _minor = minor; } void set_integrated(int integrated) { _integrated = integrated; } void set_max_clock_frequency(int frequency) { _frequency = frequency; } void set_max_compute_units(int max_compute_units) { _max_compute_units = max_compute_units; } void set_global_mem_size(size_t global_mem_size) { _global_mem_size = global_mem_size; } void set_local_mem_size(size_t local_mem_size) { _local_mem_size = local_mem_size; } void set_max_work_group_size(int max_work_group_size) { _max_work_group_size = max_work_group_size; } void set_max_sub_group_size(int max_sub_group_size) { _max_sub_group_size = max_sub_group_size; } void set_max_work_items_per_compute_unit(int max_work_items_per_compute_unit) { _max_work_items_per_compute_unit = max_work_items_per_compute_unit; } void set_max_nd_range_size(int max_nd_range_size[]) { for (int i = 0; i < 3; i++) { _max_nd_range_size[i] = max_nd_range_size[i]; _max_nd_range_size_i[i] = max_nd_range_size[i]; } } void set_memory_clock_rate(unsigned int memory_clock_rate) { _memory_clock_rate = memory_clock_rate; } void set_memory_bus_width(unsigned int memory_bus_width) { _memory_bus_width = memory_bus_width; } void set_max_register_size_per_work_group(int max_register_size_per_work_group) { _max_register_size_per_work_group = max_register_size_per_work_group; } void set_device_id(uint32_t device_id) { _device_id = device_id; } void set_uuid(std::array<unsigned char, 16> uuid) { _uuid = std::move(uuid); } private: char _name[256]; sycl::id<3> _max_work_item_sizes; int _max_work_item_sizes_i[3]; bool _host_unified_memory = false; int _major; int _minor; int _integrated = 0; int _frequency; // Set estimated value 3200000 kHz as default value. unsigned int _memory_clock_rate = 3200000; // Set estimated value 64 bits as default value. 
unsigned int _memory_bus_width = 64; int _max_compute_units; int _max_work_group_size; int _max_sub_group_size; int _max_work_items_per_compute_unit; int _max_register_size_per_work_group; size_t _global_mem_size; size_t _local_mem_size; size_t _max_nd_range_size[3]; int _max_nd_range_size_i[3]; uint32_t _device_id; std::array<unsigned char, 16> _uuid; }; /// dpct device extension class device_ext : public sycl::device { typedef std::mutex mutex_type; public: device_ext() : sycl::device(), _ctx(*this) {} ~device_ext() { std::lock_guard<mutex_type> lock(m_mutex); clear_queues(); } device_ext(const sycl::device &base) : sycl::device(base), _ctx(*this) { std::lock_guard<mutex_type> lock(m_mutex); init_queues(); } int is_native_atomic_supported() { return 0; } int get_major_version() const { int major, minor; get_version(major, minor); return major; } int get_minor_version() const { int major, minor; get_version(major, minor); return minor; } int get_max_compute_units() const { return get_device_info().get_max_compute_units(); } /// Return the maximum clock frequency of this device in KHz. int get_max_clock_frequency() const { return get_device_info().get_max_clock_frequency(); } int get_integrated() const { return get_device_info().get_integrated(); } int get_max_sub_group_size() const { return get_device_info().get_max_sub_group_size(); } int get_max_register_size_per_work_group() const { return get_device_info().get_max_register_size_per_work_group(); } int get_max_work_group_size() const { return get_device_info().get_max_work_group_size(); } int get_mem_base_addr_align() const { return get_info<sycl::info::device::mem_base_addr_align>(); } size_t get_global_mem_size() const { return get_device_info().get_global_mem_size(); } /// Get the number of bytes of free and total memory on the SYCL device. /// \param [out] free_memory The number of bytes of free memory on the SYCL device. /// \param [out] total_memory The number of bytes of total memory on the SYCL device. void get_memory_info(size_t &free_memory, size_t &total_memory) { #if (defined(__SYCL_COMPILER_VERSION) && __SYCL_COMPILER_VERSION >= 20221105) if (!has(sycl::aspect::ext_intel_free_memory)) { std::cerr << "get_memory_info: ext_intel_free_memory is not supported." << std::endl; free_memory = 0; } else { free_memory = get_info<sycl::ext::intel::info::device::free_memory>(); } #else std::cerr << "get_memory_info: ext_intel_free_memory is not supported." 
<< std::endl; free_memory = 0; #if defined(_MSC_VER) && !defined(__clang__) #pragma message("Querying the number of bytes of free memory is not supported") #else #warning "Querying the number of bytes of free memory is not supported" #endif #endif total_memory = get_device_info().get_global_mem_size(); } void get_device_info(device_info &out) const { device_info prop; prop.set_name(get_info<sycl::info::device::name>().c_str()); int major, minor; get_version(major, minor); prop.set_major_version(major); prop.set_minor_version(minor); prop.set_max_work_item_sizes( #if (__SYCL_COMPILER_VERSION && __SYCL_COMPILER_VERSION<20220902) // oneAPI DPC++ compiler older than 2022/09/02, where max_work_item_sizes is an enum class element get_info<sycl::info::device::max_work_item_sizes>()); #else // SYCL 2020-conformant code, max_work_item_sizes is a struct templated by an int get_info<sycl::info::device::max_work_item_sizes<3>>()); #endif prop.set_host_unified_memory( this->has(sycl::aspect::usm_host_allocations)); prop.set_max_clock_frequency( get_info<sycl::info::device::max_clock_frequency>() * 1000); prop.set_max_compute_units( get_info<sycl::info::device::max_compute_units>()); prop.set_max_work_group_size( get_info<sycl::info::device::max_work_group_size>()); prop.set_global_mem_size( get_info<sycl::info::device::global_mem_size>()); prop.set_local_mem_size(get_info<sycl::info::device::local_mem_size>()); #if (defined(SYCL_EXT_INTEL_DEVICE_INFO) && SYCL_EXT_INTEL_DEVICE_INFO >= 6) if (this->has(sycl::aspect::ext_intel_memory_clock_rate)) { unsigned int tmp = this->get_info<sycl::ext::intel::info::device::memory_clock_rate>(); if (tmp != 0) prop.set_memory_clock_rate(1000 * tmp); } if (this->has(sycl::aspect::ext_intel_memory_bus_width)) { prop.set_memory_bus_width( this->get_info<sycl::ext::intel::info::device::memory_bus_width>()); } if (this->has(sycl::aspect::ext_intel_device_id)) { prop.set_device_id( this->get_info<sycl::ext::intel::info::device::device_id>()); } if (this->has(sycl::aspect::ext_intel_device_info_uuid)) { prop.set_uuid( this->get_info<sycl::ext::intel::info::device::uuid>()); } #elif defined(_MSC_VER) && !defined(__clang__) #pragma message("get_device_info: querying memory_clock_rate and \ memory_bus_width are not supported by the compiler used. \ Use 3200000 kHz as memory_clock_rate default value. \ Use 64 bits as memory_bus_width default value.") #else #warning "get_device_info: querying memory_clock_rate and \ memory_bus_width are not supported by the compiler used. \ Use 3200000 kHz as memory_clock_rate default value. \ Use 64 bits as memory_bus_width default value." #endif size_t max_sub_group_size = 1; std::vector<size_t> sub_group_sizes = get_info<sycl::info::device::sub_group_sizes>(); for (const auto &sub_group_size : sub_group_sizes) { if (max_sub_group_size < sub_group_size) max_sub_group_size = sub_group_size; } prop.set_max_sub_group_size(max_sub_group_size); prop.set_max_work_items_per_compute_unit( get_info<sycl::info::device::max_work_group_size>()); int max_nd_range_size[] = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF}; prop.set_max_nd_range_size(max_nd_range_size); // Estimates max register size per work group, feel free to update the value // according to device properties. 
prop.set_max_register_size_per_work_group(65536); out = prop; } device_info get_device_info() const { device_info prop; get_device_info(prop); return prop; } void reset() { std::lock_guard<mutex_type> lock(m_mutex); clear_queues(); init_queues(); } sycl::queue &in_order_queue() { return *_q_in_order; } sycl::queue &out_of_order_queue() { return *_q_out_of_order; } sycl::queue &default_queue() { #ifdef DPCT_USM_LEVEL_NONE return out_of_order_queue(); #else return in_order_queue(); #endif // DPCT_USM_LEVEL_NONE } void queues_wait_and_throw() { std::unique_lock<mutex_type> lock(m_mutex); std::vector<std::shared_ptr<sycl::queue>> current_queues( _queues); lock.unlock(); for (const auto &q : current_queues) { q->wait_and_throw(); } // Guard the destruct of current_queues to make sure the ref count is safe. lock.lock(); } sycl::queue *create_queue(bool enable_exception_handler = false) { #ifdef DPCT_USM_LEVEL_NONE return create_out_of_order_queue(enable_exception_handler); #else return create_in_order_queue(enable_exception_handler); #endif // DPCT_USM_LEVEL_NONE } sycl::queue *create_in_order_queue(bool enable_exception_handler = false) { std::lock_guard<mutex_type> lock(m_mutex); return create_queue_impl(enable_exception_handler, sycl::property::queue::in_order()); } sycl::queue *create_out_of_order_queue(bool enable_exception_handler = false) { std::lock_guard<mutex_type> lock(m_mutex); return create_queue_impl(enable_exception_handler); } void destroy_queue(sycl::queue *&queue) { std::lock_guard<mutex_type> lock(m_mutex); _queues.erase(std::remove_if(_queues.begin(), _queues.end(), [=](const std::shared_ptr<sycl::queue> &q) -> bool { return q.get() == queue; }), _queues.end()); queue = nullptr; } void set_saved_queue(sycl::queue* q) { std::lock_guard<mutex_type> lock(m_mutex); _saved_queue = q; } sycl::queue *get_saved_queue() const { std::lock_guard<mutex_type> lock(m_mutex); return _saved_queue; } sycl::context get_context() const { return _ctx; } private: void clear_queues() { _queues.clear(); _q_in_order = _q_out_of_order = _saved_queue = nullptr; } void init_queues() { _q_in_order = create_queue_impl(true, sycl::property::queue::in_order()); _q_out_of_order = create_queue_impl(true); _saved_queue = &default_queue(); } /// Caller should acquire resource \p m_mutex before calling this function. template <class... Properties> sycl::queue *create_queue_impl(bool enable_exception_handler, Properties... properties) { sycl::async_handler eh = {}; if (enable_exception_handler) { eh = exception_handler; } _queues.push_back(std::make_shared<sycl::queue>( _ctx, *this, eh, sycl::property_list( #ifdef DPCT_PROFILING_ENABLED sycl::property::queue::enable_profiling(), #endif properties...))); return _queues.back().get(); } void get_version(int &major, int &minor) const { // Version string has the following format: // a. OpenCL<space><major.minor><space><vendor-specific-information> // b. 
<major.minor> std::string ver; ver = get_info<sycl::info::device::version>(); std::string::size_type i = 0; while (i < ver.size()) { if (isdigit(ver[i])) break; i++; } major = std::stoi(&(ver[i])); while (i < ver.size()) { if (ver[i] == '.') break; i++; } i++; minor = std::stoi(&(ver[i])); } sycl::queue *_q_in_order, *_q_out_of_order; sycl::queue *_saved_queue; sycl::context _ctx; std::vector<std::shared_ptr<sycl::queue>> _queues; mutable mutex_type m_mutex; }; static inline unsigned int get_tid() { #if defined(__linux__) return syscall(SYS_gettid); #elif defined(_WIN64) return GetCurrentThreadId(); #else #error "Only support Windows and Linux." #endif } /// device manager class dev_mgr { public: device_ext &current_device() { unsigned int dev_id=current_device_id(); check_id(dev_id); return *_devs[dev_id]; } device_ext &cpu_device() const { std::lock_guard<std::recursive_mutex> lock(m_mutex); if (_cpu_device == -1) { throw std::runtime_error("no valid cpu device"); } else { return *_devs[_cpu_device]; } } device_ext &get_device(unsigned int id) const { std::lock_guard<std::recursive_mutex> lock(m_mutex); check_id(id); return *_devs[id]; } unsigned int current_device_id() const { std::lock_guard<std::recursive_mutex> lock(m_mutex); auto it=_thread2dev_map.find(get_tid()); if(it != _thread2dev_map.end()) return it->second; return DEFAULT_DEVICE_ID; } /// Select device with a device ID. /// \param [in] id The id of the device which can /// be obtained through get_device_id(const sycl::device). void select_device(unsigned int id) { std::lock_guard<std::recursive_mutex> lock(m_mutex); check_id(id); _thread2dev_map[get_tid()]=id; } unsigned int device_count() { return _devs.size(); } unsigned int get_device_id(const sycl::device &dev) { unsigned int id = 0; for(auto dev_item : _devs) { if (*dev_item == dev) { break; } id++; } return id; } template <class DeviceSelector> std::enable_if_t< std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>> select_device(const DeviceSelector &selector = sycl::gpu_selector_v) { sycl::device selected_device = sycl::device(selector); unsigned int selected_device_id = get_device_id(selected_device); select_device(selected_device_id); } /// Returns the instance of device manager singleton. static dev_mgr &instance() { static dev_mgr d_m; return d_m; } dev_mgr(const dev_mgr &) = delete; dev_mgr &operator=(const dev_mgr &) = delete; dev_mgr(dev_mgr &&) = delete; dev_mgr &operator=(dev_mgr &&) = delete; private: mutable std::recursive_mutex m_mutex; dev_mgr() { sycl::device default_device = sycl::device(sycl::default_selector_v); _devs.push_back(std::make_shared<device_ext>(default_device)); std::vector<sycl::device> sycl_all_devs = sycl::device::get_devices(sycl::info::device_type::all); // Collect other devices except for the default device. if (default_device.is_cpu()) _cpu_device = 0; for (auto &dev : sycl_all_devs) { if (dev == default_device) { continue; } _devs.push_back(std::make_shared<device_ext>(dev)); if (_cpu_device == -1 && dev.is_cpu()) { _cpu_device = _devs.size() - 1; } } } void check_id(unsigned int id) const { if (id >= _devs.size()) { throw std::runtime_error("invalid device id"); } } std::vector<std::shared_ptr<device_ext>> _devs; /// DEFAULT_DEVICE_ID is used, if current_device_id() can not find current /// thread id in _thread2dev_map, which means default device should be used /// for the current thread. const unsigned int DEFAULT_DEVICE_ID = 0; /// thread-id to device-id map. 
std::map<unsigned int, unsigned int> _thread2dev_map; int _cpu_device = -1; }; /// Util function to get the default queue of current selected device depends on /// the USM config. Return the default out-of-ordered queue when USM-none is /// enabled, otherwise return the default in-ordered queue. static inline sycl::queue &get_default_queue() { return dev_mgr::instance().current_device().default_queue(); } /// Util function to get the default in-ordered queue of current device in /// dpct device manager. static inline sycl::queue &get_in_order_queue() { return dev_mgr::instance().current_device().in_order_queue(); } /// Util function to get the default out-of-ordered queue of current device in /// dpct device manager. static inline sycl::queue &get_out_of_order_queue() { return dev_mgr::instance().current_device().out_of_order_queue(); } /// Util function to get the id of current device in /// dpct device manager. static inline unsigned int get_current_device_id() { return dev_mgr::instance().current_device_id(); } /// Util function to get the current device. static inline device_ext &get_current_device() { return dev_mgr::instance().current_device(); } /// Util function to get a device by id. static inline device_ext &get_device(unsigned int id) { return dev_mgr::instance().get_device(id); } /// Util function to get the context of the default queue of current /// device in dpct device manager. static inline sycl::context get_default_context() { return dpct::get_current_device().get_context(); } /// Util function to get a CPU device. static inline device_ext &cpu_device() { return dev_mgr::instance().cpu_device(); } static inline unsigned int select_device(unsigned int id) { dev_mgr::instance().select_device(id); return id; } template <class DeviceSelector> static inline std::enable_if_t< std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>> select_device(const DeviceSelector &selector = sycl::gpu_selector_v) { dev_mgr::instance().select_device(selector); } static inline unsigned int get_device_id(const sycl::device &dev){ return dev_mgr::instance().get_device_id(dev); } /// Util function to check whether a device supports some kinds of sycl::aspect. inline void has_capability_or_fail(const sycl::device &dev, const std::initializer_list<sycl::aspect> &props) { for (const auto &it : props) { if (dev.has(it)) continue; switch (it) { case sycl::aspect::fp64: throw std::runtime_error("'double' is not supported in '" + dev.get_info<sycl::info::device::name>() + "' device"); break; case sycl::aspect::fp16: throw std::runtime_error("'half' is not supported in '" + dev.get_info<sycl::info::device::name>() + "' device"); break; default: #define __SYCL_ASPECT(ASPECT, ID) \ case sycl::aspect::ASPECT: \ return #ASPECT; #define __SYCL_ASPECT_DEPRECATED(ASPECT, ID, MESSAGE) __SYCL_ASPECT(ASPECT, ID) #define __SYCL_ASPECT_DEPRECATED_ALIAS(ASPECT, ID, MESSAGE) auto getAspectNameStr = [](sycl::aspect AspectNum) -> std::string { switch (AspectNum) { #include <sycl/info/aspects.def> #include <sycl/info/aspects_deprecated.def> default: return "unknown aspect"; } }; #undef __SYCL_ASPECT_DEPRECATED_ALIAS #undef __SYCL_ASPECT_DEPRECATED #undef __SYCL_ASPECT throw std::runtime_error( "'" + getAspectNameStr(it) + "' is not supported in '" + dev.get_info<sycl::info::device::name>() + "' device"); } break; } } } // namespace dpct #endif // __DPCT_DEVICE_HPP__
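// Illustrative usage sketch (an editorial example, not part of the original
// header): query the current device through the dpct device manager, print a
// few of its properties, and fail early if a required aspect is missing. The
// printed fields are only a small subset of what device_info exposes.
//
//   dpct::device_ext &dev = dpct::get_current_device();
//   dpct::device_info props;
//   dev.get_device_info(props);
//   std::cout << props.get_name() << ": "
//             << props.get_max_compute_units() << " compute units, "
//             << props.get_global_mem_size() / (1024 * 1024)
//             << " MB global memory" << std::endl;
//   // Default queue is in-order unless DPCT_USM_LEVEL_NONE is defined.
//   sycl::queue &q = dpct::get_default_queue();
//   dpct::has_capability_or_fail(q.get_device(), {sycl::aspect::fp64});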
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/memory.hpp
//==---- memory.hpp -------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_MEMORY_HPP__ #define __DPCT_MEMORY_HPP__ #include "device.hpp" #include <sycl/sycl.hpp> #include <cassert> #include <cstdint> #include <cstring> #include <mutex> #include <unordered_map> #include <map> #include <utility> #include <thread> #include <type_traits> #if defined(__linux__) #include <sys/mman.h> #elif defined(_WIN64) #define NOMINMAX #include <windows.h> #else #error "Only support Windows and Linux." #endif namespace dpct { enum memcpy_direction { host_to_host, host_to_device, device_to_host, device_to_device, automatic }; enum memory_region { global = 0, // device global memory constant, // device constant memory local, // device local memory shared, // memory which can be accessed by host and device }; typedef uint8_t byte_t; /// Buffer type to be used in Memory Management runtime. typedef sycl::buffer<byte_t> buffer_t; /// Pitched 2D/3D memory data. class pitched_data { public: pitched_data() : pitched_data(nullptr, 0, 0, 0) {} pitched_data(void *data, size_t pitch, size_t x, size_t y) : _data(data), _pitch(pitch), _x(x), _y(y) {} void *get_data_ptr() { return _data; } void set_data_ptr(void *data) { _data = data; } size_t get_pitch() { return _pitch; } void set_pitch(size_t pitch) { _pitch = pitch; } size_t get_x() { return _x; } void set_x(size_t x) { _x = x; }; size_t get_y() { return _y; } void set_y(size_t y) { _y = y; } private: void *_data; size_t _pitch, _x, _y; }; namespace detail { class mem_mgr { mem_mgr() { // Reserved address space, no real memory allocation happens here. #if defined(__linux__) mapped_address_space = (byte_t *)mmap(nullptr, mapped_region_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); #elif defined(_WIN64) mapped_address_space = (byte_t *)VirtualAlloc( NULL, // NULL specified as the base address parameter mapped_region_size, // Size of allocation MEM_RESERVE, // Allocate reserved pages PAGE_NOACCESS); // Protection = no access #else #error "Only support Windows and Linux." #endif next_free = mapped_address_space; }; public: using buffer_id_t = int; struct allocation { buffer_t buffer; byte_t *alloc_ptr; size_t size; }; ~mem_mgr() { #if defined(__linux__) munmap(mapped_address_space, mapped_region_size); #elif defined(_WIN64) VirtualFree(mapped_address_space, 0, MEM_RELEASE); #else #error "Only support Windows and Linux." #endif }; mem_mgr(const mem_mgr &) = delete; mem_mgr &operator=(const mem_mgr &) = delete; mem_mgr(mem_mgr &&) = delete; mem_mgr &operator=(mem_mgr &&) = delete; /// Allocate void *mem_alloc(size_t size) { if (!size) return nullptr; std::lock_guard<std::mutex> lock(m_mutex); if (next_free + size > mapped_address_space + mapped_region_size) { throw std::runtime_error("dpct_malloc: out of memory for virtual memory pool"); } // Allocation sycl::range<1> r(size); buffer_t buf(r); allocation A{buf, next_free, size}; // Map allocation to device pointer void *result = next_free; m_map.emplace(next_free + size, A); // Update pointer to the next free space. 
next_free += (size + extra_padding + alignment - 1) & ~(alignment - 1); return result; } /// Deallocate void mem_free(const void *ptr) { if (!ptr) return; std::lock_guard<std::mutex> lock(m_mutex); auto it = get_map_iterator(ptr); m_map.erase(it); } /// map: device pointer -> allocation(buffer, alloc_ptr, size) allocation translate_ptr(const void *ptr) { std::lock_guard<std::mutex> lock(m_mutex); auto it = get_map_iterator(ptr); return it->second; } /// Check if the pointer represents device pointer or not. bool is_device_ptr(const void *ptr) const { std::lock_guard<std::mutex> lock(m_mutex); return (mapped_address_space <= ptr) && (ptr < mapped_address_space + mapped_region_size); } /// Returns the instance of memory manager singleton. static mem_mgr &instance() { static mem_mgr m; return m; } private: std::map<byte_t *, allocation> m_map; mutable std::mutex m_mutex; byte_t *mapped_address_space; byte_t *next_free; const size_t mapped_region_size = 128ull * 1024 * 1024 * 1024; const size_t alignment = 256; /// This padding may be defined to some positive value to debug /// out of bound accesses. const size_t extra_padding = 0; std::map<byte_t *, allocation>::iterator get_map_iterator(const void *ptr) { auto it = m_map.upper_bound((byte_t *)ptr); if (it == m_map.end()) { // Not a virtual pointer. throw std::runtime_error("can not get buffer from non-virtual pointer"); } const allocation &alloc = it->second; if (ptr < alloc.alloc_ptr) { // Out of bound. // This may happen if there's a gap between allocations due to alignment // or extra padding and pointer points to this gap. throw std::runtime_error("invalid virtual pointer"); } return it; } }; template <class T, memory_region Memory, size_t Dimension> class accessor; template <memory_region Memory, class T = byte_t> class memory_traits { public: static constexpr sycl::access::target target = sycl::access::target::device; static constexpr sycl::access_mode mode = (Memory == constant) ? sycl::access_mode::read : sycl::access_mode::read_write; static constexpr size_t type_size = sizeof(T); using element_t = typename std::conditional<Memory == constant, const T, T>::type; using value_t = typename std::remove_cv<T>::type; template <size_t Dimension = 1> using accessor_t = typename std::conditional< Memory == local, sycl::local_accessor<value_t, Dimension>, sycl::accessor<T, Dimension, mode, target>>::type; using pointer_t = T *; }; static inline void *dpct_malloc(size_t size, sycl::queue &q) { #ifdef DPCT_USM_LEVEL_NONE return mem_mgr::instance().mem_alloc(size * sizeof(byte_t)); #else return sycl::malloc_device(size, q.get_device(), q.get_context()); #endif // DPCT_USM_LEVEL_NONE } #define PITCH_DEFAULT_ALIGN(x) (((x) + 31) & ~(0x1F)) static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y, size_t z, sycl::queue &q) { pitch = PITCH_DEFAULT_ALIGN(x); return dpct_malloc(pitch * y * z, q); } /// Set \p value to the first \p size bytes starting from \p dev_ptr in \p q. /// /// \param q The queue in which the operation is done. /// \param dev_ptr Pointer to the device memory address. /// \param value Value to be set. /// \param size Number of bytes to be set to the value. /// \returns An event representing the memset operation. 
static inline sycl::event dpct_memset(sycl::queue &q, void *dev_ptr, int value, size_t size) { #ifdef DPCT_USM_LEVEL_NONE auto &mm = mem_mgr::instance(); assert(mm.is_device_ptr(dev_ptr)); auto alloc = mm.translate_ptr(dev_ptr); size_t offset = (byte_t *)dev_ptr - alloc.alloc_ptr; return q.submit([&](sycl::handler &cgh) { auto r = sycl::range<1>(size); auto o = sycl::id<1>(offset); sycl::accessor<byte_t, 1, sycl::access_mode::write, sycl::access::target::device> acc(alloc.buffer, cgh, r, o); cgh.fill(acc, (byte_t)value); }); #else return q.memset(dev_ptr, value, size); #endif // DPCT_USM_LEVEL_NONE } /// Set \p value to the 3D memory region pointed by \p data in \p q. \p size /// specifies the 3D memory size to set. /// /// \param q The queue in which the operation is done. /// \param data Pointer to the device memory region. /// \param value Value to be set. /// \param size Memory region size. /// \returns An event list representing the memset operations. static inline std::vector<sycl::event> dpct_memset(sycl::queue &q, pitched_data data, int value, sycl::range<3> size) { std::vector<sycl::event> event_list; size_t slice = data.get_pitch() * data.get_y(); unsigned char *data_surface = (unsigned char *)data.get_data_ptr(); for (size_t z = 0; z < size.get(2); ++z) { unsigned char *data_ptr = data_surface; for (size_t y = 0; y < size.get(1); ++y) { event_list.push_back(dpct_memset(q, data_ptr, value, size.get(0))); data_ptr += data.get_pitch(); } data_surface += slice; } return event_list; } /// memset 2D matrix with pitch. static inline std::vector<sycl::event> dpct_memset(sycl::queue &q, void *ptr, size_t pitch, int val, size_t x, size_t y) { return dpct_memset(q, pitched_data(ptr, pitch, x, 1), val, sycl::range<3>(x, y, 1)); } enum class pointer_access_attribute { host_only = 0, device_only, host_device, end }; static pointer_access_attribute get_pointer_attribute(sycl::queue &q, const void *ptr) { #ifdef DPCT_USM_LEVEL_NONE return mem_mgr::instance().is_device_ptr(ptr) ? 
pointer_access_attribute::device_only : pointer_access_attribute::host_only; #else switch (sycl::get_pointer_type(ptr, q.get_context())) { case sycl::usm::alloc::unknown: return pointer_access_attribute::host_only; case sycl::usm::alloc::device: return pointer_access_attribute::device_only; case sycl::usm::alloc::shared: case sycl::usm::alloc::host: return pointer_access_attribute::host_device; } #endif } static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr, const void *from_ptr, memcpy_direction dir) { switch (dir) { case memcpy_direction::host_to_host: case memcpy_direction::host_to_device: case memcpy_direction::device_to_host: case memcpy_direction::device_to_device: return dir; case memcpy_direction::automatic: { // table[to_attribute][from_attribute] static const memcpy_direction direction_table[static_cast<unsigned>(pointer_access_attribute::end)] [static_cast<unsigned>(pointer_access_attribute::end)] = {{memcpy_direction::host_to_host, memcpy_direction::device_to_host, memcpy_direction::host_to_host}, {memcpy_direction::host_to_device, memcpy_direction::device_to_device, memcpy_direction::device_to_device}, {memcpy_direction::host_to_host, memcpy_direction::device_to_device, memcpy_direction::device_to_device}}; return direction_table[static_cast<unsigned>(get_pointer_attribute( q, to_ptr))][static_cast<unsigned>(get_pointer_attribute(q, from_ptr))]; } default: throw std::runtime_error("dpct_memcpy: invalid direction value"); } } static sycl::event dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size, memcpy_direction direction, const std::vector<sycl::event> &dep_events = {}) { if (!size) return sycl::event{}; #ifdef DPCT_USM_LEVEL_NONE auto &mm = mem_mgr::instance(); auto real_direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction); switch (real_direction) { case host_to_host: return q.submit([&](sycl::handler &cgh) { cgh.depends_on(dep_events); cgh.host_task([=] { std::memcpy(to_ptr, from_ptr, size); }); }); case host_to_device: { auto alloc = mm.translate_ptr(to_ptr); size_t offset = (byte_t *)to_ptr - alloc.alloc_ptr; return q.submit([&](sycl::handler &cgh) { cgh.depends_on(dep_events); auto r = sycl::range<1>(size); auto o = sycl::id<1>(offset); sycl::accessor<byte_t, 1, sycl::access_mode::write, sycl::access::target::device> acc(alloc.buffer, cgh, r, o); cgh.copy(from_ptr, acc); }); } case device_to_host: { auto alloc = mm.translate_ptr(from_ptr); size_t offset = (byte_t *)from_ptr - alloc.alloc_ptr; return q.submit([&](sycl::handler &cgh) { cgh.depends_on(dep_events); auto r = sycl::range<1>(size); auto o = sycl::id<1>(offset); sycl::accessor<byte_t, 1, sycl::access_mode::read, sycl::access::target::device> acc(alloc.buffer, cgh, r, o); cgh.copy(acc, to_ptr); }); } case device_to_device: { auto to_alloc = mm.translate_ptr(to_ptr); auto from_alloc = mm.translate_ptr(from_ptr); size_t to_offset = (byte_t *)to_ptr - to_alloc.alloc_ptr; size_t from_offset = (byte_t *)from_ptr - from_alloc.alloc_ptr; return q.submit([&](sycl::handler &cgh) { cgh.depends_on(dep_events); auto r = sycl::range<1>(size); auto to_o = sycl::id<1>(to_offset); auto from_o = sycl::id<1>(from_offset); sycl::accessor<byte_t, 1, sycl::access_mode::write, sycl::access::target::device> to_acc(to_alloc.buffer, cgh, r, to_o); sycl::accessor<byte_t, 1, sycl::access_mode::read, sycl::access::target::device> from_acc(from_alloc.buffer, cgh, r, from_o); cgh.copy(from_acc, to_acc); }); } default: throw std::runtime_error("dpct_memcpy: invalid direction 
value"); } #else return q.memcpy(to_ptr, from_ptr, size, dep_events); #endif // DPCT_USM_LEVEL_NONE } // Get actual copy range and make sure it will not exceed range. static inline size_t get_copy_range(sycl::range<3> size, size_t slice, size_t pitch) { return slice * (size.get(2) - 1) + pitch * (size.get(1) - 1) + size.get(0); } static inline size_t get_offset(sycl::id<3> id, size_t slice, size_t pitch) { return slice * id.get(2) + pitch * id.get(1) + id.get(0); } /// copy 3D matrix specified by \p size from 3D matrix specified by \p from_ptr /// and \p from_range to another specified by \p to_ptr and \p to_range. static inline std::vector<sycl::event> dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, sycl::range<3> to_range, sycl::range<3> from_range, sycl::id<3> to_id, sycl::id<3> from_id, sycl::range<3> size, memcpy_direction direction, const std::vector<sycl::event> &dep_events = {}) { // RAII for host pointer class host_buffer { void *_buf; size_t _size; sycl::queue &_q; const std::vector<sycl::event> &_deps; // free operation depends public: host_buffer(size_t size, sycl::queue &q, const std::vector<sycl::event> &deps) : _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {} void *get_ptr() const { return _buf; } size_t get_size() const { return _size; } ~host_buffer() { if (_buf) { _q.submit([&](sycl::handler &cgh) { cgh.depends_on(_deps); cgh.host_task([buf = _buf] { std::free(buf); }); }); } } }; std::vector<sycl::event> event_list; size_t to_slice = to_range.get(1) * to_range.get(0), from_slice = from_range.get(1) * from_range.get(0); unsigned char *to_surface = (unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0)); const unsigned char *from_surface = (const unsigned char *)from_ptr + get_offset(from_id, from_slice, from_range.get(0)); if (to_slice == from_slice && to_slice == size.get(1) * size.get(0)) { return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2), direction, dep_events)}; } direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction); size_t size_slice = size.get(1) * size.get(0); switch (direction) { case host_to_host: for (size_t z = 0; z < size.get(2); ++z) { unsigned char *to_ptr = to_surface; const unsigned char *from_ptr = from_surface; if (to_range.get(0) == from_range.get(0) && to_range.get(0) == size.get(0)) { event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice, direction, dep_events)); } else { for (size_t y = 0; y < size.get(1); ++y) { event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0), direction, dep_events)); to_ptr += to_range.get(0); from_ptr += from_range.get(0); } } to_surface += to_slice; from_surface += from_slice; } break; case host_to_device: { host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q, event_list); std::vector<sycl::event> host_events; if (to_slice == size_slice) { // Copy host data to a temp host buffer with the shape of target. host_events = dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range, sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host, dep_events); } else { // Copy host data to a temp host buffer with the shape of target. host_events = dpct_memcpy( q, buf.get_ptr(), from_surface, to_range, from_range, sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host, // If has padding data, not sure whether it is useless. So fill temp // buffer with it. 
std::vector<sycl::event>{ dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(), device_to_host, dep_events)}); } // Copy from temp host buffer to device with only one submit. event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(), buf.get_size(), host_to_device, host_events)); break; } case device_to_host: { host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q, event_list); // Copy from host temp buffer to host target with reshaping. event_list = dpct_memcpy( q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host, // Copy from device to temp host buffer with only one submit. std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface, buf.get_size(), device_to_host, dep_events)}); break; } case device_to_device: #ifdef DPCT_USM_LEVEL_NONE { auto &mm = mem_mgr::instance(); auto to_alloc = mm.translate_ptr(to_surface); auto from_alloc = mm.translate_ptr(from_surface); size_t to_offset = (byte_t *)to_surface - to_alloc.alloc_ptr; size_t from_offset = (byte_t *)from_surface - from_alloc.alloc_ptr; event_list.push_back(q.submit([&](sycl::handler &cgh) { cgh.depends_on(dep_events); auto to_o = sycl::id<1>(to_offset); auto from_o = sycl::id<1>(from_offset); sycl::accessor<byte_t, 1, sycl::access_mode::write, sycl::access::target::device> to_acc(to_alloc.buffer, cgh, get_copy_range(size, to_slice, to_range.get(0)), to_o); sycl::accessor<byte_t, 1, sycl::access_mode::read, sycl::access::target::device> from_acc(from_alloc.buffer, cgh, get_copy_range(size, from_slice, from_range.get(0)), from_o); cgh.parallel_for<class dpct_memcpy_3d_detail_usmnone>( size, [=](sycl::id<3> id) { to_acc[get_offset(id, to_slice, to_range.get(0))] = from_acc[get_offset(id, from_slice, from_range.get(0))]; }); })); } #else event_list.push_back(q.submit([&](sycl::handler &cgh) { cgh.depends_on(dep_events); cgh.parallel_for<class dpct_memcpy_3d_detail>( size, [=](sycl::id<3> id) { to_surface[get_offset(id, to_slice, to_range.get(0))] = from_surface[get_offset(id, from_slice, from_range.get(0))]; }); })); #endif break; default: throw std::runtime_error("dpct_memcpy: invalid direction value"); } return event_list; } /// memcpy 2D/3D matrix specified by pitched_data. static inline std::vector<sycl::event> dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id, pitched_data from, sycl::id<3> from_id, sycl::range<3> size, memcpy_direction direction = automatic) { return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(), sycl::range<3>(to.get_pitch(), to.get_y(), 1), sycl::range<3>(from.get_pitch(), from.get_y(), 1), to_id, from_id, size, direction); } /// memcpy 2D matrix with pitch. 
static inline std::vector<sycl::event> dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t to_pitch, size_t from_pitch, size_t x, size_t y, memcpy_direction direction = automatic) { return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1), sycl::range<3>(from_pitch, y, 1), sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), sycl::range<3>(x, y, 1), direction); } namespace deprecated { template <typename T, sycl::usm::alloc AllocKind> class usm_allocator { private: using Alloc = sycl::usm_allocator<T, AllocKind>; Alloc _impl; public: using value_type = typename std::allocator_traits<Alloc>::value_type; using pointer = typename std::allocator_traits<Alloc>::pointer; using const_pointer = typename std::allocator_traits<Alloc>::const_pointer; using void_pointer = typename std::allocator_traits<Alloc>::void_pointer; using const_void_pointer = typename std::allocator_traits<Alloc>::const_void_pointer; using reference = typename std::allocator_traits<Alloc>::value_type &; using const_reference = const typename std::allocator_traits<Alloc>::value_type &; using difference_type = typename std::allocator_traits<Alloc>::difference_type; using size_type = typename std::allocator_traits<Alloc>::size_type; using propagate_on_container_copy_assignment = typename std::allocator_traits< Alloc>::propagate_on_container_copy_assignment; using propagate_on_container_move_assignment = typename std::allocator_traits< Alloc>::propagate_on_container_move_assignment; using propagate_on_container_swap = typename std::allocator_traits<Alloc>::propagate_on_container_swap; using is_always_equal = typename std::allocator_traits<Alloc>::is_always_equal; template <typename U> struct rebind { typedef usm_allocator<U, AllocKind> other; }; usm_allocator() : _impl(dpct::get_default_queue()) {} ~usm_allocator() {} usm_allocator(const usm_allocator &other) : _impl(other._impl) {} usm_allocator(usm_allocator &&other) : _impl(std::move(other._impl)) {} pointer address(reference r) { return &r; } const_pointer address(const_reference r) { return &r; } pointer allocate(size_type cnt, const_void_pointer hint = nullptr) { return std::allocator_traits<Alloc>::allocate(_impl, cnt, hint); } void deallocate(pointer p, size_type cnt) { std::allocator_traits<Alloc>::deallocate(_impl, p, cnt); } size_type max_size() const { return std::allocator_traits<Alloc>::max_size(_impl); } bool operator==(const usm_allocator &other) const { return _impl == other._impl; } bool operator!=(const usm_allocator &other) const { return _impl != other._impl; } }; } // namespace deprecated inline void dpct_free(void *ptr, const sycl::queue &q) { if (ptr) { #ifdef DPCT_USM_LEVEL_NONE detail::mem_mgr::instance().mem_free(ptr); #else sycl::free(ptr, q.get_context()); #endif // DPCT_USM_LEVEL_NONE } } } // namespace detail #ifdef DPCT_USM_LEVEL_NONE /// Check if the pointer \p ptr represents device pointer or not. /// /// \param ptr The pointer to be checked. /// \returns true if \p ptr is a device pointer. template<class T> static inline bool is_device_ptr(T ptr) { if constexpr (std::is_pointer<T>::value) { return detail::mem_mgr::instance().is_device_ptr(ptr); } return false; } #endif /// Get the buffer and the offset of a piece of memory pointed to by \p ptr. /// /// \param ptr Pointer to a piece of memory. /// If NULL is passed as an argument, an exception will be thrown. /// \returns a pair containing both the buffer and the offset. 
static std::pair<buffer_t, size_t> get_buffer_and_offset(const void *ptr) { if (ptr) { auto alloc = detail::mem_mgr::instance().translate_ptr(ptr); size_t offset = (byte_t *)ptr - alloc.alloc_ptr; return std::make_pair(alloc.buffer, offset); } else { throw std::runtime_error( "NULL pointer argument in get_buffer_and_offset function is invalid"); } } /// Get the data pointed from \p ptr as a 1D buffer reinterpreted as type T. template <typename T> static sycl::buffer<T> get_buffer(const void *ptr) { if (!ptr) return sycl::buffer<T>(sycl::range<1>(0)); auto alloc = detail::mem_mgr::instance().translate_ptr(ptr); return alloc.buffer.reinterpret<T>( sycl::range<1>(alloc.size / sizeof(T))); } /// Get the buffer of a piece of memory pointed to by \p ptr. /// /// \param ptr Pointer to a piece of memory. /// \returns the buffer. static buffer_t get_buffer(const void *ptr) { return detail::mem_mgr::instance().translate_ptr(ptr).buffer; } /// A wrapper class contains an accessor and an offset. template <typename dataT, sycl::access_mode accessMode = sycl::access_mode::read_write> class access_wrapper { sycl::accessor<byte_t, 1, accessMode> accessor; size_t offset; public: /// Construct the accessor wrapper for memory pointed by \p ptr. /// /// \param ptr Pointer to memory. /// \param cgh The command group handler. access_wrapper(const void *ptr, sycl::handler &cgh) : accessor(get_buffer(ptr).get_access<accessMode>(cgh)), offset(0) { auto alloc = detail::mem_mgr::instance().translate_ptr(ptr); offset = (byte_t *)ptr - alloc.alloc_ptr; } /// Get the device pointer. /// /// \returns a device pointer with offset. dataT get_raw_pointer() const { return (dataT)(&accessor[0] + offset); } }; /// Get the accessor for memory pointed by \p ptr. /// /// \param ptr Pointer to memory. /// If NULL is passed as an argument, an exception will be thrown. /// \param cgh The command group handler. /// \returns an accessor. template <sycl::access_mode accessMode = sycl::access_mode::read_write> static sycl::accessor<byte_t, 1, accessMode> get_access(const void *ptr, sycl::handler &cgh) { if (ptr) { auto alloc = detail::mem_mgr::instance().translate_ptr(ptr); return alloc.buffer.get_access<accessMode>(cgh); } else { throw std::runtime_error( "NULL pointer argument in get_access function is invalid"); } } /// Allocate memory block on the device. /// \param num_bytes Number of bytes to allocate. /// \param q Queue to execute the allocate task. /// \returns A pointer to the newly allocated memory. template <typename T> static inline void *dpct_malloc(T num_bytes, sycl::queue &q = get_default_queue()) { return detail::dpct_malloc(static_cast<size_t>(num_bytes), q); } /// Get the host pointer from a buffer that is mapped to virtual pointer ptr. /// \param ptr Virtual Pointer mapped to device buffer /// \returns A host pointer template <typename T> static inline T *get_host_ptr(const void *ptr) { auto BufferOffset = get_buffer_and_offset(ptr); auto host_ptr = BufferOffset.first.get_host_access() .get_pointer(); return (T *)(host_ptr + BufferOffset.second); } /// Allocate memory block for 3D array on the device. /// \param size Size of the memory block, in bytes. /// \param q Queue to execute the allocate task. /// \returns A pitched_data object which stores the memory info. 
static inline pitched_data dpct_malloc(sycl::range<3> size, sycl::queue &q = get_default_queue()) { pitched_data pitch(nullptr, 0, size.get(0), size.get(1)); size_t pitch_size; pitch.set_data_ptr(detail::dpct_malloc(pitch_size, size.get(0), size.get(1), size.get(2), q)); pitch.set_pitch(pitch_size); return pitch; } /// Allocate memory block for 2D array on the device. /// \param [out] pitch Aligned size of x in bytes. /// \param x Range in dim x. /// \param y Range in dim y. /// \param q Queue to execute the allocate task. /// \returns A pointer to the newly allocated memory. static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y, sycl::queue &q = get_default_queue()) { return detail::dpct_malloc(pitch, x, y, 1, q); } /// free /// \param ptr Point to free. /// \param q Queue to execute the free task. /// \returns no return value. static inline void dpct_free(void *ptr, sycl::queue &q = get_default_queue()) { detail::dpct_free(ptr, q); } /// Free the device memory pointed by a batch of pointers in \p pointers which /// are related to \p q after \p events completed. /// /// \param pointers The pointers point to the device memory requested to be freed. /// \param events The events to be waited. /// \param q The sycl::queue the memory relates to. inline void async_dpct_free(const std::vector<void *> &pointers, const std::vector<sycl::event> &events, sycl::queue &q = get_default_queue()) { q.submit([&](sycl::handler &cgh) { cgh.depends_on(events); cgh.host_task([=] { for (auto p : pointers) if (p) { detail::dpct_free(p, q); } }); }); } /// Synchronously copies \p size bytes from the address specified by \p from_ptr /// to the address specified by \p to_ptr. The value of \p direction is used to /// set the copy direction, it can be \a host_to_host, \a host_to_device, /// \a device_to_host, \a device_to_device or \a automatic. The function will /// return after the copy is completed. /// /// \param to_ptr Pointer to destination memory address. /// \param from_ptr Pointer to source memory address. /// \param size Number of bytes to be copied. /// \param direction Direction of the copy. /// \param q Queue to execute the copy task. /// \returns no return value. static void dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size, memcpy_direction direction = automatic, sycl::queue &q = get_default_queue()) { detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction).wait(); } /// Asynchronously copies \p size bytes from the address specified by \p /// from_ptr to the address specified by \p to_ptr. The value of \p direction is /// used to set the copy direction, it can be \a host_to_host, \a /// host_to_device, \a device_to_host, \a device_to_device or \a automatic. The /// return of the function does NOT guarantee the copy is completed. /// /// \param to_ptr Pointer to destination memory address. /// \param from_ptr Pointer to source memory address. /// \param size Number of bytes to be copied. /// \param direction Direction of the copy. /// \param q Queue to execute the copy task. /// \returns no return value. 
static void async_dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size, memcpy_direction direction = automatic, sycl::queue &q = dpct::get_default_queue()) { detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction); } /// Synchronously copies 2D matrix specified by \p x and \p y from the address /// specified by \p from_ptr to the address specified by \p to_ptr, while \p /// from_pitch and \p to_pitch are the range of dim x in bytes of the matrix /// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to /// set the copy direction, it can be \a host_to_host, \a host_to_device, \a /// device_to_host, \a device_to_device or \a automatic. The function will /// return after the copy is completed. /// /// \param to_ptr Pointer to destination memory address. /// \param to_pitch Range of dim x in bytes of destination matrix. /// \param from_ptr Pointer to source memory address. /// \param from_pitch Range of dim x in bytes of source matrix. /// \param x Range of dim x of matrix to be copied. /// \param y Range of dim y of matrix to be copied. /// \param direction Direction of the copy. /// \param q Queue to execute the copy task. /// \returns no return value. static inline void dpct_memcpy(void *to_ptr, size_t to_pitch, const void *from_ptr, size_t from_pitch, size_t x, size_t y, memcpy_direction direction = automatic, sycl::queue &q = dpct::get_default_queue()) { sycl::event::wait(detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch, from_pitch, x, y, direction)); } /// Asynchronously copies 2D matrix specified by \p x and \p y from the address /// specified by \p from_ptr to the address specified by \p to_ptr, while \p /// \p from_pitch and \p to_pitch are the range of dim x in bytes of the matrix /// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to /// set the copy direction, it can be \a host_to_host, \a host_to_device, \a /// device_to_host, \a device_to_device or \a automatic. The return of the /// function does NOT guarantee the copy is completed. /// /// \param to_ptr Pointer to destination memory address. /// \param to_pitch Range of dim x in bytes of destination matrix. /// \param from_ptr Pointer to source memory address. /// \param from_pitch Range of dim x in bytes of source matrix. /// \param x Range of dim x of matrix to be copied. /// \param y Range of dim y of matrix to be copied. /// \param direction Direction of the copy. /// \param q Queue to execute the copy task. /// \returns no return value. static inline void async_dpct_memcpy(void *to_ptr, size_t to_pitch, const void *from_ptr, size_t from_pitch, size_t x, size_t y, memcpy_direction direction = automatic, sycl::queue &q = get_default_queue()) { detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch, from_pitch, x, y, direction); } /// Synchronously copies a subset of a 3D matrix specified by \p to to another /// 3D matrix specified by \p from. The from and to position info are specified /// by \p from_pos and \p to_pos The copied matrix size is specified by \p size. /// The value of \p direction is used to set the copy direction, it can be \a /// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or /// \a automatic. The function will return after the copy is completed. /// /// \param to Destination matrix info. /// \param to_pos Position of destination. /// \param from Source matrix info. /// \param from_pos Position of destination. /// \param size Range of the submatrix to be copied. /// \param direction Direction of the copy. 
/// \param q Queue to execute the copy task. /// \returns no return value. static inline void dpct_memcpy(pitched_data to, sycl::id<3> to_pos, pitched_data from, sycl::id<3> from_pos, sycl::range<3> size, memcpy_direction direction = automatic, sycl::queue &q = dpct::get_default_queue()) { sycl::event::wait( detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction)); } /// Asynchronously copies a subset of a 3D matrix specified by \p to to another /// 3D matrix specified by \p from. The from and to position info are specified /// by \p from_pos and \p to_pos The copied matrix size is specified by \p size. /// The value of \p direction is used to set the copy direction, it can be \a /// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or /// \a automatic. The return of the function does NOT guarantee the copy is /// completed. /// /// \param to Destination matrix info. /// \param to_pos Position of destination. /// \param from Source matrix info. /// \param from_pos Position of destination. /// \param size Range of the submatrix to be copied. /// \param direction Direction of the copy. /// \param q Queue to execute the copy task. /// \returns no return value. static inline void async_dpct_memcpy(pitched_data to, sycl::id<3> to_pos, pitched_data from, sycl::id<3> from_pos, sycl::range<3> size, memcpy_direction direction = automatic, sycl::queue &q = get_default_queue()) { detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction); } /// Synchronously sets \p value to the first \p size bytes starting from \p /// dev_ptr. The function will return after the memset operation is completed. /// /// \param dev_ptr Pointer to the device memory address. /// \param value Value to be set. /// \param size Number of bytes to be set to the value. /// \param q The queue in which the operation is done. /// \returns no return value. static void dpct_memset(void *dev_ptr, int value, size_t size, sycl::queue &q = get_default_queue()) { detail::dpct_memset(q, dev_ptr, value, size).wait(); } /// Asynchronously sets \p value to the first \p size bytes starting from \p /// dev_ptr. The return of the function does NOT guarantee the memset operation /// is completed. /// /// \param dev_ptr Pointer to the device memory address. /// \param value Value to be set. /// \param size Number of bytes to be set to the value. /// \returns no return value. static void async_dpct_memset(void *dev_ptr, int value, size_t size, sycl::queue &q = dpct::get_default_queue()) { detail::dpct_memset(q, dev_ptr, value, size); } /// Sets \p value to the 2D memory region pointed by \p ptr in \p q. \p x and /// \p y specify the setted 2D memory size. \p pitch is the bytes in linear /// dimension, including padding bytes. The function will return after the /// memset operation is completed. /// /// \param ptr Pointer to the device memory region. /// \param pitch Bytes in linear dimension, including padding bytes. /// \param value Value to be set. /// \param x The setted memory size in linear dimension. /// \param y The setted memory size in second dimension. /// \param q The queue in which the operation is done. /// \returns no return value. static inline void dpct_memset(void *ptr, size_t pitch, int val, size_t x, size_t y, sycl::queue &q = get_default_queue()) { sycl::event::wait(detail::dpct_memset(q, ptr, pitch, val, x, y)); } /// Sets \p value to the 2D memory region pointed by \p ptr in \p q. \p x and /// \p y specify the setted 2D memory size. 
\p pitch is the bytes in linear /// dimension, including padding bytes. The return of the function does NOT /// guarantee the memset operation is completed. /// /// \param ptr Pointer to the device memory region. /// \param pitch Bytes in linear dimension, including padding bytes. /// \param value Value to be set. /// \param x The setted memory size in linear dimension. /// \param y The setted memory size in second dimension. /// \param q The queue in which the operation is done. /// \returns no return value. static inline void async_dpct_memset(void *ptr, size_t pitch, int val, size_t x, size_t y, sycl::queue &q = get_default_queue()) { detail::dpct_memset(q, ptr, pitch, val, x, y); } /// Sets \p value to the 3D memory region specified by \p pitch in \p q. \p size /// specify the setted 3D memory size. The function will return after the /// memset operation is completed. /// /// \param pitch Specify the 3D memory region. /// \param value Value to be set. /// \param size The setted 3D memory size. /// \param q The queue in which the operation is done. /// \returns no return value. static inline void dpct_memset(pitched_data pitch, int val, sycl::range<3> size, sycl::queue &q = get_default_queue()) { sycl::event::wait(detail::dpct_memset(q, pitch, val, size)); } /// Sets \p value to the 3D memory region specified by \p pitch in \p q. \p size /// specify the setted 3D memory size. The return of the function does NOT /// guarantee the memset operation is completed. /// /// \param pitch Specify the 3D memory region. /// \param value Value to be set. /// \param size The setted 3D memory size. /// \param q The queue in which the operation is done. /// \returns no return value. static inline void async_dpct_memset(pitched_data pitch, int val, sycl::range<3> size, sycl::queue &q = get_default_queue()) { detail::dpct_memset(q, pitch, val, size); } /// dpct accessor used as device function parameter. 
template <class T, memory_region Memory, size_t Dimension> class accessor; template <class T, memory_region Memory> class accessor<T, Memory, 3> { public: using memory_t = detail::memory_traits<Memory, T>; using element_t = typename memory_t::element_t; using pointer_t = typename memory_t::pointer_t; using accessor_t = typename memory_t::template accessor_t<3>; accessor(pointer_t data, const sycl::range<3> &in_range) : _data(data), _range(in_range) {} template <memory_region M = Memory> accessor(typename std::enable_if<M != local, const accessor_t>::type &acc) : accessor(acc, acc.get_range()) {} accessor(const accessor_t &acc, const sycl::range<3> &in_range) : accessor(acc.get_pointer(), in_range) {} accessor<T, Memory, 2> operator[](size_t index) const { sycl::range<2> sub(_range.get(1), _range.get(2)); return accessor<T, Memory, 2>(_data + index * sub.size(), sub); } pointer_t get_ptr() const { return _data; } private: pointer_t _data; sycl::range<3> _range; }; template <class T, memory_region Memory> class accessor<T, Memory, 2> { public: using memory_t = detail::memory_traits<Memory, T>; using element_t = typename memory_t::element_t; using pointer_t = typename memory_t::pointer_t; using accessor_t = typename memory_t::template accessor_t<2>; accessor(pointer_t data, const sycl::range<2> &in_range) : _data(data), _range(in_range) {} template <memory_region M = Memory> accessor(typename std::enable_if<M != local, const accessor_t>::type &acc) : accessor(acc, acc.get_range()) {} accessor(const accessor_t &acc, const sycl::range<2> &in_range) : accessor(acc.get_pointer(), in_range) {} pointer_t operator[](size_t index) const { return _data + _range.get(1) * index; } pointer_t get_ptr() const { return _data; } private: pointer_t _data; sycl::range<2> _range; }; namespace detail { /// Device variable with address space of shared, global or constant. 
template <class T, memory_region Memory, size_t Dimension> class device_memory { public: using accessor_t = typename detail::memory_traits<Memory, T>::template accessor_t<Dimension>; using value_t = typename detail::memory_traits<Memory, T>::value_t; using dpct_accessor_t = dpct::accessor<T, Memory, Dimension>; device_memory() : device_memory(sycl::range<Dimension>(1)) {} /// Constructor of 1-D array with initializer list device_memory( const sycl::range<Dimension> &in_range, std::initializer_list<value_t> &&init_list) : device_memory(in_range) { assert(init_list.size() <= in_range.size()); _host_ptr = (value_t *)std::malloc(_size); std::memset(_host_ptr, 0, _size); std::memcpy(_host_ptr, init_list.begin(), init_list.size() * sizeof(T)); } /// Constructor of 2-D array with initializer list template <size_t D = Dimension> device_memory( const typename std::enable_if<D == 2, sycl::range<2>>::type &in_range, std::initializer_list<std::initializer_list<value_t>> &&init_list) : device_memory(in_range) { assert(init_list.size() <= in_range[0]); _host_ptr = (value_t *)std::malloc(_size); std::memset(_host_ptr, 0, _size); auto tmp_data = _host_ptr; for (auto sub_list : init_list) { assert(sub_list.size() <= in_range[1]); std::memcpy(tmp_data, sub_list.begin(), sub_list.size() * sizeof(T)); tmp_data += in_range[1]; } } /// Constructor with range device_memory(const sycl::range<Dimension> &range_in) : _size(range_in.size() * sizeof(T)), _range(range_in), _reference(false), _host_ptr(nullptr), _device_ptr(nullptr) { static_assert( (Memory == global) || (Memory == constant) || (Memory == shared), "device memory region should be global, constant or shared"); // Make sure that singleton class mem_mgr and dev_mgr will destruct later // than this. detail::mem_mgr::instance(); dev_mgr::instance(); } /// Constructor with range template <class... Args> device_memory(Args... Arguments) : device_memory(sycl::range<Dimension>(Arguments...)) {} ~device_memory() { if (_device_ptr && !_reference) dpct::dpct_free(_device_ptr); if (_host_ptr) std::free(_host_ptr); } /// Allocate memory with default queue, and init memory if has initial value. void init() { init(dpct::get_default_queue()); } /// Allocate memory with specified queue, and init memory if has initial value. void init(sycl::queue &q) { if (_device_ptr) return; if (!_size) return; allocate_device(q); if (_host_ptr) detail::dpct_memcpy(q, _device_ptr, _host_ptr, _size, host_to_device); } /// The variable is assigned to a device pointer. void assign(value_t *src, size_t size) { this->~device_memory(); new (this) device_memory(src, size); } /// Get memory pointer of the memory object, which is virtual pointer when /// usm is not used, and device pointer when usm is used. value_t *get_ptr() { return get_ptr(get_default_queue()); } /// Get memory pointer of the memory object, which is virtual pointer when /// usm is not used, and device pointer when usm is used. value_t *get_ptr(sycl::queue &q) { init(q); return _device_ptr; } /// Get the device memory object size in bytes. size_t get_size() { return _size; } template <size_t D = Dimension> typename std::enable_if<D == 1, T>::type &operator[](size_t index) { init(); #ifdef DPCT_USM_LEVEL_NONE return dpct::get_buffer<typename std::enable_if<D == 1, T>::type>( _device_ptr) .template get_access<sycl::access_mode::read_write>()[index]; #else return _device_ptr[index]; #endif // DPCT_USM_LEVEL_NONE } #ifdef DPCT_USM_LEVEL_NONE /// Get sycl::accessor for the device memory object when usm is not used. 
accessor_t get_access(sycl::handler &cgh) { return get_buffer(_device_ptr) .template reinterpret<T, Dimension>(_range) .template get_access<detail::memory_traits<Memory, T>::mode, detail::memory_traits<Memory, T>::target>(cgh); } #else /// Get dpct::accessor with dimension info for the device memory object /// when usm is used and dimension is greater than 1. template <size_t D = Dimension> typename std::enable_if<D != 1, dpct_accessor_t>::type get_access(sycl::handler &cgh) { return dpct_accessor_t((T *)_device_ptr, _range); } #endif // DPCT_USM_LEVEL_NONE private: device_memory(value_t *memory_ptr, size_t size) : _size(size), _range(size / sizeof(T)), _reference(true), _device_ptr(memory_ptr) {} void allocate_device(sycl::queue &q) { #ifndef DPCT_USM_LEVEL_NONE if (Memory == shared) { _device_ptr = (value_t *)sycl::malloc_shared( _size, q.get_device(), q.get_context()); return; } #endif _device_ptr = (value_t *)detail::dpct_malloc(_size, q); } size_t _size; sycl::range<Dimension> _range; bool _reference; value_t *_host_ptr; value_t *_device_ptr; }; template <class T, memory_region Memory> class device_memory<T, Memory, 0> : public device_memory<T, Memory, 1> { public: using base = device_memory<T, Memory, 1>; using value_t = typename base::value_t; using accessor_t = typename detail::memory_traits<Memory, T>::template accessor_t<0>; /// Constructor with initial value. device_memory(const value_t &val) : base(sycl::range<1>(1), {val}) {} /// Default constructor device_memory() : base(1) {} #ifdef DPCT_USM_LEVEL_NONE /// Get sycl::accessor for the device memory object when usm is not used. accessor_t get_access(sycl::handler &cgh) { auto buf = get_buffer(base::get_ptr()) .template reinterpret<T, 1>(sycl::range<1>(1)); return accessor_t(buf, cgh); } #endif // DPCT_USM_LEVEL_NONE }; } template <class T, size_t Dimension> using global_memory = detail::device_memory<T, global, Dimension>; template <class T, size_t Dimension> using constant_memory = detail::device_memory<T, constant, Dimension>; template <class T, size_t Dimension> using shared_memory = detail::device_memory<T, shared, Dimension>; // dpct::deprecated:: is for functionality that was introduced for compatibility // purpose, but relies on deprecated C++ features, which are either removed or // will be removed in the future standards. // Direct use of deprecated functionality in this namespace should be avoided. namespace deprecated { template <typename T> using usm_host_allocator = detail::deprecated::usm_allocator<T, sycl::usm::alloc::host>; template <typename T> using usm_device_allocator = detail::deprecated::usm_allocator<T, sycl::usm::alloc::shared>; } // namespace deprecated class pointer_attributes { public: void init(const void *ptr, sycl::queue &q = dpct::get_default_queue()) { #ifdef DPCT_USM_LEVEL_NONE throw std::runtime_error( "dpct::pointer_attributes: only works for USM pointer."); #else memory_type = sycl::get_pointer_type(ptr, q.get_context()); device_pointer = (memory_type != sycl::usm::alloc::unknown) ? ptr : nullptr; host_pointer = (memory_type != sycl::usm::alloc::unknown) && (memory_type != sycl::usm::alloc::device) ? 
        ptr : nullptr;
    sycl::device device_obj = sycl::get_pointer_device(ptr, q.get_context());
    device_id = dpct::dev_mgr::instance().get_device_id(device_obj);
#endif
  }

  sycl::usm::alloc get_memory_type() { return memory_type; }

  const void *get_device_pointer() { return device_pointer; }

  const void *get_host_pointer() { return host_pointer; }

  bool is_memory_shared() { return memory_type == sycl::usm::alloc::shared; }

  unsigned int get_device_id() { return device_id; }

private:
  sycl::usm::alloc memory_type = sycl::usm::alloc::unknown;
  const void *device_pointer = nullptr;
  const void *host_pointer = nullptr;
  unsigned int device_id = 0;
};
} // namespace dpct

#endif // __DPCT_MEMORY_HPP__
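// ---------------------------------------------------------------------------
// Usage sketch (not part of the header above): one plausible way to combine
// the dpct memory helpers documented in this file. The include line and the
// example_memory_roundtrip() name are illustrative assumptions, not part of
// the dpct API; only dpct_malloc/dpct_memset/dpct_memcpy/dpct_free and the
// memcpy_direction enumerators come from the header itself.
// ---------------------------------------------------------------------------
// #include "dpct/memory.hpp"  // i.e., the header defined above (path may vary)
#include <cstdio>
#include <vector>

static void example_memory_roundtrip() {
  constexpr size_t n = 256;
  std::vector<float> host(n, 1.0f);

  // Allocate n floats on the default device queue. With DPCT_USM_LEVEL_NONE
  // this is a virtual pointer backed by a sycl::buffer; otherwise USM memory.
  float *dev = static_cast<float *>(dpct::dpct_malloc(n * sizeof(float)));

  // Zero the allocation, copy host data in, then copy it back out. These
  // overloads block until the memset/copy has completed.
  dpct::dpct_memset(dev, 0, n * sizeof(float));
  dpct::dpct_memcpy(dev, host.data(), n * sizeof(float), dpct::host_to_device);
  dpct::dpct_memcpy(host.data(), dev, n * sizeof(float), dpct::device_to_host);

  dpct::dpct_free(dev);
  std::printf("host[0] = %f\n", host[0]);
}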
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/dpl_utils.hpp
//==---- dpl_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//

#ifndef __DPCT_DPL_UTILS_HPP__
#define __DPCT_DPL_UTILS_HPP__

#define ONEDPL_USE_DPCPP_BACKEND 1
#define __USE_DPCT 1

#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/numeric>

#include "dpl_extras/memory.h"
#include "dpl_extras/algorithm.h"
#include "dpl_extras/numeric.h"
#include "dpl_extras/iterators.h"
#include "dpl_extras/vector.h"
#include "dpl_extras/dpcpp_extensions.h"

#endif // __DPCT_DPL_UTILS_HPP__
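// ---------------------------------------------------------------------------
// Usage sketch (not part of the header above): a minimal example of driving a
// oneDPL algorithm through the headers pulled in by dpl_utils.hpp. It assumes
// a SYCL device with USM shared-allocation support; the example_device_sort()
// name is illustrative only.
// ---------------------------------------------------------------------------
#include <sycl/sycl.hpp>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>

static void example_device_sort() {
  sycl::queue q;
  constexpr int n = 8;

  // Shared USM so both host and device can touch the data.
  int *data = sycl::malloc_shared<int>(n, q);
  for (int i = 0; i < n; ++i)
    data[i] = n - i; // 8, 7, ..., 1

  // Run std::sort on the device through a oneDPL device execution policy.
  auto policy = oneapi::dpl::execution::make_device_policy(q);
  std::sort(policy, data, data + n); // data is now 1, 2, ..., 8

  sycl::free(data, q);
}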
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/math.hpp
//==---- math.hpp ---------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_MATH_HPP__ #define __DPCT_MATH_HPP__ #include <sycl/sycl.hpp> namespace dpct { namespace detail { template <typename VecT, class BinaryOperation, class = void> class vectorized_binary { public: inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) { VecT v4; for (size_t i = 0; i < v4.size(); ++i) { v4[i] = binary_op(a[i], b[i]); } return v4; } }; template <typename VecT, class BinaryOperation> class vectorized_binary< VecT, BinaryOperation, std::void_t<std::invoke_result_t<BinaryOperation, VecT, VecT>>> { public: inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) { return binary_op(a, b).template as<VecT>(); } }; template <typename T> bool isnan(const T a) { return sycl::isnan(a); } // TODO: Need add more specialization such as bfloat16 version. } // namespace detail /// Compute fast_length for variable-length array /// \param [in] a The array /// \param [in] len Length of the array /// \returns The computed fast_length inline float fast_length(const float *a, int len) { switch (len) { case 1: return a[0]; case 2: return sycl::fast_length(sycl::float2(a[0], a[1])); case 3: return sycl::fast_length(sycl::float3(a[0], a[1], a[2])); case 4: return sycl::fast_length(sycl::float4(a[0], a[1], a[2], a[3])); case 0: return 0; default: float f = 0; for (int i = 0; i < len; ++i) f += a[i] * a[i]; return sycl::sqrt(f); } } /// Calculate the square root of the input array. /// \param [in] a The array pointer /// \param [in] len Length of the array /// \returns The square root template <typename T> inline T length(const T *a, const int len) { switch (len) { case 1: return a[0]; case 2: return sycl::length(sycl::vec<T, 2>(a[0], a[1])); case 3: return sycl::length(sycl::vec<T, 3>(a[0], a[1], a[2])); case 4: return sycl::length(sycl::vec<T, 4>(a[0], a[1], a[2], a[3])); default: T ret = 0; for (int i = 0; i < len; ++i) ret += a[i] * a[i]; return sycl::sqrt(ret); } } /// Performs comparison. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline std::enable_if_t< std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool> compare(const T a, const T b, const BinaryOperation binary_op) { return binary_op(a, b); } template <typename T> inline std::enable_if_t< std::is_same_v<std::invoke_result_t<std::not_equal_to<>, T, T>, bool>, bool> compare(const T a, const T b, const std::not_equal_to<> binary_op) { return !detail::isnan(a) && !detail::isnan(b) && binary_op(a, b); } /// Performs unordered comparison. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline std::enable_if_t< std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool> unordered_compare(const T a, const T b, const BinaryOperation binary_op) { return detail::isnan(a) || detail::isnan(b) || binary_op(a, b); } /// Performs 2 element comparison and return true if both results are true. 
/// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline std::enable_if_t<T::size() == 2, bool> compare_both(const T a, const T b, const BinaryOperation binary_op) { return compare(a[0], b[0], binary_op) && compare(a[1], b[1], binary_op); } /// Performs 2 element unordered comparison and return true if both results are /// true. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline std::enable_if_t<T::size() == 2, bool> unordered_compare_both(const T a, const T b, const BinaryOperation binary_op) { return unordered_compare(a[0], b[0], binary_op) && unordered_compare(a[1], b[1], binary_op); } /// Performs 2 element comparison. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline std::enable_if_t<T::size() == 2, T> compare(const T a, const T b, const BinaryOperation binary_op) { return {compare(a[0], b[0], binary_op), compare(a[1], b[1], binary_op)}; } /// Performs 2 elements comparison, compare result of each element is 0 (false) /// or 0xffff (true), returns an unsigned int by composing compare result of two /// elements. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline unsigned compare_mask(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b, const BinaryOperation binary_op) { return sycl::vec<short, 2>(-compare(a[0], b[0], binary_op), -compare(a[1], b[1], binary_op)) .as<sycl::vec<unsigned, 1>>(); } /// Performs 2 element unordered comparison. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline std::enable_if_t<T::size() == 2, T> unordered_compare(const T a, const T b, const BinaryOperation binary_op) { return {unordered_compare(a[0], b[0], binary_op), unordered_compare(a[1], b[1], binary_op)}; } /// Performs 2 elements unordered comparison, compare result of each element is /// 0 (false) or 0xffff (true), returns an unsigned int by composing compare /// result of two elements. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline unsigned unordered_compare_mask(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b, const BinaryOperation binary_op) { return sycl::vec<short, 2>(-unordered_compare(a[0], b[0], binary_op), -unordered_compare(a[1], b[1], binary_op)) .as<sycl::vec<unsigned, 1>>(); } /// Determine whether 2 element value is NaN. /// \param [in] a The input value /// \returns the comparison result template <typename T> inline std::enable_if_t<T::size() == 2, T> isnan(const T a) { return {detail::isnan(a[0]), detail::isnan(a[1])}; } // min function overloads. // For floating-point types, `float` or `double` arguments are acceptable. 
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or // `std::int64_t` type arguments are acceptable. inline double min(const double a, const float b) { return sycl::fmin(a, static_cast<double>(b)); } inline double min(const float a, const double b) { return sycl::fmin(static_cast<double>(a), b); } inline float min(const float a, const float b) { return sycl::fmin(a, b); } inline double min(const double a, const double b) { return sycl::fmin(a, b); } inline std::uint32_t min(const std::uint32_t a, const std::int32_t b) { return sycl::min(a, static_cast<std::uint32_t>(b)); } inline std::uint32_t min(const std::int32_t a, const std::uint32_t b) { return sycl::min(static_cast<std::uint32_t>(a), b); } inline std::int32_t min(const std::int32_t a, const std::int32_t b) { return sycl::min(a, b); } inline std::uint32_t min(const std::uint32_t a, const std::uint32_t b) { return sycl::min(a, b); } inline std::uint64_t min(const std::uint64_t a, const std::int64_t b) { return sycl::min(a, static_cast<std::uint64_t>(b)); } inline std::uint64_t min(const std::int64_t a, const std::uint64_t b) { return sycl::min(static_cast<std::uint64_t>(a), b); } inline std::int64_t min(const std::int64_t a, const std::int64_t b) { return sycl::min(a, b); } inline std::uint64_t min(const std::uint64_t a, const std::uint64_t b) { return sycl::min(a, b); } inline std::uint64_t min(const std::uint64_t a, const std::int32_t b) { return sycl::min(a, static_cast<std::uint64_t>(b)); } inline std::uint64_t min(const std::int32_t a, const std::uint64_t b) { return sycl::min(static_cast<std::uint64_t>(a), b); } inline std::uint64_t min(const std::uint64_t a, const std::uint32_t b) { return sycl::min(a, static_cast<std::uint64_t>(b)); } inline std::uint64_t min(const std::uint32_t a, const std::uint64_t b) { return sycl::min(static_cast<std::uint64_t>(a), b); } // max function overloads. // For floating-point types, `float` or `double` arguments are acceptable. // For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or // `std::int64_t` type arguments are acceptable. 
inline double max(const double a, const float b) { return sycl::fmax(a, static_cast<double>(b)); } inline double max(const float a, const double b) { return sycl::fmax(static_cast<double>(a), b); } inline float max(const float a, const float b) { return sycl::fmax(a, b); } inline double max(const double a, const double b) { return sycl::fmax(a, b); } inline std::uint32_t max(const std::uint32_t a, const std::int32_t b) { return sycl::max(a, static_cast<std::uint32_t>(b)); } inline std::uint32_t max(const std::int32_t a, const std::uint32_t b) { return sycl::max(static_cast<std::uint32_t>(a), b); } inline std::int32_t max(const std::int32_t a, const std::int32_t b) { return sycl::max(a, b); } inline std::uint32_t max(const std::uint32_t a, const std::uint32_t b) { return sycl::max(a, b); } inline std::uint64_t max(const std::uint64_t a, const std::int64_t b) { return sycl::max(a, static_cast<std::uint64_t>(b)); } inline std::uint64_t max(const std::int64_t a, const std::uint64_t b) { return sycl::max(static_cast<std::uint64_t>(a), b); } inline std::int64_t max(const std::int64_t a, const std::int64_t b) { return sycl::max(a, b); } inline std::uint64_t max(const std::uint64_t a, const std::uint64_t b) { return sycl::max(a, b); } inline std::uint64_t max(const std::uint64_t a, const std::int32_t b) { return sycl::max(a, static_cast<std::uint64_t>(b)); } inline std::uint64_t max(const std::int32_t a, const std::uint64_t b) { return sycl::max(static_cast<std::uint64_t>(a), b); } inline std::uint64_t max(const std::uint64_t a, const std::uint32_t b) { return sycl::max(a, static_cast<std::uint64_t>(b)); } inline std::uint64_t max(const std::uint32_t a, const std::uint64_t b) { return sycl::max(static_cast<std::uint64_t>(a), b); } /// Performs relu saturation. /// \param [in] a The input value /// \returns the relu saturation result template <typename T> inline T relu(const T a) { if (!detail::isnan(a) && a < 0.f) return 0.f; return a; } template <class T> inline sycl::vec<T, 2> relu(const sycl::vec<T, 2> a) { return {relu(a[0]), relu(a[1])}; } /// Performs complex number multiply addition. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] c The third value /// \returns the operation result template <typename T> inline sycl::vec<T, 2> complex_mul_add(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b, const sycl::vec<T, 2> c) { return sycl::vec<T, 2>{a[0] * b[0] - a[1] * b[1] + c[0], a[0] * b[1] + a[1] * b[0] + c[1]}; } /// Performs 2 elements comparison and returns the bigger one. If either of /// inputs is NaN, then return NaN. /// \param [in] a The first value /// \param [in] b The second value /// \returns the bigger value template <typename T> inline T fmax_nan(const T a, const T b) { if (detail::isnan(a) || detail::isnan(b)) return NAN; return sycl::fmax(a, b); } template <typename T> inline sycl::vec<T, 2> fmax_nan(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b) { return {fmax_nan(a[0], b[0]), fmax_nan(a[1], b[1])}; } /// Performs 2 elements comparison and returns the smaller one. If either of /// inputs is NaN, then return NaN. 
/// \param [in] a The first value /// \param [in] b The second value /// \returns the smaller value template <typename T> inline T fmin_nan(const T a, const T b) { if (detail::isnan(a) || detail::isnan(b)) return NAN; return sycl::fmin(a, b); } template <typename T> inline sycl::vec<T, 2> fmin_nan(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b) { return {fmin_nan(a[0], b[0]), fmin_nan(a[1], b[1])}; } /// A sycl::abs wrapper functors. struct abs { template <typename T> auto operator()(const T x) const { return sycl::abs(x); } }; /// A sycl::abs_diff wrapper functors. struct abs_diff { template <typename T> auto operator()(const T x, const T y) const { return sycl::abs_diff(x, y); } }; /// A sycl::add_sat wrapper functors. struct add_sat { template <typename T> auto operator()(const T x, const T y) const { return sycl::add_sat(x, y); } }; /// A sycl::rhadd wrapper functors. struct rhadd { template <typename T> auto operator()(const T x, const T y) const { return sycl::rhadd(x, y); } }; /// A sycl::hadd wrapper functors. struct hadd { template <typename T> auto operator()(const T x, const T y) const { return sycl::hadd(x, y); } }; /// A sycl::max wrapper functors. struct maximum { template <typename T> auto operator()(const T x, const T y) const { return sycl::max(x, y); } }; /// A sycl::min wrapper functors. struct minimum { template <typename T> auto operator()(const T x, const T y) const { return sycl::min(x, y); } }; /// A sycl::sub_sat wrapper functors. struct sub_sat { template <typename T> auto operator()(const T x, const T y) const { return sycl::sub_sat(x, y); } }; /// Compute vectorized binary operation value for two values, with each value /// treated as a vector type \p VecT. /// \tparam [in] VecT The type of the vector /// \tparam [in] BinaryOperation The binary operation class /// \param [in] a The first value /// \param [in] b The second value /// \returns The vectorized binary operation value of the two values template <typename VecT, class BinaryOperation> inline unsigned vectorized_binary(unsigned a, unsigned b, const BinaryOperation binary_op) { sycl::vec<unsigned, 1> v0{a}, v1{b}; auto v2 = v0.as<VecT>(); auto v3 = v1.as<VecT>(); auto v4 = detail::vectorized_binary<VecT, BinaryOperation>()(v2, v3, binary_op); v0 = v4.template as<sycl::vec<unsigned, 1>>(); return v0; } /// Compute vectorized isgreater for two values, with each value treated as a /// vector type \p S. /// \tparam [in] S The type of the vector /// \tparam [in] T The type of the original values /// \param [in] a The first value /// \param [in] b The second value /// \returns The vectorized greater than of the two values template <typename S, typename T> inline T vectorized_isgreater(T a, T b) { sycl::vec<T, 1> v0{a}, v1{b}; auto v2 = v0.template as<S>(); auto v3 = v1.template as<S>(); auto v4 = v2 > v3; v0 = v4.template as<sycl::vec<T, 1>>(); return v0; } /// Compute vectorized max for two values, with each value treated as a vector /// type \p S. /// \tparam [in] S The type of the vector /// \tparam [in] T The type of the original values /// \param [in] a The first value /// \param [in] b The second value /// \returns The vectorized max of the two values template <typename S, typename T> inline T vectorized_max(T a, T b) { sycl::vec<T, 1> v0{a}, v1{b}; auto v2 = v0.template as<S>(); auto v3 = v1.template as<S>(); auto v4 = sycl::max(v2, v3); v0 = v4.template as<sycl::vec<T, 1>>(); return v0; } /// Compute vectorized min for two values, with each value treated as a vector /// type \p S. 
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized min of the two values
template <typename S, typename T> inline T vectorized_min(T a, T b) {
  sycl::vec<T, 1> v0{a}, v1{b};
  auto v2 = v0.template as<S>();
  auto v3 = v1.template as<S>();
  auto v4 = sycl::min(v2, v3);
  v0 = v4.template as<sycl::vec<T, 1>>();
  return v0;
}

/// Compute vectorized unary operation for a value, with the value treated as a
/// vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \tparam [in] UnaryOperation The unary operation class
/// \param [in] a The input value
/// \returns The vectorized unary operation value of the input value
template <typename VecT, class UnaryOperation>
inline unsigned vectorized_unary(unsigned a, const UnaryOperation unary_op) {
  sycl::vec<unsigned, 1> v0{a};
  auto v1 = v0.as<VecT>();
  auto v2 = unary_op(v1);
  v0 = v2.template as<sycl::vec<unsigned, 1>>();
  return v0;
}

/// Compute the sum of the element-wise absolute differences of two values
/// without modulo overflow, with each value treated as a vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The sum of the element-wise absolute differences of the two values
template <typename VecT>
inline unsigned vectorized_sum_abs_diff(unsigned a, unsigned b) {
  sycl::vec<unsigned, 1> v0{a}, v1{b};
  auto v2 = v0.as<VecT>();
  auto v3 = v1.as<VecT>();
  auto v4 = sycl::abs_diff(v2, v3);
  unsigned sum = 0;
  for (size_t i = 0; i < v4.size(); ++i) {
    sum += v4[i];
  }
  return sum;
}
} // namespace dpct

#endif // __DPCT_MATH_HPP__
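// ---------------------------------------------------------------------------
// Usage sketch (not part of the header above): how the vectorized helpers can
// operate on two 16-bit lanes packed into one 32-bit word. The lane values in
// the comments assume a little-endian host; the example_vectorized_math()
// name is illustrative only.
// ---------------------------------------------------------------------------
#include <sycl/sycl.hpp>
#include <cstdio>

static void example_vectorized_math() {
  unsigned a = 0x00030001u; // lanes {1, 3} when viewed as sycl::ushort2
  unsigned b = 0x00010002u; // lanes {2, 1}

  // Per-lane saturating add via the add_sat functor: lanes {3, 4}.
  unsigned sum = dpct::vectorized_binary<sycl::ushort2>(a, b, dpct::add_sat());

  // Per-lane maximum: lanes {2, 3}.
  unsigned mx = dpct::vectorized_max<sycl::ushort2>(a, b);

  // Sum of per-lane absolute differences: |1 - 2| + |3 - 1| = 3.
  unsigned sad = dpct::vectorized_sum_abs_diff<sycl::ushort2>(a, b);

  std::printf("sum=0x%08x max=0x%08x sad=%u\n", sum, mx, sad);
}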
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/blas_utils.hpp
//==---- blas_utils.hpp----------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_BLAS_UTILS_HPP__ #define __DPCT_BLAS_UTILS_HPP__ #include "memory.hpp" #include "util.hpp" #include "lib_common_utils.hpp" #include <sycl/sycl.hpp> #include <oneapi/mkl.hpp> #include <utility> #include <vector> #include <thread> namespace dpct { /// Get the value of \p s. /// Copy the data to host synchronously, then return the data. /// \param [in] p The pointer points the data. /// \param [in] q The queue where the memory copy should be executed. template <typename T> inline auto get_value(const T *s, sycl::queue &q) { return detail::get_value(s, q); } namespace detail { inline void mem_free(sycl::queue *exec_queue, std::vector<void *> pointers_array, sycl::event e) { e.wait(); for (auto p : pointers_array) sycl::free(p, *exec_queue); } inline int stride_for(int num_elems, int mem_align_in_elems) { return ((num_elems - 1) / mem_align_in_elems + 1) * mem_align_in_elems; } #ifndef DPCT_USM_LEVEL_NONE template<typename T> class working_memory { T *_input_ptr; T *_temp_ptr; bool _is_sycl_malloced = false; bool _is_scalar_value = false; sycl::queue _q; sycl::event _e; public: working_memory(size_t size, sycl::queue q) : _q(q) { _is_scalar_value = false; _temp_ptr = (T *)sycl::malloc_device(size, q); } working_memory(T *result_ptr, sycl::queue q) : _input_ptr(result_ptr), _q(q) { _is_scalar_value = true; _is_sycl_malloced = sycl::get_pointer_type(_input_ptr, _q.get_context()) != sycl::usm::alloc::unknown; if (!_is_sycl_malloced) _temp_ptr = sycl::malloc_shared<T>(1, _q); } auto get_ptr() { if (_is_scalar_value && _is_sycl_malloced) return _input_ptr; return _temp_ptr; } void set_event(sycl::event e) { _e = e; } ~working_memory() { if (_is_scalar_value) { if (!_is_sycl_malloced) { _q.memcpy(_input_ptr, _temp_ptr, sizeof(T)).wait(); sycl::free(_temp_ptr, _q); } } else { std::vector<void *> ptrs{_temp_ptr}; dpct::async_dpct_free(ptrs, {_e}); } } }; #endif template <typename Tx, typename Tr> inline void nrm2_impl(sycl::queue &q, int n, const void *x, int incx, void *result) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else #ifdef DPCT_USM_LEVEL_NONE auto x_buffer = dpct::get_buffer<Tx>(x); auto r_buffer = sycl::buffer<Tr, 1>(reinterpret_cast<Tr *>(result), sycl::range<1>(1)); if (dpct::is_device_ptr(result)) r_buffer = dpct::get_buffer<Tr>(result); oneapi::mkl::blas::column_major::nrm2(q, n, x_buffer, incx, r_buffer); #else working_memory<Tr> res_mem(reinterpret_cast<Tr *>(result), q); oneapi::mkl::blas::column_major::nrm2(q, n, reinterpret_cast<const Tx *>(x), incx, res_mem.get_ptr()); #endif #endif } template <bool is_conjugate, class Txy, class Tr> inline void dotuc_impl(sycl::queue &q, int n, const Txy *x, int incx, const Txy *y, int incy, Tr *result) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else #ifdef DPCT_USM_LEVEL_NONE auto x_buffer = dpct::get_buffer<Txy>(x); auto y_buffer = dpct::get_buffer<Txy>(y); auto r_buffer = sycl::buffer<Tr, 1>((Tr *)result, sycl::range<1>(1)); if (dpct::is_device_ptr(result)) r_buffer = dpct::get_buffer<Tr>(result); if constexpr 
(std::is_same_v<Txy, std::complex<float>> || std::is_same_v<Txy, std::complex<double>>) { if constexpr (is_conjugate) oneapi::mkl::blas::column_major::dotc(q, n, x_buffer, incx, y_buffer, incy, r_buffer); else oneapi::mkl::blas::column_major::dotu(q, n, x_buffer, incx, y_buffer, incy, r_buffer); } else oneapi::mkl::blas::column_major::dot(q, n, x_buffer, incx, y_buffer, incy, r_buffer); #else working_memory<Tr> res_mem(result, q); if constexpr (std::is_same_v<Txy, std::complex<float>> || std::is_same_v<Txy, std::complex<double>>) { if constexpr (is_conjugate) oneapi::mkl::blas::column_major::dotc(q, n, x, incx, y, incy, res_mem.get_ptr()); else oneapi::mkl::blas::column_major::dotu(q, n, x, incx, y, incy, res_mem.get_ptr()); } else oneapi::mkl::blas::column_major::dot(q, n, x, incx, y, incy, res_mem.get_ptr()); #endif #endif } template <bool is_conjugate> inline void dotuc(sycl::queue &q, int n, const void *x, library_data_t x_type, int incx, const void *y, library_data_t y_type, int incy, void *result, library_data_t result_type) { std::uint64_t key = detail::get_type_combination_id(x_type, y_type, result_type); switch (key) { case detail::get_type_combination_id(library_data_t::real_float, library_data_t::real_float, library_data_t::real_float): { detail::dotuc_impl<is_conjugate>( q, n, reinterpret_cast<const float *>(x), incx, reinterpret_cast<const float *>(y), incy, reinterpret_cast<float *>(result)); break; } case detail::get_type_combination_id(library_data_t::real_double, library_data_t::real_double, library_data_t::real_double): { detail::dotuc_impl<is_conjugate>( q, n, reinterpret_cast<const double *>(x), incx, reinterpret_cast<const double *>(y), incy, reinterpret_cast<double *>(result)); break; } case detail::get_type_combination_id(library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float): { detail::dotuc_impl<is_conjugate>( q, n, reinterpret_cast<const std::complex<float> *>(x), incx, reinterpret_cast<const std::complex<float> *>(y), incy, reinterpret_cast<std::complex<float> *>(result)); break; } case detail::get_type_combination_id(library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double): { detail::dotuc_impl<is_conjugate>( q, n, reinterpret_cast<const std::complex<double> *>(x), incx, reinterpret_cast<const std::complex<double> *>(y), incy, reinterpret_cast<std::complex<double> *>(result)); break; } case detail::get_type_combination_id(library_data_t::real_half, library_data_t::real_half, library_data_t::real_half): { detail::dotuc_impl<is_conjugate>( q, n, reinterpret_cast<const sycl::half *>(x), incx, reinterpret_cast<const sycl::half *>(y), incy, reinterpret_cast<sycl::half *>(result)); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } template <class Tx, class Te> inline void scal_impl(sycl::queue &q, int n, const void *alpha, void *x, int incx) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q); auto data_x = get_memory(reinterpret_cast<Tx *>(x)); oneapi::mkl::blas::column_major::scal(q, n, alpha_val, data_x, incx); #endif } template <class Txy, class Te> inline void axpy_impl(sycl::queue &q, int n, const void *alpha, const void *x, int incx, void *y, int incy) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not 
support this API."); #else Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q); auto data_x = get_memory(reinterpret_cast<const Txy *>(x)); auto data_y = get_memory(reinterpret_cast<Txy *>(y)); oneapi::mkl::blas::column_major::axpy(q, n, alpha_val, data_x, incx, data_y, incy); #endif } template <class Txy, class Tc, class Ts> inline void rot_impl(sycl::queue &q, int n, void *x, int incx, void *y, int incy, const void *c, const void *s) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else Tc c_value = dpct::get_value(reinterpret_cast<const Tc *>(c), q); Ts s_value = dpct::get_value(reinterpret_cast<const Ts *>(s), q); auto data_x = get_memory(reinterpret_cast<Txy *>(x)); auto data_y = get_memory(reinterpret_cast<Txy *>(y)); oneapi::mkl::blas::column_major::rot(q, n, data_x, incx, data_y, incy, c_value, s_value); #endif } template <class Ta, class Tb, class Tc, class Ts> inline void gemm_impl(sycl::queue &q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m, int n, int k, const void *alpha, const void *a, int lda, const void *b, int ldb, const void *beta, void *c, int ldc) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q); Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q); auto data_a = get_memory(reinterpret_cast<const Ta *>(a)); auto data_b = get_memory(reinterpret_cast<const Tb *>(b)); auto data_c = get_memory(reinterpret_cast<Tc *>(c)); oneapi::mkl::blas::column_major::gemm( q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda, data_b, ldb, beta_value, data_c, ldc); #endif } template <class Ta, class Tb, class Tc, class Ts> inline void gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m, int n, int k, const void *alpha, const void **a, int lda, const void **b, int ldb, const void *beta, void **c, int ldc, int batch_size) { struct matrix_info_t { oneapi::mkl::transpose transpose_info[2]; Ts value_info[2]; std::int64_t size_info[3]; std::int64_t ld_info[3]; std::int64_t groupsize_info; }; Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q); Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q); matrix_info_t *matrix_info = (matrix_info_t *)std::malloc(sizeof(matrix_info_t)); matrix_info->transpose_info[0] = a_trans; matrix_info->transpose_info[1] = b_trans; matrix_info->value_info[0] = alpha_value; matrix_info->value_info[1] = beta_value; matrix_info->size_info[0] = m; matrix_info->size_info[1] = n; matrix_info->size_info[2] = k; matrix_info->ld_info[0] = lda; matrix_info->ld_info[1] = ldb; matrix_info->ld_info[2] = ldc; matrix_info->groupsize_info = batch_size; sycl::event e = oneapi::mkl::blas::column_major::gemm_batch( q, matrix_info->transpose_info, matrix_info->transpose_info + 1, matrix_info->size_info, matrix_info->size_info + 1, matrix_info->size_info + 2, matrix_info->value_info, reinterpret_cast<const Ta **>(a), matrix_info->ld_info, reinterpret_cast<const Tb **>(b), matrix_info->ld_info + 1, matrix_info->value_info + 1, reinterpret_cast<Tc **>(c), matrix_info->ld_info + 2, 1, &(matrix_info->groupsize_info)); q.submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { std::free(matrix_info); }); }); } template <class Ta, class Tb, class Tc, class Ts> inline 
void gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m, int n, int k, const void *alpha, const void *a, int lda, long long int stride_a, const void *b, int ldb, long long int stride_b, const void *beta, void *c, int ldc, long long int stride_c, int batch_size) { Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q); Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q); auto data_a = get_memory(reinterpret_cast<const Ta *>(a)); auto data_b = get_memory(reinterpret_cast<const Tb *>(b)); auto data_c = get_memory(reinterpret_cast<Tc *>(c)); oneapi::mkl::blas::column_major::gemm_batch( q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda, stride_a, data_b, ldb, stride_b, beta_value, data_c, ldc, stride_c, batch_size); } template <bool is_hermitian, class T, class Tbeta> inline void rk_impl(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::transpose trans, int n, int k, const T *alpha, const T *a, int lda, const T *b, int ldb, const Tbeta *beta, T *c, int ldc) { // For symmetric matrix, this function performs: C = alpha*OP(A)*(OP(B))^T + beta*C // For Hermitian matrix, this function performs: C = alpha*OP(A)*(OP(B))^H + beta*C // The gemmt() function performs: C = alpha*OPA(A)*OPB(B) + beta*C // So the OPB need be updated before we call gemmt(). using Ty = typename dpct::DataType<T>::T2; using Ts = typename dpct::DataType<Tbeta>::T2; Ty alpha_value = dpct::get_value(reinterpret_cast<const Ty *>(alpha), q); Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q); oneapi::mkl::transpose trans_A = trans, trans_B = trans; int origin_b_rows = trans == oneapi::mkl::transpose::nontrans ? n : k; int origin_b_cols = trans == oneapi::mkl::transpose::nontrans ? k : n; if ((is_hermitian && trans == oneapi::mkl::transpose::trans) || (!is_hermitian && !std::is_floating_point_v<Ty> && trans == oneapi::mkl::transpose::conjtrans)) { // In this case, OPB need be a conjugate operation, // but only notrans, conjtrans and trans are available. // So we need do a conjtrans operation first, then do a trans operation. trans_B = oneapi::mkl::transpose::trans; auto data_a = get_memory(reinterpret_cast<const Ty *>(a)); auto data_c = get_memory(reinterpret_cast<Ty *>(c)); #ifdef DPCT_USM_LEVEL_NONE auto new_B_buffer = sycl::buffer<Ty, 1>(sycl::range<1>(origin_b_rows * origin_b_cols)); auto from_buffer = dpct::get_buffer<Ty>(b); oneapi::mkl::blas::column_major::omatcopy_batch( q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols, Ts(1.0), from_buffer, ldb, origin_b_rows * ldb, new_B_buffer, origin_b_cols, origin_b_rows * origin_b_cols, 1); oneapi::mkl::blas::column_major::gemmt( q, uplo, trans_A, trans_B, n, k, alpha_value, data_a, lda, new_B_buffer, origin_b_cols, beta_value, data_c, ldc); #else working_memory<T> new_B(origin_b_rows * origin_b_cols * sizeof(T), q); oneapi::mkl::blas::column_major::omatcopy_batch( q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols, Ts(1.0), reinterpret_cast<const Ty *>(b), ldb, origin_b_rows * ldb, reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols, origin_b_rows * origin_b_cols, 1); sycl::event e = oneapi::mkl::blas::column_major::gemmt( q, uplo, trans_A, trans_B, n, k, alpha_value, data_a, lda, reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols, beta_value, data_c, ldc); new_B.set_event(e); #endif } else { if constexpr (is_hermitian) { trans_B = trans == oneapi::mkl::transpose::nontrans ? 
oneapi::mkl::transpose::conjtrans : oneapi::mkl::transpose::nontrans; } else { trans_B = trans == oneapi::mkl::transpose::nontrans ? oneapi::mkl::transpose::trans : oneapi::mkl::transpose::nontrans; } auto data_a = get_memory(reinterpret_cast<const Ty *>(a)); auto data_b = get_memory(reinterpret_cast<const Ty *>(b)); auto data_c = get_memory(reinterpret_cast<Ty *>(c)); oneapi::mkl::blas::column_major::gemmt( q, uplo, trans_A, trans_B, n, k, alpha_value, data_a, lda, data_b, ldb, beta_value, data_c, ldc); } } template <class Ta, class Tb, class Ts> inline void trsm_batch_impl(sycl::queue &q, oneapi::mkl::side left_right, oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans, oneapi::mkl::diag unit_diag, int m, int n, const void *alpha, const void **a, int lda, void **b, int ldb, int batch_size) { struct matrix_info_t { matrix_info_t(oneapi::mkl::side side_info, oneapi::mkl::uplo uplo_info, oneapi::mkl::transpose transpose_info, oneapi::mkl::diag diag_info, Ts value_info, std::int64_t m, std::int64_t n, std::int64_t lda, std::int64_t ldb, std::int64_t groupsize_info) : side_info(side_info), uplo_info(uplo_info), transpose_info(transpose_info), diag_info(diag_info), value_info(value_info), groupsize_info(groupsize_info) { size_info[0] = m; size_info[1] = n; ld_info[0] = lda; ld_info[1] = ldb; } oneapi::mkl::side side_info; oneapi::mkl::uplo uplo_info; oneapi::mkl::transpose transpose_info; oneapi::mkl::diag diag_info; Ts value_info; std::int64_t size_info[2]; std::int64_t ld_info[2]; std::int64_t groupsize_info; }; Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q); matrix_info_t *matrix_info = new matrix_info_t(left_right, upper_lower, trans, unit_diag, alpha_value, m, n, lda, ldb, batch_size); sycl::event e = oneapi::mkl::blas::column_major::trsm_batch( q, &(matrix_info->side_info), &(matrix_info->uplo_info), &(matrix_info->transpose_info), &(matrix_info->diag_info), matrix_info->size_info, matrix_info->size_info + 1, &(matrix_info->value_info), reinterpret_cast<const Ta **>(a), matrix_info->ld_info, reinterpret_cast<Tb **>(b), matrix_info->ld_info + 1, 1, &(matrix_info->groupsize_info)); q.submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { delete matrix_info; }); }); } template <typename T> inline void getrfnp_batch_wrapper(sycl::queue &exec_queue, int n, T *a[], int lda, int *info, int batch_size) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using Ty = typename DataType<T>::T2; // Set the info array value to 0 detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size); std::int64_t stride_a = n * lda; std::int64_t scratchpad_size = oneapi::mkl::lapack::getrfnp_batch_scratchpad_size<Ty>( exec_queue, n, n, lda, stride_a, batch_size); Ty *a_strided_mem = (Ty *)dpct::dpct_malloc(stride_a * batch_size * sizeof(Ty), exec_queue); T **host_a = (T **)malloc(batch_size * sizeof(T *)); dpct::dpct_memcpy(host_a, a, batch_size * sizeof(T *)); for (std::int64_t i = 0; i < batch_size; ++i) dpct::dpct_memcpy(a_strided_mem + i * stride_a, host_a[i], n * lda * sizeof(T)); #ifdef DPCT_USM_LEVEL_NONE { sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)}; auto a_buffer = get_buffer<Ty>(a_strided_mem); oneapi::mkl::lapack::getrfnp_batch(exec_queue, n, n, a_buffer, lda, stride_a, batch_size, scratchpad, scratchpad_size); } std::vector<sycl::event> events; for (std::int64_t i = 0; i < batch_size; ++i) 
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i], a_strided_mem + i * stride_a, n * lda * sizeof(T), automatic)); #else Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue); sycl::event e = oneapi::mkl::lapack::getrfnp_batch( exec_queue, n, n, a_strided_mem, lda, stride_a, batch_size, scratchpad, scratchpad_size); std::vector<sycl::event> events; for (std::int64_t i = 0; i < batch_size; ++i) events.push_back(detail::dpct_memcpy(exec_queue, host_a[i], a_strided_mem + i * stride_a, n * lda * sizeof(T), automatic, {e})); std::vector<void *> ptrs{scratchpad, a_strided_mem}; dpct::async_dpct_free(ptrs, events, exec_queue); #endif exec_queue.submit([&](sycl::handler &cgh) { cgh.depends_on(events); cgh.host_task([=] { free(host_a); }); }); #endif } } // namespace detail inline oneapi::mkl::transpose get_transpose(int t) { if (t == 0) { return oneapi::mkl::transpose::nontrans; } else if (t == 1) { return oneapi::mkl::transpose::trans; } else { return oneapi::mkl::transpose::conjtrans; } } /// Computes the LU factorizations of a batch of general matrices. /// \param [in] exec_queue The queue where the routine should be executed. /// \param [in] n The order of the matrices. /// \param [in, out] a Array of pointers to matrices. These matrices will be /// overwritten by lower triangulars with unit diagonal elements and upper /// triangulars. /// \param [in] lda The leading dimension of the matrices. /// \param [out] ipiv An array stores the pivot indices. If \p ipiv is nullptr, /// non-pivoting LU factorization is computed. /// \param [out] info An array stores the error information. /// \param [in] batch_size The size of the batch. template <typename T> inline void getrf_batch_wrapper(sycl::queue &exec_queue, int n, T *a[], int lda, int *ipiv, int *info, int batch_size) { if (ipiv == nullptr) { detail::getrfnp_batch_wrapper(exec_queue, n, a, lda, info, batch_size); return; } using Ty = typename DataType<T>::T2; // Set the info array value to 0 detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size); #ifdef DPCT_USM_LEVEL_NONE std::int64_t stride_a = n * lda; std::int64_t stride_ipiv = n; std::int64_t scratchpad_size = oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>( exec_queue, n, n, lda, stride_a, stride_ipiv, batch_size); T *a_buffer_ptr; a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T)); T **host_a = (T **)malloc(batch_size * sizeof(T *)); dpct_memcpy(host_a, a, batch_size * sizeof(T *)); for (std::int64_t i = 0; i < batch_size; ++i) dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T)); { sycl::buffer<std::int64_t, 1> ipiv_buf( sycl::range<1>(batch_size * stride_ipiv)); sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)}; auto a_buffer = get_buffer<Ty>(a_buffer_ptr); oneapi::mkl::lapack::getrf_batch(exec_queue, n, n, a_buffer, lda, stride_a, ipiv_buf, stride_ipiv, batch_size, scratchpad, scratchpad_size); auto to_buffer = get_buffer<int>(ipiv); exec_queue.submit([&](sycl::handler &cgh) { auto from_acc = ipiv_buf.get_access<sycl::access_mode::read>(cgh); auto to_acc = to_buffer.get_access<sycl::access_mode::write>(cgh); cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>( sycl::range<2>(batch_size, n), [=](sycl::id<2> id) { to_acc[id.get(0) * n + id.get(1)] = static_cast<int>(from_acc[id.get(0) * stride_ipiv + id.get(1)]); }); }); } // Copy back to the original buffers std::vector<sycl::event> events; for (std::int64_t i = 0; i < batch_size; ++i) 
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i], a_buffer_ptr + i * stride_a, n * lda * sizeof(T), automatic)); std::vector<void *> ptrs{host_a}; std::thread mem_free_thread( [=](std::vector<void *> pointers_array, std::vector<sycl::event> events_array) { sycl::event::wait(events_array); for (auto p : pointers_array) free(p); }, ptrs, events); mem_free_thread.detach(); #else std::int64_t m_int64 = n; std::int64_t n_int64 = n; std::int64_t lda_int64 = lda; std::int64_t group_sizes = batch_size; std::int64_t scratchpad_size = oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>( exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes); Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue); std::int64_t *ipiv_int64 = sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue); std::int64_t **ipiv_int64_ptr = sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue); T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue); exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *)).wait(); for (std::int64_t i = 0; i < batch_size; ++i) ipiv_int64_ptr[i] = ipiv_int64 + n * i; oneapi::mkl::lapack::getrf_batch(exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64, ipiv_int64_ptr, 1, &group_sizes, scratchpad, scratchpad_size); sycl::event e = exec_queue.submit([&](sycl::handler &cgh) { cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>( sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) { ipiv[idx] = static_cast<int>(ipiv_int64[idx]); }); }); std::vector<void *> ptrs{scratchpad, ipiv_int64, ipiv_int64_ptr, a_shared}; async_dpct_free(ptrs, {e}, exec_queue); #endif } /// Solves a system of linear equations with a batch of LU-factored square /// coefficient matrices, with multiple right-hand sides. /// \param [in] exec_queue The queue where the routine should be executed. /// \param [in] trans Indicates the form of the linear equations. /// \param [in] n The order of the matrices. /// \param [in] nrhs The number of right hand sides. /// \param [in] a Array of pointers to matrices. /// \param [in] lda The leading dimension of the matrices in \p a. /// \param [in] ipiv An array stores the pivots. /// \param [in, out] b Array of pointers to matrices, whose columns are /// the right-hand sides for the systems of equations. /// \param [in] ldb The leading dimension of the matrices in \p b. /// \param [out] info A value stores the error information. /// \param [in] batch_size The size of the batch. 
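/// Usage sketch (illustrative addition, not from the original header): solving
/// a batch of systems whose LU factors and pivots were produced by
/// getrf_batch_wrapper above. The queue q, the pointer arrays d_a and d_b, and
/// the pivot array d_ipiv are assumed to be valid device-accessible data.
/// \code
/// // float **d_a;   // batch_size pointers to LU-factored matrices
/// // float **d_b;   // batch_size pointers to right-hand sides (overwritten)
/// // int *d_ipiv;   // pivots from getrf_batch_wrapper
/// // int info;
/// dpct::getrs_batch_wrapper(q, oneapi::mkl::transpose::nontrans, n, nrhs,
///                           const_cast<const float **>(d_a), lda, d_ipiv,
///                           d_b, ldb, &info, batch_size);
/// \endcode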
template <typename T> inline void getrs_batch_wrapper(sycl::queue &exec_queue, oneapi::mkl::transpose trans, int n, int nrhs, const T *a[], int lda, int *ipiv, T *b[], int ldb, int *info, int batch_size) { using Ty = typename DataType<T>::T2; // Set the info value to 0 *info = 0; #ifdef DPCT_USM_LEVEL_NONE std::int64_t stride_a = n * lda; std::int64_t stride_b = nrhs * ldb; std::int64_t stride_ipiv = n; std::int64_t scratchpad_size = oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>( exec_queue, trans, n, nrhs, lda, stride_a, stride_ipiv, ldb, stride_b, batch_size); T *a_buffer_ptr, *b_buffer_ptr; a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T)); b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T)); T **host_a = (T **)malloc(batch_size * sizeof(T *)); T **host_b = (T **)malloc(batch_size * sizeof(T *)); dpct_memcpy(host_a, a, batch_size * sizeof(T *)); dpct_memcpy(host_b, b, batch_size * sizeof(T *)); for (std::int64_t i = 0; i < batch_size; ++i) { dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T)); dpct_memcpy(b_buffer_ptr + i * stride_b, host_b[i], nrhs * ldb * sizeof(T)); } { auto a_buffer = get_buffer<Ty>(a_buffer_ptr); auto b_buffer = get_buffer<Ty>(b_buffer_ptr); sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)}; sycl::buffer<std::int64_t, 1> ipiv_buf( sycl::range<1>(batch_size * stride_ipiv)); auto from_buf = get_buffer<int>(ipiv); exec_queue.submit([&](sycl::handler &cgh) { auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh); auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh); cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>( sycl::range<2>(batch_size, n), [=](sycl::id<2> id) { to_acc[id.get(0) * stride_ipiv + id.get(1)] = static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]); }); }); oneapi::mkl::lapack::getrs_batch(exec_queue, trans, n, nrhs, a_buffer, lda, stride_a, ipiv_buf, stride_ipiv, b_buffer, ldb, stride_b, batch_size, scratchpad, scratchpad_size); } // Copy back to the original buffers std::vector<sycl::event> events; for (std::int64_t i = 0; i < batch_size; ++i) events.push_back(detail::dpct_memcpy(exec_queue, host_b[i], b_buffer_ptr + i * stride_b, nrhs * ldb * sizeof(T), automatic)); std::vector<void *> ptrs{host_a, host_b}; std::thread mem_free_thread( [=](std::vector<void *> pointers_array, std::vector<sycl::event> events_array) { sycl::event::wait(events_array); for (auto p : pointers_array) free(p); }, ptrs, events); mem_free_thread.detach(); #else std::int64_t n_int64 = n; std::int64_t nrhs_int64 = nrhs; std::int64_t lda_int64 = lda; std::int64_t ldb_int64 = ldb; std::int64_t group_sizes = batch_size; std::int64_t scratchpad_size = oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>( exec_queue, &trans, &n_int64, &nrhs_int64, &lda_int64, &ldb_int64, 1, &group_sizes); Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue); std::int64_t *ipiv_int64 = sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue); std::int64_t **ipiv_int64_ptr = sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue); T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue); T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue); exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *)); exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *)); exec_queue.submit([&](sycl::handler &cgh) { cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>( sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) { 
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]); }); }).wait(); for (std::int64_t i = 0; i < batch_size; ++i) ipiv_int64_ptr[i] = ipiv_int64 + n * i; sycl::event e = oneapi::mkl::lapack::getrs_batch( exec_queue, &trans, &n_int64, &nrhs_int64, (Ty **)a_shared, &lda_int64, ipiv_int64_ptr, (Ty **)b_shared, &ldb_int64, 1, &group_sizes, scratchpad, scratchpad_size); std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared, b_shared}; async_dpct_free(ptrs, {e}, exec_queue); #endif } /// Computes the inverses of a batch of LU-factored matrices. /// \param [in] exec_queue The queue where the routine should be executed. /// \param [in] n The order of the matrices. /// \param [in] a Array of pointers to matrices. /// \param [in] lda The leading dimension of the matrices in \p a. /// \param [in] ipiv An array stores the pivots. /// \param [out] b Array of pointers to inverse matrices. /// \param [in] ldb The leading dimension of the matrices in \p b. /// \param [out] info An array stores the error information. /// \param [in] batch_size The size of the batch. template <typename T> inline void getri_batch_wrapper(sycl::queue &exec_queue, int n, const T *a[], int lda, int *ipiv, T *b[], int ldb, int *info, int batch_size) { using Ty = typename DataType<T>::T2; // Set the info array value to 0 detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size); #ifdef DPCT_USM_LEVEL_NONE std::int64_t stride_b = n * ldb; std::int64_t stride_ipiv = n; std::int64_t scratchpad_size = oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>( exec_queue, n, ldb, stride_b, stride_ipiv, batch_size); T *b_buffer_ptr; b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T)); T **host_a = (T **)malloc(batch_size * sizeof(T *)); T **host_b = (T **)malloc(batch_size * sizeof(T *)); dpct_memcpy(host_a, a, batch_size * sizeof(T *)); dpct_memcpy(host_b, b, batch_size * sizeof(T *)); for (std::int64_t i = 0; i < batch_size; ++i) { // Need to create a copy of input matrices "a" to keep them unchanged. // Matrices "b" (copy of matrices "a") will be used as input and output // parameter in oneapi::mkl::lapack::getri_batch call. 
matrix_mem_copy(b_buffer_ptr + i * stride_b, host_a[i], ldb, lda, n, n, dpct::device_to_device, exec_queue); } { auto b_buffer = get_buffer<Ty>(b_buffer_ptr); sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)}; sycl::buffer<std::int64_t, 1> ipiv_buf( sycl::range<1>(batch_size * stride_ipiv)); auto from_buf = get_buffer<int>(ipiv); exec_queue.submit([&](sycl::handler &cgh) { auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh); auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh); cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>( sycl::range<2>(batch_size, n), [=](sycl::id<2> id) { to_acc[id.get(0) * stride_ipiv + id.get(1)] = static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]); }); }); oneapi::mkl::lapack::getri_batch(exec_queue, n, b_buffer, ldb, stride_b, ipiv_buf, stride_ipiv, batch_size, scratchpad, scratchpad_size); } // Copy back to the original buffers std::vector<sycl::event> events; for (std::int64_t i = 0; i < batch_size; ++i) events.push_back(detail::dpct_memcpy(exec_queue, host_b[i], b_buffer_ptr + i * stride_b, n * ldb * sizeof(T), automatic)); std::vector<void *> ptrs{host_a, host_b}; std::thread mem_free_thread( [=](std::vector<void *> pointers_array, std::vector<sycl::event> events_array) { sycl::event::wait(events_array); for (auto p : pointers_array) free(p); }, ptrs, events); mem_free_thread.detach(); #else std::int64_t n_int64 = n; std::int64_t ldb_int64 = ldb; std::int64_t group_sizes = batch_size; std::int64_t scratchpad_size = oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>( exec_queue, &n_int64, &ldb_int64, 1, &group_sizes); Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue); std::int64_t *ipiv_int64 = sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue); std::int64_t **ipiv_int64_ptr = sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue); exec_queue.submit([&](sycl::handler &cgh) { cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>( sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) { ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]); }); }); T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue); T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue); exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *)); exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *)).wait(); for (std::int64_t i = 0; i < batch_size; ++i) { ipiv_int64_ptr[i] = ipiv_int64 + n * i; // Need to create a copy of input matrices "a" to keep them unchanged. // Matrices "b" (copy of matrices "a") will be used as input and output // parameter in oneapi::mkl::lapack::getri_batch call. matrix_mem_copy(b_shared[i], a_shared[i], ldb, lda, n, n, dpct::device_to_device, exec_queue); } sycl::event e = oneapi::mkl::lapack::getri_batch( exec_queue, &n_int64, (Ty **)b_shared, &ldb_int64, ipiv_int64_ptr, 1, &group_sizes, scratchpad, scratchpad_size); std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared, b_shared}; async_dpct_free(ptrs, {e}, exec_queue); #endif } /// Computes the QR factorizations of a batch of general matrices. /// \param [in] exec_queue The queue where the routine should be executed. /// \param [in] m The number of rows in the matrices. /// \param [in] n The number of columns in the matrices. /// \param [in, out] a Array of pointers to matrices. These /// matrices will be overwritten by the factorization data. /// \param [in] lda The leading dimension of the matrices in \p a. 
/// \param [out] tau An array stores the scalars. /// \param [out] info A value stores the error information. /// \param [in] batch_size The size of the batch. template <typename T> inline void geqrf_batch_wrapper(sycl::queue exec_queue, int m, int n, T *a[], int lda, T *tau[], int *info, int batch_size) { using Ty = typename DataType<T>::T2; // Set the info value to 0 *info = 0; #ifdef DPCT_USM_LEVEL_NONE std::int64_t stride_a = n * lda; std::int64_t stride_tau = std::max(1, std::min(m, n)); std::int64_t scratchpad_size = oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>( exec_queue, m, n, lda, stride_a, stride_tau, batch_size); T *a_buffer_ptr, *tau_buffer_ptr; a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T)); tau_buffer_ptr = (T *)dpct_malloc(stride_tau * batch_size * sizeof(T)); T **host_a = (T **)malloc(batch_size * sizeof(T *)); T **host_tau = (T **)malloc(batch_size * sizeof(T *)); dpct_memcpy(host_a, a, batch_size * sizeof(T *)); dpct_memcpy(host_tau, tau, batch_size * sizeof(T *)); for (std::int64_t i = 0; i < batch_size; ++i) dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T)); { auto a_buffer = get_buffer<Ty>(a_buffer_ptr); auto tau_buffer = get_buffer<Ty>(tau_buffer_ptr); sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)}; oneapi::mkl::lapack::geqrf_batch(exec_queue, m, n, a_buffer, lda, stride_a, tau_buffer, stride_tau, batch_size, scratchpad, scratchpad_size); } // Copy back to the original buffers std::vector<sycl::event> events_a; std::vector<sycl::event> events_tau; for (std::int64_t i = 0; i < batch_size; ++i) { events_a.push_back(detail::dpct_memcpy(exec_queue, host_a[i], a_buffer_ptr + i * stride_a, n * lda * sizeof(T), automatic)); events_tau.push_back(detail::dpct_memcpy( exec_queue, host_tau[i], tau_buffer_ptr + i * stride_tau, std::max(1, std::min(m, n)) * sizeof(T), automatic)); } std::vector<void *> ptr_a{host_a}; std::vector<void *> ptr_tau{host_tau}; std::thread mem_free_thread_a( [=](std::vector<void *> pointers_array, std::vector<sycl::event> events_array) { sycl::event::wait(events_array); for (auto p : pointers_array) free(p); }, ptr_a, events_a); std::thread mem_free_thread_tau( [=](std::vector<void *> pointers_array, std::vector<sycl::event> events_array) { sycl::event::wait(events_array); for (auto p : pointers_array) free(p); }, ptr_tau, events_tau); mem_free_thread_a.detach(); mem_free_thread_tau.detach(); #else std::int64_t m_int64 = n; std::int64_t n_int64 = n; std::int64_t lda_int64 = lda; std::int64_t group_sizes = batch_size; std::int64_t scratchpad_size = oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>( exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes); Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue); T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue); T **tau_shared = sycl::malloc_shared<T *>(batch_size, exec_queue); exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *)); exec_queue.memcpy(tau_shared, tau, batch_size * sizeof(T *)).wait(); sycl::event e = oneapi::mkl::lapack::geqrf_batch( exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64, (Ty **)tau_shared, 1, &group_sizes, scratchpad, scratchpad_size); std::vector<void *> ptrs{scratchpad, a_shared, tau_shared}; async_dpct_free(ptrs, {e}, exec_queue); #endif } /// Computes the Euclidean norm of a vector. /// \param [in] q The queue where the routine should be executed. /// \param [in] n Number of elements in vector x. /// \param [in] x Input vector x. 
/// \param [in] x_type Data type of the vector x. /// \param [in] incx Stride of vector x. /// \param [out] result The result scalar. /// \param [in] result_type Data type of the result. inline void nrm2(sycl::queue &q, int n, const void *x, library_data_t x_type, int incx, void *result, library_data_t result_type) { std::uint64_t key = detail::get_type_combination_id(x_type, result_type); switch (key) { case detail::get_type_combination_id(library_data_t::real_float, library_data_t::real_float): { detail::nrm2_impl<float, float>(q, n, x, incx, result); break; } case detail::get_type_combination_id(library_data_t::real_double, library_data_t::real_double): { detail::nrm2_impl<double, double>(q, n, x, incx, result); break; } case detail::get_type_combination_id(library_data_t::complex_float, library_data_t::real_float): { detail::nrm2_impl<std::complex<float>, float>( q, n, x, incx, result); break; } case detail::get_type_combination_id(library_data_t::complex_double, library_data_t::real_double): { detail::nrm2_impl<std::complex<double>, double>( q, n, x, incx, result); break; } case detail::get_type_combination_id(library_data_t::real_half, library_data_t::real_half): { detail::nrm2_impl<sycl::half, sycl::half>( q, n, x, incx, result); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } /// Computes the dot product of two vectors. /// \param [in] q The queue where the routine should be executed. /// \param [in] n Number of elements in vector x. /// \param [in] x Input vector x. /// \param [in] x_type Data type of the vector x. /// \param [in] incx Stride of vector x. /// \param [in] y Input vector y. /// \param [in] y_type Data type of the vector y. /// \param [in] incy Stride of vector y. /// \param [out] result The result scalar. /// \param [in] result_type Data type of the result. inline void dot(sycl::queue &q, int n, const void *x, library_data_t x_type, int incx, const void *y, library_data_t y_type, int incy, void *result, library_data_t result_type) { detail::dotuc<false>(q, n, x, x_type, incx, y, y_type, incy, result, result_type); } /// Computes the dot product of two vectors, conjugating the first vector. /// \param [in] q The queue where the routine should be executed. /// \param [in] n Number of elements in vector x. /// \param [in] x Input vector x. /// \param [in] x_type Data type of the vector x. /// \param [in] incx Stride of vector x. /// \param [in] y Input vector y. /// \param [in] y_type Data type of the vector y. /// \param [in] incy Stride of vector y. /// \param [out] result The result scalar. /// \param [in] result_type Data type of the result. inline void dotc(sycl::queue &q, int n, const void *x, library_data_t x_type, int incx, const void *y, library_data_t y_type, int incy, void *result, library_data_t result_type) { detail::dotuc<true>(q, n, x, x_type, incx, y, y_type, incy, result, result_type); } /// Computes the product of a vector by a scalar. /// \param [in] q The queue where the routine should be executed. /// \param [in] n Number of elements in vector x. /// \param [in] alpha The scale factor alpha. /// \param [in] alpha_type The data type of alpha. /// \param [in, out] x Input/Output vector x. /// \param [in] x_type Data type of the vector x. /// \param [in] incx Stride of vector x. 
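/// Usage sketch (illustrative addition, not from the original header): scaling
/// a single-precision device vector in place. The queue q and the device
/// pointer d_x are assumed to be valid.
/// \code
/// float alpha = 2.0f;
/// dpct::scal(q, n, &alpha, dpct::library_data_t::real_float,
///            d_x, dpct::library_data_t::real_float, 1);
/// \endcode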
inline void scal(sycl::queue &q, int n, const void *alpha, library_data_t alpha_type, void *x, library_data_t x_type, int incx) { std::uint64_t key = detail::get_type_combination_id(x_type); switch (key) { case detail::get_type_combination_id(library_data_t::real_float): { detail::scal_impl<float, float>(q, n, alpha, x, incx); break; } case detail::get_type_combination_id(library_data_t::real_double): { detail::scal_impl<double, double>(q, n, alpha, x, incx); break; } case detail::get_type_combination_id(library_data_t::complex_float): { detail::scal_impl<std::complex<float>, std::complex<float>>(q, n, alpha, x, incx); break; } case detail::get_type_combination_id(library_data_t::complex_double): { detail::scal_impl<std::complex<double>, std::complex<double>>( q, n, alpha, x, incx); break; } case detail::get_type_combination_id(library_data_t::real_half): { float alpha_value = dpct::get_value(reinterpret_cast<const float *>(alpha), q); sycl::half alaph_half(alpha_value); detail::scal_impl<sycl::half, sycl::half>(q, n, &alaph_half, x, incx); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } /// Computes a vector-scalar product and adds the result to a vector. /// \param [in] q The queue where the routine should be executed. /// \param [in] n Number of elements in vector x. /// \param [in] alpha The scale factor alpha. /// \param [in] alpha_type The data type of alpha. /// \param [in] x Input vector x. /// \param [in] x_type Data type of the vector x. /// \param [in] incx Stride of vector x. /// \param [in, out] y Input/Output vector y. /// \param [in] y_type Data type of the vector y. /// \param [in] incy Stride of vector y. inline void axpy(sycl::queue &q, int n, const void *alpha, library_data_t alpha_type, const void *x, library_data_t x_type, int incx, void *y, library_data_t y_type, int incy) { std::uint64_t key = detail::get_type_combination_id(x_type, alpha_type); switch (key) { case detail::get_type_combination_id(library_data_t::real_float, library_data_t::real_float): { detail::axpy_impl<float, float>(q, n, alpha, x, incx, y, incy); break; } case detail::get_type_combination_id(library_data_t::real_double, library_data_t::real_double): { detail::axpy_impl<double, double>(q, n, alpha, x, incx, y, incy); break; } case detail::get_type_combination_id(library_data_t::complex_float, library_data_t::complex_float): { detail::axpy_impl<std::complex<float>, std::complex<float>>( q, n, alpha, x, incx, y, incy); break; } case detail::get_type_combination_id(library_data_t::complex_double, library_data_t::complex_double): { detail::axpy_impl<std::complex<double>, std::complex<double>>( q, n, alpha, x, incx, y, incy); break; } case detail::get_type_combination_id(library_data_t::real_half, library_data_t::real_float): { float alpha_value = dpct::get_value(reinterpret_cast<const float *>(alpha), q); sycl::half alaph_half(alpha_value); detail::axpy_impl<sycl::half, sycl::half>(q, n, &alaph_half, x, incx, y, incy); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } /// Performs rotation of points in the plane. /// \param [in] q The queue where the routine should be executed. /// \param [in] n Number of elements in vector x. /// \param [in, out] x Input/Output vector x. /// \param [in] x_type Data type of the vector x. /// \param [in] incx Stride of vector x. /// \param [in, out] y Input/Output vector y. /// \param [in] y_type Data type of the vector y. /// \param [in] incy Stride of vector y. 
/// \param [in] c Scaling factor. /// \param [in] s Scaling factor. /// \param [in] cs_type Data type of the scaling factors. inline void rot(sycl::queue &q, int n, void *x, library_data_t x_type, int incx, void *y, library_data_t y_type, int incy, const void *c, const void *s, library_data_t cs_type) { std::uint64_t key = detail::get_type_combination_id(x_type, cs_type); switch (key) { case detail::get_type_combination_id(library_data_t::real_float, library_data_t::real_float): { detail::rot_impl<float, float, float>(q, n, x, incx, y, incy, c, s); break; } case detail::get_type_combination_id(library_data_t::real_double, library_data_t::real_double): { detail::rot_impl<double, double, double>(q, n, x, incx, y, incy, c, s); break; } case detail::get_type_combination_id(library_data_t::complex_float, library_data_t::real_float): { detail::rot_impl<std::complex<float>, float, float>(q, n, x, incx, y, incy, c, s); break; } case detail::get_type_combination_id(library_data_t::complex_double, library_data_t::real_double): { detail::rot_impl<std::complex<double>, double, double>(q, n, x, incx, y, incy, c, s); break; } case detail::get_type_combination_id(library_data_t::complex_float, library_data_t::complex_float): { detail::rot_impl<std::complex<float>, float, std::complex<float>>(q, n, x, incx, y, incy, c, s); break; } case detail::get_type_combination_id(library_data_t::complex_double, library_data_t::complex_double): { detail::rot_impl<std::complex<double>, double, std::complex<double>>(q, n, x, incx, y, incy, c, s); break; } case detail::get_type_combination_id(library_data_t::real_half, library_data_t::real_half): { detail::rot_impl<sycl::half, sycl::half, sycl::half>(q, n, x, incx, y, incy, c, s); break; } case detail::get_type_combination_id(library_data_t::real_bfloat16, library_data_t::real_bfloat16): { detail::rot_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16>(q, n, x, incx, y, incy, c, s); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } /// Computes matrix-matrix product with general matrices. /// \param [in] q The queue where the routine should be executed. /// \param [in] a_trans Specifies the operation applied to A. /// \param [in] b_trans Specifies the operation applied to B. /// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C. /// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C. /// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B). /// \param [in] alpha Scaling factor for the matrix-matrix product. /// \param [in] a Input matrix A. /// \param [in] a_type Data type of the matrix A. /// \param [in] lda Leading dimension of A. /// \param [in] b Input matrix B. /// \param [in] b_type Data type of the matrix B. /// \param [in] ldb Leading dimension of B. /// \param [in] beta Scaling factor for matrix C. /// \param [in, out] c Input/Output matrix C. /// \param [in] c_type Data type of the matrix C. /// \param [in] ldc Leading dimension of C. /// \param [in] scaling_type Data type of the scaling factors. 
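/// Usage sketch (illustrative addition, not from the original header): a
/// single-precision C = alpha*A*B + beta*C on column-major device data. The
/// queue q and the device pointers d_a, d_b, d_c are assumed to be valid.
/// \code
/// float alpha = 1.0f, beta = 0.0f;
/// dpct::gemm(q, oneapi::mkl::transpose::nontrans,
///            oneapi::mkl::transpose::nontrans, m, n, k, &alpha,
///            d_a, dpct::library_data_t::real_float, lda,
///            d_b, dpct::library_data_t::real_float, ldb, &beta,
///            d_c, dpct::library_data_t::real_float, ldc,
///            dpct::library_data_t::real_float);
/// \endcode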
inline void gemm(sycl::queue &q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m, int n, int k, const void *alpha, const void *a, library_data_t a_type, int lda, const void *b, library_data_t b_type, int ldb, const void *beta, void *c, library_data_t c_type, int ldc, library_data_t scaling_type) { bool matched = false; if (scaling_type == library_data_t::real_float && c_type == library_data_t::complex_float) { scaling_type = library_data_t::complex_float; } else if (scaling_type == library_data_t::real_double && c_type == library_data_t::complex_double) { scaling_type = library_data_t::complex_double; } std::uint64_t key = detail::get_type_combination_id(a_type, b_type, c_type, scaling_type); switch (key) { case detail::get_type_combination_id( library_data_t::real_float, library_data_t::real_float, library_data_t::real_float, library_data_t::real_float): { detail::gemm_impl<float, float, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_double, library_data_t::real_double, library_data_t::real_double, library_data_t::real_double): { detail::gemm_impl<double, double, double, double>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float): { detail::gemm_impl<std::complex<float>, std::complex<float>, std::complex<float>, std::complex<float>>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double): { detail::gemm_impl<std::complex<double>, std::complex<double>, std::complex<double>, std::complex<double>>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_half): { detail::gemm_impl<sycl::half, sycl::half, sycl::half, sycl::half>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float, library_data_t::real_float): { detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float, float>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_float, library_data_t::real_float): { detail::gemm_impl<sycl::half, sycl::half, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_float): { float alpha_value = dpct::get_value(reinterpret_cast<const float *>(alpha), q); float beta_value = dpct::get_value(reinterpret_cast<const float *>(beta), q); sycl::half alpha_half(alpha_value); sycl::half beta_half(beta_value); detail::gemm_impl<sycl::half, sycl::half, sycl::half, sycl::half>(q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb, &beta_half, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_float, 
library_data_t::real_float): { detail::gemm_impl<std::int8_t, std::int8_t, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float): { detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_int32, library_data_t::real_int32): { float alpha_float = dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q); float beta_float = dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q); detail::gemm_impl<std::int8_t, std::int8_t, std::int32_t, float>( q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb, &beta_float, c, ldc); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } /// Computes a batch of matrix-matrix product with general matrices. /// \param [in] q The queue where the routine should be executed. /// \param [in] a_trans Specifies the operation applied to A. /// \param [in] b_trans Specifies the operation applied to B. /// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C. /// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C. /// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B). /// \param [in] alpha Scaling factor for the matrix-matrix product. /// \param [in] a Input matrix A. /// \param [in] a_type Data type of the matrix A. /// \param [in] lda Leading dimension of A. /// \param [in] b Input matrix B. /// \param [in] b_type Data type of the matrix B. /// \param [in] ldb Leading dimension of B. /// \param [in] beta Scaling factor for matrix C. /// \param [in, out] c Input/Output matrix C. /// \param [in] c_type Data type of the matrix C. /// \param [in] ldc Leading dimension of C. /// \param [in] batch_size Specifies the number of matrix multiply operations to perform. /// \param [in] scaling_type Data type of the scaling factors. 
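/// Usage sketch (illustrative addition, not from the original header): a batch
/// of single-precision GEMMs given arrays of device pointers. This overload
/// requires USM (it throws when DPCT_USM_LEVEL_NONE is defined). d_a, d_b and
/// d_c are assumed to be arrays of batch_size valid device pointers.
/// \code
/// // const void **d_a, **d_b;  // batch_size pointers to A and B matrices
/// // void **d_c;               // batch_size pointers to C matrices
/// float alpha = 1.0f, beta = 0.0f;
/// dpct::gemm_batch(q, oneapi::mkl::transpose::nontrans,
///                  oneapi::mkl::transpose::nontrans, m, n, k, &alpha,
///                  d_a, dpct::library_data_t::real_float, lda,
///                  d_b, dpct::library_data_t::real_float, ldb, &beta,
///                  d_c, dpct::library_data_t::real_float, ldc,
///                  batch_size, dpct::library_data_t::real_float);
/// \endcode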
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m, int n, int k, const void *alpha, const void *a[], library_data_t a_type, int lda, const void *b[], library_data_t b_type, int ldb, const void *beta, void *c[], library_data_t c_type, int ldc, int batch_size, library_data_t scaling_type) { #ifdef DPCT_USM_LEVEL_NONE throw std::runtime_error("this API is unsupported when USM level is none"); #else bool matched = false; if (scaling_type == library_data_t::real_float && c_type == library_data_t::complex_float) { scaling_type = library_data_t::complex_float; } else if (scaling_type == library_data_t::real_double && c_type == library_data_t::complex_double) { scaling_type = library_data_t::complex_double; } std::uint64_t key = detail::get_type_combination_id(a_type, b_type, c_type, scaling_type); switch (key) { case detail::get_type_combination_id( library_data_t::real_float, library_data_t::real_float, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<float, float, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_double, library_data_t::real_double, library_data_t::real_double, library_data_t::real_double): { detail::gemm_batch_impl<double, double, double, double>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float): { detail::gemm_batch_impl<std::complex<float>, std::complex<float>, std::complex<float>, std::complex<float>>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double): { detail::gemm_batch_impl<std::complex<double>, std::complex<double>, std::complex<double>, std::complex<double>>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_half): { detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } #ifdef __INTEL_MKL__ case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float): { detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float, float>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_int32, library_data_t::real_int32): { float alpha_float = dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q); float beta_float = dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q); 
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t, float>(q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb, &beta_float, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<sycl::half, sycl::half, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } #endif case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_float): { float alpha_value = dpct::get_value(reinterpret_cast<const float *>(alpha), q); float beta_value = dpct::get_value(reinterpret_cast<const float *>(beta), q); sycl::half alpha_half(alpha_value); sycl::half beta_half(beta_value); detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>( q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb, &beta_half, c, ldc, batch_size); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } #endif } /// Computes a batch of matrix-matrix product with general matrices. /// \param [in] q The queue where the routine should be executed. /// \param [in] a_trans Specifies the operation applied to A. /// \param [in] b_trans Specifies the operation applied to B. /// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C. /// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C. /// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B). /// \param [in] alpha Scaling factor for the matrix-matrix product. /// \param [in] a Input matrix A. /// \param [in] a_type Data type of the matrix A. /// \param [in] lda Leading dimension of A. /// \param [in] stride_a Stride between the different A matrices. /// \param [in] b Input matrix B. /// \param [in] b_type Data type of the matrix B. /// \param [in] ldb Leading dimension of B. /// \param [in] stride_b Stride between the different B matrices. /// \param [in] beta Scaling factor for matrix C. /// \param [in, out] c Input/Output matrix C. /// \param [in] c_type Data type of the matrix C. /// \param [in] ldc Leading dimension of C. /// \param [in] stride_c Stride between the different C matrices. /// \param [in] batch_size Specifies the number of matrix multiply operations to perform. /// \param [in] scaling_type Data type of the scaling factors. 
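/// Usage sketch (illustrative addition, not from the original header): a
/// strided batch of single-precision GEMMs over contiguous device buffers
/// d_a, d_b and d_c (assumed valid). The strides shown assume densely packed
/// column-major matrices with no transpose.
/// \code
/// float alpha = 1.0f, beta = 0.0f;
/// dpct::gemm_batch(q, oneapi::mkl::transpose::nontrans,
///                  oneapi::mkl::transpose::nontrans, m, n, k, &alpha,
///                  d_a, dpct::library_data_t::real_float, lda, (long long)lda * k,
///                  d_b, dpct::library_data_t::real_float, ldb, (long long)ldb * n,
///                  &beta,
///                  d_c, dpct::library_data_t::real_float, ldc, (long long)ldc * n,
///                  batch_size, dpct::library_data_t::real_float);
/// \endcode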
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m, int n, int k, const void *alpha, const void *a, library_data_t a_type, int lda, long long int stride_a, const void *b, library_data_t b_type, int ldb, long long int stride_b, const void *beta, void *c, library_data_t c_type, int ldc, long long int stride_c, int batch_size, library_data_t scaling_type) { bool matched = false; if (scaling_type == library_data_t::real_float && c_type == library_data_t::complex_float) { scaling_type = library_data_t::complex_float; } else if (scaling_type == library_data_t::real_double && c_type == library_data_t::complex_double) { scaling_type = library_data_t::complex_double; } std::uint64_t key = detail::get_type_combination_id(a_type, b_type, c_type, scaling_type); switch (key) { case detail::get_type_combination_id( library_data_t::real_float, library_data_t::real_float, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<float, float, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_double, library_data_t::real_double, library_data_t::real_double, library_data_t::real_double): { detail::gemm_batch_impl<double, double, double, double>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float): { detail::gemm_batch_impl<std::complex<float>, std::complex<float>, std::complex<float>, std::complex<float>>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double): { detail::gemm_batch_impl<std::complex<double>, std::complex<double>, std::complex<double>, std::complex<double>>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_half): { detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } #ifdef __INTEL_MKL__ case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float): { detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float, float>(q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_int32, library_data_t::real_int32): { 
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t, std::int32_t>(q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<sycl::half, sycl::half, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } #endif case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_float): { float alpha_value = dpct::get_value(reinterpret_cast<const float *>(alpha), q); float beta_value = dpct::get_value(reinterpret_cast<const float *>(beta), q); sycl::half alpha_half(alpha_value); sycl::half beta_half(beta_value); detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>( q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, stride_a, b, ldb, stride_b, &beta_half, c, ldc, stride_c, batch_size); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } /// This routines perform a special rank-k update of a symmetric matrix C by /// general matrices A and B. /// \param [in] q The queue where the routine should be executed. /// \param [in] uplo Specifies whether C's data is stored in its upper or lower triangle. /// \param [in] trans Specifies the operation to apply. /// \param [in] n The number of rows and columns in C. /// \param [in] k The inner dimension of matrix multiplications. /// \param [in] alpha Scaling factor for the rank-k update. /// \param [in] a Input matrix A. /// \param [in] lda Leading dimension of A. /// \param [in] b Input matrix B. /// \param [in] ldb Leading dimension of B. /// \param [in] beta Scaling factor for the rank-k update. /// \param [in, out] c Input/Output matrix C. /// \param [in] ldc Leading dimension of C. template <class T> inline void syrk(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::transpose trans, int n, int k, const T *alpha, const T *a, int lda, const T *b, int ldb, const T *beta, T *c, int ldc) { detail::rk_impl<false, T, T>(q, uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } /// This routines perform a special rank-k update of a Hermitian matrix C by /// general matrices A and B. /// \param [in] q The queue where the routine should be executed. /// \param [in] uplo Specifies whether C's data is stored in its upper or lower triangle. /// \param [in] trans Specifies the operation to apply. /// \param [in] n The number of rows and columns in C. /// \param [in] k The inner dimension of matrix multiplications. /// \param [in] alpha Scaling factor for the rank-k update. /// \param [in] a Input matrix A. /// \param [in] lda Leading dimension of A. /// \param [in] b Input matrix B. /// \param [in] ldb Leading dimension of B. /// \param [in] beta Scaling factor for the rank-k update. /// \param [in, out] c Input/Output matrix C. /// \param [in] ldc Leading dimension of C. 
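/// Usage sketch (illustrative addition, not from the original header): a
/// single-precision complex rank-k update on the upper triangle of C. It
/// assumes sycl::float2 is the migrated complex type handled by
/// dpct::DataType, and that d_a, d_b and d_c are valid device pointers.
/// \code
/// sycl::float2 alpha{1.0f, 0.0f};
/// float beta = 0.0f;
/// dpct::herk(q, oneapi::mkl::uplo::upper, oneapi::mkl::transpose::nontrans,
///            n, k, &alpha, d_a, lda, d_b, ldb, &beta, d_c, ldc);
/// \endcode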
template <class T, class Tbeta> inline void herk(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::transpose trans, int n, int k, const T *alpha, const T *a, int lda, const T *b, int ldb, const Tbeta *beta, T *c, int ldc) { detail::rk_impl<true, T, Tbeta>(q, uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } /// This routine performs a group of trsm operations. Each trsm solves an /// equation of the form op(A) * X = alpha * B or X * op(A) = alpha * B. /// \param [in] q The queue where the routine should be executed. /// \param [in] left_right Specifies A multiplies X on the left or on the right. /// \param [in] upper_lower Specifies A is upper or lower triangular. /// \param [in] trans Specifies the operation applied to A. /// \param [in] unit_diag Specifies whether A is unit triangular. /// \param [in] m Number of rows of the B matrices. /// \param [in] n Number of columns of the B matrices. /// \param [in] alpha Scaling factor for the solutions. /// \param [in] a Input matrices A. /// \param [in] a_type Data type of the matrices A. /// \param [in] lda Leading dimension of the matrices A. /// \param [in, out] b Input and output matrices B. /// \param [in] b_type Data type of the matrices B. /// \param [in] ldb Leading dimension of the matrices B. /// \param [in] batch_size Specifies the number of trsm operations to perform. /// \param [in] scaling_type Data type of the scaling factors. inline void trsm_batch(sycl::queue &q, oneapi::mkl::side left_right, oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans, oneapi::mkl::diag unit_diag, int m, int n, const void *alpha, const void **a, library_data_t a_type, int lda, void **b, library_data_t b_type, int ldb, int batch_size, library_data_t scaling_type) { #ifdef DPCT_USM_LEVEL_NONE throw std::runtime_error("this API is unsupported when USM level is none"); #else std::uint64_t key = detail::get_type_combination_id(a_type, b_type, scaling_type); switch (key) { case detail::get_type_combination_id(library_data_t::real_float, library_data_t::real_float, library_data_t::real_float): { detail::trsm_batch_impl<float, float, float>(q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b, ldb, batch_size); break; } case detail::get_type_combination_id(library_data_t::real_double, library_data_t::real_double, library_data_t::real_double): { detail::trsm_batch_impl<double, double, double>( q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b, ldb, batch_size); break; } case detail::get_type_combination_id(library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float): { detail::trsm_batch_impl<std::complex<float>, std::complex<float>, std::complex<float>>(q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b, ldb, batch_size); break; } case detail::get_type_combination_id(library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double): { detail::trsm_batch_impl<std::complex<double>, std::complex<double>, std::complex<double>>(q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b, ldb, batch_size); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } #endif } /// Computes a triangular matrix-general matrix product. /// \param [in] q The queue where the routine should be executed. /// \param [in] left_right Specifies A is on the left or right side of the /// multiplication. /// \param [in] upper_lower Specifies A is upper or lower triangular. 
/// \param [in] trans Specifies the operation applied to A. /// \param [in] unit_diag Specifies whether A is unit triangular. /// \param [in] m Number of rows of B. /// \param [in] n Number of columns of B. /// \param [in] alpha Scaling factor for the matrix-matrix product. /// \param [in] a Input matrices A. /// \param [in] lda Leading dimension of the matrices A. /// \param [in] b Input matrices B. /// \param [in] ldb Leading dimension of the matrices B. /// \param [out] c Output matrices C. /// \param [in] ldc Leading dimension of the matrices C. template <class T> inline void trmm(sycl::queue &q, oneapi::mkl::side left_right, oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans, oneapi::mkl::diag unit_diag, int m, int n, const T *alpha, const T *a, int lda, const T *b, int ldb, T *c, int ldc) { using Ty = typename DataType<T>::T2; auto alpha_val = dpct::get_value(alpha, q); if (b != c) { dpct::matrix_mem_copy(c, b, ldc, ldb, m, n, dpct::device_to_device, q); } auto data_a = detail::get_memory(reinterpret_cast<const Ty *>(a)); auto data_c = detail::get_memory(reinterpret_cast<Ty *>(c)); oneapi::mkl::blas::column_major::trmm(q, left_right, upper_lower, trans, unit_diag, m, n, alpha_val, data_a, lda, data_c, ldc); } } // namespace dpct #endif // __DPCT_BLAS_UTILS_HPP__
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/atomic.hpp
//==---- atomic.hpp -------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_ATOMIC_HPP__ #define __DPCT_ATOMIC_HPP__ #include <sycl/sycl.hpp> namespace dpct { /// Atomically add the value operand to the value at the addr and assign the /// result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to add to the value at \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_fetch_add(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_add(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_fetch_add(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_add(operand); } /// Atomically add the value operand to the value at the addr and assign the /// result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to add to the value at \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_fetch_add(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_add<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_add<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_add<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_fetch_add(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_fetch_add<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically subtract the value operand from the value at the addr and assign /// the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to subtract from the value at \p addr /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. 
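/// Usage sketch (illustrative addition, not from the original header):
/// decrementing a device-resident counter from inside a kernel; `counter` is
/// assumed to point to global device memory.
/// \code
/// // Inside a SYCL kernel body:
/// int old = dpct::atomic_fetch_sub<int>(counter, 1);
/// \endcode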
template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_fetch_sub(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_sub(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_fetch_sub(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_sub(operand); } /// Atomically subtract the value operand from the value at the addr and assign /// the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to subtract from the value at \p addr /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_fetch_sub(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_sub<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_sub<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_sub<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_fetch_sub(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_fetch_sub<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically perform a bitwise AND between the value operand and the value at the addr /// and assign the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to use in bitwise AND operation with the value at the \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. 
template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_fetch_and(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_and(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_fetch_and(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_and(operand); } /// Atomically perform a bitwise AND between the value operand and the value at the addr /// and assign the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to use in bitwise AND operation with the value at the \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_fetch_and(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_and<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_and<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_and<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_fetch_and(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_fetch_and<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically or the value at the addr with the value operand, and assign /// the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to use in bitwise OR operation with the value at the \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. 
template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_fetch_or(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_or(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_fetch_or(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_or(operand); } /// Atomically or the value at the addr with the value operand, and assign /// the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to use in bitwise OR operation with the value at the \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_fetch_or(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_or<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_or<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_or<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_fetch_or(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_fetch_or<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically xor the value at the addr with the value operand, and assign /// the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to use in bitwise XOR operation with the value at the \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_fetch_xor(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_xor(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_fetch_xor(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_xor(operand); } /// Atomically xor the value at the addr with the value operand, and assign /// the result to the value at addr. 
/// \param [in, out] addr The pointer to the data. /// \param operand The value to use in bitwise XOR operation with the value at the \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_fetch_xor(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_xor<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_xor<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_xor<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_fetch_xor(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_fetch_xor<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically calculate the minimum of the value at addr and the value operand /// and assign the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_fetch_min(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_min(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_fetch_min(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_min(operand); } /// Atomically calculate the minimum of the value at addr and the value operand /// and assign the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_fetch_min(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_min<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_min<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_min<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. 
Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_fetch_min(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_fetch_min<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically calculate the maximum of the value at addr and the value operand /// and assign the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_fetch_max(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_max(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_fetch_max(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_max(operand); } /// Atomically calculate the maximum of the value at addr and the value operand /// and assign the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_fetch_max(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_max<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_max<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_max<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_fetch_max(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_fetch_max<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically set \p operand to the value stored in \p addr, if old value stored in /// \p addr is equal to zero or greater than \p operand, else decrease the value stored /// in \p addr. /// \param [in, out] addr The pointer to the data. /// \param operand The threshold value. /// \param memoryOrder The memory ordering used. /// \returns The old value stored in \p addr. 
template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline unsigned int atomic_fetch_compare_dec(unsigned int *addr, unsigned int operand) { auto atm = sycl::atomic_ref<unsigned int, memoryOrder, memoryScope, addressSpace>(addr[0]); unsigned int old; while (true) { old = atm.load(); if (old == 0 || old > operand) { if (atm.compare_exchange_strong(old, operand)) break; } else if (atm.compare_exchange_strong(old, old - 1)) break; } return old; } /// Atomically increment the value stored in \p addr if old value stored in \p /// addr is less than \p operand, else set 0 to the value stored in \p addr. /// \param [in, out] addr The pointer to the data. /// \param operand The threshold value. /// \param memoryOrder The memory ordering used. /// \returns The old value stored in \p addr. template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline unsigned int atomic_fetch_compare_inc(unsigned int *addr, unsigned int operand) { auto atm = sycl::atomic_ref<unsigned int, memoryOrder, memoryScope, addressSpace>(addr[0]); unsigned int old; while (true) { old = atm.load(); if (old >= operand) { if (atm.compare_exchange_strong(old, 0)) break; } else if (atm.compare_exchange_strong(old, old + 1)) break; } return old; } /// Atomically increment the value stored in \p addr if old value stored in \p /// addr is less than \p operand, else set 0 to the value stored in \p addr. /// \param [in, out] addr The pointer to the data. /// \param operand The threshold value. /// \param memoryOrder The memory ordering used. /// \returns The old value stored in \p addr. template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline unsigned int atomic_fetch_compare_inc(unsigned int *addr, unsigned int operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } /// Atomically exchange the value at the address addr with the value operand. /// \param [in, out] addr The pointer to the data. /// \param operand The value to be exchanged with the value pointed by \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. 
template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_exchange(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.exchange(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_exchange(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.exchange(operand); } /// Atomically exchange the value at the address addr with the value operand. /// \param [in, out] addr The pointer to the data. /// \param operand The value to be exchanged with the value pointed by \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_exchange(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_exchange<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_exchange<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_exchange<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_exchange(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_exchange<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically compare the value at \p addr to the value expected and exchange /// with the value desired if the value at \p addr is equal to the value expected. /// Returns the value at the \p addr before the call. /// \param [in, out] addr Multi_ptr. /// \param expected The value to compare against the value at \p addr. /// \param desired The value to assign to \p addr if the value at \p addr is expected. /// \param success The memory ordering used when comparison succeeds. /// \param fail The memory ordering used when comparison fails. /// \returns The value at the \p addr before the call. 
template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> T atomic_compare_exchange_strong( sycl::multi_ptr<T, addressSpace> addr, T expected, T desired, sycl::memory_order success = sycl::memory_order::relaxed, sycl::memory_order fail = sycl::memory_order::relaxed) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(*addr); atm.compare_exchange_strong(expected, desired, success, fail); return expected; } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2, typename T3> T1 atomic_compare_exchange_strong( sycl::multi_ptr<T1, addressSpace> addr, T2 expected, T3 desired, sycl::memory_order success = sycl::memory_order::relaxed, sycl::memory_order fail = sycl::memory_order::relaxed) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(*addr); T1 expected_value = expected; atm.compare_exchange_strong(expected_value, desired, success, fail); return expected_value; } /// Atomically compare the value at \p addr to the value expected and exchange /// with the value desired if the value at \p addr is equal to the value expected. /// Returns the value at the \p addr before the call. /// \param [in] addr The pointer to the data. /// \param expected The value to compare against the value at \p addr. /// \param desired The value to assign to \p addr if the value at \p addr is expected. /// \param success The memory ordering used when comparison succeeds. /// \param fail The memory ordering used when comparison fails. /// \returns The value at the \p addr before the call. 
template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> T atomic_compare_exchange_strong( T *addr, T expected, T desired, sycl::memory_order success = sycl::memory_order::relaxed, sycl::memory_order fail = sycl::memory_order::relaxed) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); atm.compare_exchange_strong(expected, desired, success, fail); return expected; } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2, typename T3> T1 atomic_compare_exchange_strong( T1 *addr, T2 expected, T3 desired, sycl::memory_order success = sycl::memory_order::relaxed, sycl::memory_order fail = sycl::memory_order::relaxed) { T1 expected_value = expected; auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); atm.compare_exchange_strong(expected_value, desired, success, fail); return expected_value; } /// Atomic extension to implement standard APIs in std::atomic namespace detail{ template <typename T> struct IsValidAtomicType { static constexpr bool value = (std::is_same<T, int>::value || std::is_same<T, unsigned int>::value || std::is_same<T, long>::value || std::is_same<T, unsigned long>::value || std::is_same<T, long long>::value || std::is_same<T, unsigned long long>::value || std::is_same<T, float>::value || std::is_same<T, double>::value || std::is_pointer<T>::value); }; } // namespace detail template <typename T, sycl::memory_scope DefaultScope = sycl::memory_scope::system, sycl::memory_order DefaultOrder = sycl::memory_order::seq_cst, sycl::access::address_space Space = sycl::access::address_space::generic_space> class atomic{ static_assert( detail::IsValidAtomicType<T>::value, "Invalid atomic type. Valid types are int, unsigned int, long, " "unsigned long, long long, unsigned long long, float, double " "and pointer types"); T __d; public: /// default memory synchronization order static constexpr sycl::memory_order default_read_order = sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space>::default_read_order; static constexpr sycl::memory_order default_write_order = sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space>::default_write_order; static constexpr sycl::memory_scope default_scope = DefaultScope; static constexpr sycl::memory_order default_read_modify_write_order = DefaultOrder; /// Default constructor. constexpr atomic() noexcept = default; /// Constructor with initialize value. constexpr atomic(T d) noexcept : __d(d){}; /// atomically replaces the value of the referenced object with a non-atomic argument /// \param operand The value to replace the pointed value. /// \param memoryOrder The memory ordering used. /// \param memoryScope The memory scope used. void store(T operand, sycl::memory_order memoryOrder = default_write_order, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); atm.store(operand, memoryOrder, memoryScope); } /// atomically obtains the value of the referenced object /// \param memoryOrder The memory ordering used. /// \param memoryScope The memory scope used. 
/// \returns The value of the referenced object T load(sycl::memory_order memoryOrder = default_read_order, sycl::memory_scope memoryScope = default_scope) const noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm( const_cast<T &>(__d)); return atm.load(memoryOrder, memoryScope); } /// atomically replaces the value of the referenced object and obtains the value held previously /// \param operand The value to replace the pointed value. /// \param memoryOrder The memory ordering used. /// \param memoryScope The memory scope used. /// \returns The value of the referenced object before the call. T exchange(T operand, sycl::memory_order memoryOrder = default_read_modify_write_order, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); return atm.exchange(operand, memoryOrder, memoryScope); } /// atomically compares the value of the referenced object with non-atomic argument /// and performs atomic exchange if equal or atomic load if not /// \param expected The value expected to be found in the object referenced by the atomic_ref object /// \param desired The value to store in the referenced object if it is as expected /// \param success The memory models for the read-modify-write /// \param failure The memory models for load operations /// \param memoryScope The memory scope used. /// \returns true if the referenced object was successfully changed, false otherwise. bool compare_exchange_weak( T &expected, T desired, sycl::memory_order success, sycl::memory_order failure, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); return atm.compare_exchange_weak(expected, desired, success, failure, memoryScope); } /// \param expected The value expected to be found in the object referenced by the atomic_ref object /// \param desired The value to store in the referenced object if it is as expected /// \param memoryOrder The memory synchronization ordering for operations /// \param memoryScope The memory scope used. /// \returns true if the referenced object was successfully changed, false otherwise. bool compare_exchange_weak(T &expected, T desired, sycl::memory_order memoryOrder = default_read_modify_write_order, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); return atm.compare_exchange_weak(expected, desired, memoryOrder, memoryScope); } /// atomically compares the value of the referenced object with non-atomic argument /// and performs atomic exchange if equal or atomic load if not /// \param expected The value expected to be found in the object referenced by the atomic_ref object /// \param desired The value to store in the referenced object if it is as expected /// \param success The memory models for the read-modify-write /// \param failure The memory models for load operations /// \param memoryScope The memory scope used. /// \returns true if the referenced object was successfully changed, false otherwise. 
bool compare_exchange_strong( T &expected, T desired, sycl::memory_order success, sycl::memory_order failure, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); return atm.compare_exchange_strong(expected, desired, success, failure, memoryScope); } /// \param expected The value expected to be found in the object referenced by the atomic_ref object /// \param desired The value to store in the referenced object if it is as expected /// \param memoryOrder The memory synchronization ordering for operations /// \param memoryScope The memory scope used. /// \returns true if the referenced object was successfully changed, false otherwise. bool compare_exchange_strong(T &expected, T desired, sycl::memory_order memoryOrder = default_read_modify_write_order, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); return atm.compare_exchange_strong(expected, desired, memoryOrder, memoryScope); } /// atomically adds the argument to the value stored in the atomic object and obtains the value held previously /// \param operand The other argument of arithmetic addition /// \param memoryOrder The memory ordering used. /// \param memoryScope The memory scope used. /// \returns The value of the referenced object before the call. T fetch_add(T operand, sycl::memory_order memoryOrder = default_read_modify_write_order, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); return atm.fetch_add(operand, memoryOrder, memoryScope); } /// atomically subtracts the argument from the value stored in the atomic object and obtains the value held previously /// \param operand The other argument of arithmetic subtraction /// \param memoryOrder The memory ordering used. /// \param memoryScope The memory scope used. /// \returns The value of the referenced object before the call. T fetch_sub(T operand, sycl::memory_order memoryOrder = default_read_modify_write_order, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); return atm.fetch_sub(operand, memoryOrder, memoryScope); } }; } // namespace dpct #endif // __DPCT_ATOMIC_HPP__
hpp
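A minimal usage sketch for the atomic helpers in atomic.hpp above: every work-item increments one shared counter through dpct::atomic_fetch_add, which defaults to a relaxed, device-scope atomic on a global-space pointer. The queue, the USM allocation, and the counter name are illustrative assumptions.

#include <sycl/sycl.hpp>
#include <dpct/atomic.hpp> // assumed include path for the header above

int main() {
  sycl::queue q;
  constexpr size_t n = 1024;
  unsigned int *count = sycl::malloc_shared<unsigned int>(1, q);
  *count = 0;
  q.parallel_for(sycl::range<1>{n}, [=](sycl::id<1>) {
     // Relaxed, device-scope fetch-add on global memory (the template defaults).
     dpct::atomic_fetch_add(count, 1u);
   }).wait();
  // *count now holds n (1024).
  sycl::free(count, q);
  return 0;
}

For code that prefers an object-style interface, the dpct::atomic<T> class template at the end of the header wraps the same sycl::atomic_ref machinery behind std::atomic-like load, store, exchange, compare_exchange_*, fetch_add, and fetch_sub members.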
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/rng_utils.hpp
//==---- rng_utils.hpp ----------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_RNG_UTILS_HPP__ #define __DPCT_RNG_UTILS_HPP__ #include <sycl/sycl.hpp> #include <oneapi/mkl.hpp> #ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this. #include <oneapi/mkl/rng/device.hpp> #endif #include "device.hpp" #include "lib_common_utils.hpp" namespace dpct { namespace rng { #ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this. namespace device { /// The random number generator on device. /// \tparam engine_t The device random number generator engine. It can only be /// oneapi::mkl::rng::device::mrg32k3a<1> or /// oneapi::mkl::rng::device::mrg32k3a<4> or /// oneapi::mkl::rng::device::philox4x32x10<1> or /// oneapi::mkl::rng::device::philox4x32x10<4>. template <typename engine_t> class rng_generator { static_assert( std::disjunction_v< std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>, std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<4>>, std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>, std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<4>>, std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>, "engine_t can only be oneapi::mkl::rng::device::mrg32k3a<1> or " "oneapi::mkl::rng::device::mrg32k3a<4> or " "oneapi::mkl::rng::device::philox4x32x10<1> or " "oneapi::mkl::rng::device::philox4x32x10<4> or " "oneapi::mkl::rng::device::mcg59<1>."); static constexpr bool _is_engine_vec_size_one = std::disjunction_v< std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>, std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>, std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>; static constexpr std::uint64_t default_seed = 0; oneapi::mkl::rng::device::bits<std::uint32_t> _distr_bits; oneapi::mkl::rng::device::uniform_bits<std::uint32_t> _distr_uniform_bits; oneapi::mkl::rng::device::gaussian<float> _distr_gaussian_float; oneapi::mkl::rng::device::gaussian<double> _distr_gaussian_double; oneapi::mkl::rng::device::lognormal<float> _distr_lognormal_float; oneapi::mkl::rng::device::lognormal<double> _distr_lognormal_double; oneapi::mkl::rng::device::poisson<std::uint32_t> _distr_poisson; oneapi::mkl::rng::device::uniform<float> _distr_uniform_float; oneapi::mkl::rng::device::uniform<double> _distr_uniform_double; engine_t _engine; public: /// Default constructor of rng_generator rng_generator() { _engine = engine_t(default_seed); } /// Constructor of rng_generator if engine type is not mcg59 /// \param [in] seed The seed to initialize the engine state. /// \param [in] num_to_skip Set the number of elements need to be skipped. /// The number is calculated as: num_to_skip[0] + num_to_skip[1] * 2^64 + /// num_to_skip[2] * 2^128 + ... + num_to_skip[n-1] * 2^(64*(n-1)) template <typename T = engine_t, typename std::enable_if<!std::is_same_v< T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr> rng_generator(std::uint64_t seed, std::initializer_list<std::uint64_t> num_to_skip) { _engine = engine_t(seed, num_to_skip); } /// Constructor of rng_generator if engine type is mcg59 /// \param [in] seed The seed to initialize the engine state. /// \param [in] num_to_skip Set the number of elements need to be skipped. 
template <typename T = engine_t, typename std::enable_if<std::is_same_v< T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr> rng_generator(std::uint64_t seed, std::uint64_t num_to_skip) { _engine = engine_t(seed, num_to_skip); } /// Generate random number(s) obeys distribution \tparam distr_t. /// \tparam T The distribution of the random number. It can only be /// oneapi::mkl::rng::device::bits<std::uint32_t>, /// oneapi::mkl::rng::device::uniform_bits<std::uint32_t>, /// oneapi::mkl::rng::device::gaussian<float>, /// oneapi::mkl::rng::device::gaussian<double>, /// oneapi::mkl::rng::device::lognormal<float>, /// oneapi::mkl::rng::device::lognormal<double>, /// oneapi::mkl::rng::device::poisson<std::uint32_t>, /// oneapi::mkl::rng::device::uniform<float> or /// oneapi::mkl::rng::device::uniform<double> /// \tparam vec_size The length of the return vector. It can only be 1, 2 /// or 4. /// \param distr_params The parameter(s) for lognormal or poisson /// distribution. /// \return The vector of the random number(s). template <typename distr_t, int vec_size, class... distr_params_t> auto generate(distr_params_t... distr_params) { static_assert(vec_size == 1 || vec_size == 2 || vec_size == 4, "vec_size is not supported."); static_assert( std::disjunction_v< std::is_same<distr_t, oneapi::mkl::rng::device::bits<std::uint32_t>>, std::is_same<distr_t, oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>, std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<float>>, std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<double>>, std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<float>>, std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<double>>, std::is_same<distr_t, oneapi::mkl::rng::device::poisson<std::uint32_t>>, std::is_same<distr_t, oneapi::mkl::rng::device::uniform<float>>, std::is_same<distr_t, oneapi::mkl::rng::device::uniform<double>>>, "distribution is not supported."); if constexpr (std::is_same_v< distr_t, oneapi::mkl::rng::device::bits<std::uint32_t>>) { return generate_vec<vec_size>(_distr_bits); } if constexpr (std::is_same_v< distr_t, oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>) { return generate_vec<vec_size>(_distr_uniform_bits); } if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::gaussian<float>>) { return generate_vec<vec_size>(_distr_gaussian_float); } if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::gaussian<double>>) { return generate_vec<vec_size>(_distr_gaussian_double); } if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::lognormal<float>>) { return generate_vec<vec_size>(_distr_lognormal_float, distr_params..., 0.0f, 1.0f); } if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::lognormal<double>>) { return generate_vec<vec_size>(_distr_lognormal_double, distr_params..., 0.0, 1.0); } if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::poisson< std::uint32_t>>) { return generate_vec<vec_size>(_distr_poisson, distr_params...); } if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::uniform<float>>) { return generate_vec<vec_size>(_distr_uniform_float); } if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::uniform<double>>) { return generate_vec<vec_size>(_distr_uniform_double); } } /// Get the random number generator engine. /// \return The reference of the internal random number generator engine. engine_t &get_engine() { return _engine; } private: template <int vec_size, typename distr_t, class... 
distr_params_t> auto generate_vec(distr_t &distr, distr_params_t... distr_params) { if constexpr (sizeof...(distr_params_t)) { typename distr_t::param_type pt(distr_params...); distr.param(pt); } if constexpr (vec_size == 4) { if constexpr (_is_engine_vec_size_one) { sycl::vec<typename distr_t::result_type, 4> res; res.x() = oneapi::mkl::rng::device::generate(distr, _engine); res.y() = oneapi::mkl::rng::device::generate(distr, _engine); res.z() = oneapi::mkl::rng::device::generate(distr, _engine); res.w() = oneapi::mkl::rng::device::generate(distr, _engine); return res; } else { return oneapi::mkl::rng::device::generate(distr, _engine); } } else if constexpr (vec_size == 1) { if constexpr (_is_engine_vec_size_one) { return oneapi::mkl::rng::device::generate(distr, _engine); } else { return oneapi::mkl::rng::device::generate_single(distr, _engine); } } else if constexpr (vec_size == 2) { if constexpr (_is_engine_vec_size_one) { sycl::vec<typename distr_t::result_type, 2> res; res.x() = oneapi::mkl::rng::device::generate(distr, _engine); res.y() = oneapi::mkl::rng::device::generate(distr, _engine); return res; } else { sycl::vec<typename distr_t::result_type, 2> res; res.x() = oneapi::mkl::rng::device::generate_single(distr, _engine); res.y() = oneapi::mkl::rng::device::generate_single(distr, _engine); return res; } } } }; } // namespace device #endif namespace host { namespace detail { class rng_generator_base { public: /// Set the seed of host rng_generator. /// \param seed The engine seed. virtual void set_seed(const std::uint64_t seed) = 0; /// Set the dimensions of host rng_generator. /// \param dimensions The engine dimensions. virtual void set_dimensions(const std::uint32_t dimensions) = 0; /// Set the queue of host rng_generator. /// \param queue The engine queue. virtual void set_queue(sycl::queue *queue) = 0; /// Generate unsigned int random number(s) with 'uniform_bits' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. virtual inline void generate_uniform_bits(unsigned int *output, std::int64_t n) = 0; /// Generate unsigned long long random number(s) with 'uniform_bits' /// distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. virtual inline void generate_uniform_bits(unsigned long long *output, std::int64_t n) = 0; /// Generate float random number(s) with 'lognormal' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param m Mean of associated normal distribution /// \param s Standard deviation of associated normal distribution. virtual inline void generate_lognormal(float *output, std::int64_t n, float m, float s) = 0; /// Generate double random number(s) with 'lognormal' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param m Mean of associated normal distribution /// \param s Standard deviation of associated normal distribution. virtual inline void generate_lognormal(double *output, std::int64_t n, double m, double s) = 0; /// Generate float random number(s) with 'gaussian' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param mean Mean of normal distribution /// \param stddev Standard deviation of normal distribution. 
virtual inline void generate_gaussian(float *output, std::int64_t n, float mean, float stddev) = 0; /// Generate double random number(s) with 'gaussian' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param mean Mean of normal distribution /// \param stddev Standard deviation of normal distribution. virtual inline void generate_gaussian(double *output, std::int64_t n, double mean, double stddev) = 0; /// Generate unsigned int random number(s) with 'poisson' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param lambda Lambda for the Poisson distribution. virtual inline void generate_poisson(unsigned int *output, std::int64_t n, double lambda) = 0; /// Generate float random number(s) with 'uniform' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. virtual inline void generate_uniform(float *output, std::int64_t n) = 0; /// Generate double random number(s) with 'uniform' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. virtual inline void generate_uniform(double *output, std::int64_t n) = 0; /// Skip ahead several random number(s). /// \param num_to_skip The number of random numbers to be skipped. virtual void skip_ahead(const std::uint64_t num_to_skip) = 0; protected: sycl::queue *_queue{&dpct::get_default_queue()}; std::uint64_t _seed{0}; std::uint32_t _dimensions{1}; }; /// The random number generator on host. template <typename engine_t = oneapi::mkl::rng::philox4x32x10> class rng_generator : public rng_generator_base { public: /// Constructor of rng_generator. rng_generator() : _engine(create_engine(_queue, _seed, _dimensions)) {} /// Set the seed of host rng_generator. /// \param seed The engine seed. void set_seed(const std::uint64_t seed) { if (seed == _seed) { return; } _seed = seed; _engine = create_engine(_queue, _seed, _dimensions); } /// Set the dimensions of host rng_generator. /// \param dimensions The engine dimensions. void set_dimensions(const std::uint32_t dimensions) { if (dimensions == _dimensions) { return; } _dimensions = dimensions; _engine = create_engine(_queue, _seed, _dimensions); } /// Set the queue of host rng_generator. /// \param queue The engine queue. void set_queue(sycl::queue *queue) { if (queue == _queue) { return; } _queue = queue; _engine = create_engine(_queue, _seed, _dimensions); } /// Generate unsigned int random number(s) with 'uniform_bits' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. inline void generate_uniform_bits(unsigned int *output, std::int64_t n) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) " "Interfaces Project does not support this API."); #else static_assert(sizeof(unsigned int) == sizeof(std::uint32_t)); generate<oneapi::mkl::rng::uniform_bits<std::uint32_t>>( (std::uint32_t *)output, n); #endif } /// Generate unsigned long long random number(s) with 'uniform_bits' /// distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. 
inline void generate_uniform_bits(unsigned long long *output, std::int64_t n) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) " "Interfaces Project does not support this API."); #else static_assert(sizeof(unsigned long long) == sizeof(std::uint64_t)); generate<oneapi::mkl::rng::uniform_bits<std::uint64_t>>( (std::uint64_t *)output, n); #endif } /// Generate float random number(s) with 'lognormal' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param m Mean of associated normal distribution /// \param s Standard deviation of associated normal distribution. inline void generate_lognormal(float *output, std::int64_t n, float m, float s) { generate<oneapi::mkl::rng::lognormal<float>>(output, n, m, s); } /// Generate double random number(s) with 'lognormal' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param m Mean of associated normal distribution /// \param s Standard deviation of associated normal distribution. inline void generate_lognormal(double *output, std::int64_t n, double m, double s) { generate<oneapi::mkl::rng::lognormal<double>>(output, n, m, s); } /// Generate float random number(s) with 'gaussian' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param mean Mean of normal distribution /// \param stddev Standard deviation of normal distribution. inline void generate_gaussian(float *output, std::int64_t n, float mean, float stddev) { generate<oneapi::mkl::rng::gaussian<float>>(output, n, mean, stddev); } /// Generate double random number(s) with 'gaussian' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param mean Mean of normal distribution /// \param stddev Standard deviation of normal distribution. inline void generate_gaussian(double *output, std::int64_t n, double mean, double stddev) { generate<oneapi::mkl::rng::gaussian<double>>(output, n, mean, stddev); } /// Generate unsigned int random number(s) with 'poisson' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param lambda Lambda for the Poisson distribution. inline void generate_poisson(unsigned int *output, std::int64_t n, double lambda) { generate<oneapi::mkl::rng::poisson<unsigned int>>(output, n, lambda); } /// Generate float random number(s) with 'uniform' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. inline void generate_uniform(float *output, std::int64_t n) { generate<oneapi::mkl::rng::uniform<float>>(output, n); } /// Generate double random number(s) with 'uniform' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. inline void generate_uniform(double *output, std::int64_t n) { generate<oneapi::mkl::rng::uniform<double>>(output, n); } /// Skip ahead several random number(s). /// \param num_to_skip The number of random numbers to be skipped. 
void skip_ahead(const std::uint64_t num_to_skip) { #ifndef __INTEL_MKL__ oneapi::mkl::rng::skip_ahead(_engine, num_to_skip); #else if constexpr (std::is_same_v<engine_t, oneapi::mkl::rng::mt2203>) throw std::runtime_error("no skip_ahead method of mt2203 engine."); else oneapi::mkl::rng::skip_ahead(_engine, num_to_skip); #endif } private: static inline engine_t create_engine(sycl::queue *queue, const std::uint64_t seed, const std::uint32_t dimensions) { #ifdef __INTEL_MKL__ return std::is_same_v<engine_t, oneapi::mkl::rng::sobol> ? engine_t(*queue, dimensions) : engine_t(*queue, seed); #else return engine_t(*queue, seed); #endif } template <typename distr_t, typename buffer_t, class... distr_params_t> void generate(buffer_t *output, const std::int64_t n, const distr_params_t... distr_params) { auto output_buf = dpct::detail::get_memory(output); oneapi::mkl::rng::generate(distr_t(distr_params...), _engine, n, output_buf); } engine_t _engine{}; }; } // namespace detail } // namespace host enum class random_engine_type { philox4x32x10, mrg32k3a, mt2203, mt19937, sobol, mcg59 }; typedef std::shared_ptr<rng::host::detail::rng_generator_base> host_rng_ptr; /// Create a host random number generator. /// \param type The random engine type. /// \return The pointer of random number generator. inline host_rng_ptr create_host_rng(const random_engine_type type) { switch (type) { case random_engine_type::philox4x32x10: return std::make_shared< rng::host::detail::rng_generator<oneapi::mkl::rng::philox4x32x10>>(); case random_engine_type::mrg32k3a: return std::make_shared< rng::host::detail::rng_generator<oneapi::mkl::rng::mrg32k3a>>(); #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) " "Interfaces Project does not support this API."); #else case random_engine_type::mt2203: return std::make_shared< rng::host::detail::rng_generator<oneapi::mkl::rng::mt2203>>(); case random_engine_type::mt19937: return std::make_shared< rng::host::detail::rng_generator<oneapi::mkl::rng::mt19937>>(); case random_engine_type::sobol: return std::make_shared< rng::host::detail::rng_generator<oneapi::mkl::rng::sobol>>(); case random_engine_type::mcg59: return std::make_shared< rng::host::detail::rng_generator<oneapi::mkl::rng::mcg59>>(); #endif } } } // namespace rng } // namespace dpct #endif // __DPCT_RNG_UTILS_HPP__
hpp
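A minimal usage sketch for the host-side generator API in rng_utils.hpp above: a Philox engine is created through the dpct::rng::create_host_rng factory, bound to a queue, seeded, and used to fill a USM buffer with uniform floats. The queue, seed, buffer size, and include path are illustrative assumptions.

#include <cstdint>
#include <sycl/sycl.hpp>
#include <dpct/rng_utils.hpp> // assumed include path for the header above

int main() {
  sycl::queue q;
  constexpr std::int64_t n = 256;
  float *data = sycl::malloc_shared<float>(n, q);

  dpct::rng::host_rng_ptr gen =
      dpct::rng::create_host_rng(dpct::rng::random_engine_type::philox4x32x10);
  gen->set_queue(&q);             // generate on our queue instead of the default one
  gen->set_seed(1234ULL);         // re-creates the engine with the new seed
  gen->generate_uniform(data, n); // n floats, uniform on [0, 1)
  q.wait();

  sycl::free(data, q);
  return 0;
}

The device-side dpct::rng::device::rng_generator shown earlier in the header follows the same idea inside a kernel: construct it with a seed and a per-work-item skip-ahead offset, then call generate<distribution, vec_size>().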
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/numeric.h
//==---- numeric.h --------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//

#ifndef __DPCT_NUMERIC_H__
#define __DPCT_NUMERIC_H__

namespace dpct {

template <typename Policy, typename InputIt1, typename InputIt2, typename T>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
                InputIt2 first2, T init) {
  return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
                               first2, init);
}

template <typename Policy, typename InputIt1, typename InputIt2, typename T,
          typename BinaryOperation1, typename BinaryOperation2>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
                InputIt2 first2, T init, BinaryOperation1 op1,
                BinaryOperation2 op2) {
  return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
                               first2, init, op1, op2);
}

} // end namespace dpct

#endif
h
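dpct::inner_product above forwards directly to std::transform_reduce, so a host-side dot product looks like the sketch below. The sequential std::execution::seq policy and the include path are illustrative assumptions; with a oneDPL device policy the oneapi/dpl headers would be included instead.

#include <execution>
#include <iostream>
#include <numeric> // std::transform_reduce, used by dpct::inner_product
#include <vector>
#include <dpct/dpl_extras/numeric.h> // assumed include path for the header above

int main() {
  std::vector<int> a{1, 2, 3, 4};
  std::vector<int> b{5, 6, 7, 8};
  // 1*5 + 2*6 + 3*7 + 4*8 = 70
  int dot = dpct::inner_product(std::execution::seq, a.begin(), a.end(),
                                b.begin(), 0);
  std::cout << dot << "\n"; // prints 70
  return 0;
}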
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/iterators.h
//==---- iterators.h ------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_ITERATORS_H__ #define __DPCT_ITERATORS_H__ #include <oneapi/dpl/iterator> #include "functional.h" namespace dpct { namespace internal { // Wrapper class returned from a dereferenced transform_iterator which was // created using // make_transform_output_iterator(). Used to apply the supplied transform // function when writing into an object of this class. // // Example: // int a[] = {0, 1, 2, 3, 4}; // int* p = a; // auto f = [](auto v) {return v*v;}; // auto tr_out = dpct::make_transform_output_iterator(p+1, f); // auto wrap = *tr_out; // wrap is a transform_output_ref_wrapper // std::cout<<*(p+1)<<std::endl; // '1' // wrap = 2; // apply function, store 2*2=4 // std::cout<<*(p+1)<<std::endl; // '4' template <typename T, typename _UnaryFunc> class transform_output_ref_wrapper { private: T __my_reference_; _UnaryFunc __my_unary_func_; public: template <typename U> transform_output_ref_wrapper(U &&__reference, _UnaryFunc __unary_func) : __my_reference_(std::forward<U>(__reference)), __my_unary_func_(__unary_func) {} // When writing to an object of this type, apply the supplied unary function, // then write to the wrapped reference template <typename UnaryInputType> transform_output_ref_wrapper &operator=(const UnaryInputType &e) { __my_reference_ = __my_unary_func_(e); return *this; } }; // Unary functor to create a transform_output_reference_wrapper when a // transform_iterator is dereferenced, so that a // the supplied unary function may be applied on write, resulting in a // transform_output_iterator template <typename _UnaryFunc> struct _Unary_Out { _Unary_Out(_UnaryFunc __f_) : __f(__f_) {} _UnaryFunc __f; template <typename T> auto operator()(T &&val) const { return transform_output_ref_wrapper<T, _UnaryFunc>(std::forward<T>(val), __f); } }; } // end namespace internal using std::advance; using std::distance; template <typename T> oneapi::dpl::counting_iterator<T> make_counting_iterator(const T &input) { return oneapi::dpl::counting_iterator<T>(input); } template <typename _Tp> class constant_iterator { public: typedef std::false_type is_hetero; typedef std::true_type is_passed_directly; typedef std::ptrdiff_t difference_type; typedef _Tp value_type; typedef _Tp *pointer; // There is no storage behind the iterator, so we return a value instead of // reference. typedef const _Tp reference; typedef const _Tp const_reference; typedef std::random_access_iterator_tag iterator_category; explicit constant_iterator(_Tp __init) : __my_value_(__init), __my_counter_(0) {} private: // used to construct iterator instances with different counter values required // by arithmetic operators constant_iterator(const _Tp &__value, const difference_type &__offset) : __my_value_(__value), __my_counter_(__offset) {} public: // non-const variants of access operators are not provided so unintended // writes are caught at compile time. 
const_reference operator*() const { return __my_value_; } const_reference operator[](difference_type) const { return __my_value_; } difference_type operator-(const constant_iterator &__it) const { return __my_counter_ - __it.__my_counter_; } constant_iterator &operator+=(difference_type __forward) { __my_counter_ += __forward; return *this; } constant_iterator &operator-=(difference_type __backward) { return *this += -__backward; } constant_iterator &operator++() { return *this += 1; } constant_iterator &operator--() { return *this -= 1; } constant_iterator operator++(int) { constant_iterator __it(*this); ++(*this); return __it; } constant_iterator operator--(int) { constant_iterator __it(*this); --(*this); return __it; } constant_iterator operator-(difference_type __backward) const { return constant_iterator(__my_value_, __my_counter_ - __backward); } constant_iterator operator+(difference_type __forward) const { return constant_iterator(__my_value_, __my_counter_ + __forward); } friend constant_iterator operator+(difference_type __forward, const constant_iterator __it) { return __it + __forward; } bool operator==(const constant_iterator &__it) const { return __my_value_ == __it.__my_value_ && this->__my_counter_ == __it.__my_counter_; } bool operator!=(const constant_iterator &__it) const { return !(*this == __it); } bool operator<(const constant_iterator &__it) const { return *this - __it < 0; } bool operator>(const constant_iterator &__it) const { return __it < *this; } bool operator<=(const constant_iterator &__it) const { return !(*this > __it); } bool operator>=(const constant_iterator &__it) const { return !(*this < __it); } private: _Tp __my_value_; uint64_t __my_counter_; }; template <typename _Tp> constant_iterator<_Tp> make_constant_iterator(_Tp __value) { return constant_iterator<_Tp>(__value); } // key_value_pair class to represent a key and value, specifically a // dereferenced arg_index_input_iterator template <typename _KeyTp, typename _ValueTp> class key_value_pair { public: key_value_pair() = default; key_value_pair(const _KeyTp &_key, const _ValueTp &_value) : key(_key), value(_value) {} bool operator==(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const { return (key == _kvp.key) && (value == _kvp.value); } bool operator!=(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const { return (key != _kvp.key) || (value != _kvp.value); } _KeyTp key; _ValueTp value; }; namespace detail { template <typename KeyTp, typename _ValueTp> struct make_key_value_pair { template <typename ValRefTp> key_value_pair<KeyTp, _ValueTp> operator()(const oneapi::dpl::__internal::tuple<KeyTp, ValRefTp> &tup) const { return ::dpct::key_value_pair<KeyTp, _ValueTp>(::std::get<0>(tup), ::std::get<1>(tup)); } }; template <class T> struct __zip_iterator_impl; template <class... Ts> struct __zip_iterator_impl<std::tuple<Ts...>> { using type = oneapi::dpl::zip_iterator<Ts...>; }; } // end namespace detail // dpct::zip_iterator can only accept std::tuple type as template argument for // compatibility purpose. Please use oneapi::dpl::zip_iterator if you want to // pass iterator's types directly. template <typename... Ts> using zip_iterator = typename detail::__zip_iterator_impl<Ts...>::type; // arg_index_input_iterator is an iterator over a input iterator, with a index. 
// When dereferenced, it returns a key_value_pair, which can be interrogated for // the index key or the value from the input iterator template <typename InputIteratorT, typename OffsetT = ptrdiff_t, typename OutputValueT = typename ::std::iterator_traits<InputIteratorT>::value_type> class arg_index_input_iterator : public oneapi::dpl::transform_iterator< oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>, InputIteratorT>, detail::make_key_value_pair<OffsetT, OutputValueT>> { using arg_index_input_iterator_wrap = oneapi::dpl::transform_iterator< oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>, InputIteratorT>, detail::make_key_value_pair<OffsetT, OutputValueT>>; public: typedef OffsetT difference_type; // signal to __get_sycl_range that this iterator is as a direct pass iterator using is_zip = ::std::true_type; arg_index_input_iterator(const arg_index_input_iterator_wrap &__arg_wrap) : arg_index_input_iterator_wrap(__arg_wrap) {} arg_index_input_iterator(InputIteratorT __iter) : arg_index_input_iterator_wrap( oneapi::dpl::make_zip_iterator( oneapi::dpl::counting_iterator(OffsetT(0)), __iter), detail::make_key_value_pair<OffsetT, OutputValueT>()) {} arg_index_input_iterator &operator=(const arg_index_input_iterator &__input) { arg_index_input_iterator_wrap::operator=(__input); return *this; } arg_index_input_iterator &operator++() { arg_index_input_iterator_wrap::operator++(); return *this; } arg_index_input_iterator &operator--() { arg_index_input_iterator_wrap::operator--(); return *this; } arg_index_input_iterator operator++(int) { arg_index_input_iterator __it(*this); ++(*this); return __it; } arg_index_input_iterator operator--(int) { arg_index_input_iterator __it(*this); --(*this); return __it; } arg_index_input_iterator operator+(difference_type __forward) const { return arg_index_input_iterator( arg_index_input_iterator_wrap::operator+(__forward)); } arg_index_input_iterator operator-(difference_type __backward) const { return arg_index_input_iterator( arg_index_input_iterator_wrap::operator-(__backward)); } arg_index_input_iterator &operator+=(difference_type __forward) { arg_index_input_iterator_wrap::operator+=(__forward); return *this; } arg_index_input_iterator &operator-=(difference_type __backward) { arg_index_input_iterator_wrap::operator-=(__backward); return *this; } friend arg_index_input_iterator operator+(difference_type __forward, const arg_index_input_iterator &__it) { return __it + __forward; } difference_type operator-(const arg_index_input_iterator &__it) const { return arg_index_input_iterator_wrap::operator-(__it); } bool operator==(const arg_index_input_iterator &__it) const { return arg_index_input_iterator_wrap::operator==(__it); } bool operator!=(const arg_index_input_iterator &__it) const { return !(*this == __it); } bool operator<(const arg_index_input_iterator &__it) const { return *this - __it < 0; } bool operator>(const arg_index_input_iterator &__it) const { return __it < *this; } bool operator<=(const arg_index_input_iterator &__it) const { return !(*this > __it); } bool operator>=(const arg_index_input_iterator &__it) const { return !(*this < __it); } // returns an arg_index_input_iterator with the same iter position, but a // count reset to 0 arg_index_input_iterator create_normalized() { return arg_index_input_iterator( ::std::get<1>(arg_index_input_iterator_wrap::base().base())); } }; template <typename IterT> struct io_iterator_pair { inline io_iterator_pair() : selector(false) {} inline io_iterator_pair(const 
IterT &first, const IterT &second) : selector(false) { iter[0] = first; iter[1] = second; } inline IterT first() const { return selector ? iter[1] : iter[0]; } inline IterT second() const { return selector ? iter[0] : iter[1]; } inline void swap() { selector = !selector; } bool selector; IterT iter[2]; }; template <typename _Iter, typename _UnaryFunc> auto make_transform_output_iterator(_Iter __it, _UnaryFunc __unary_func) { return oneapi::dpl::transform_iterator( __it, internal::_Unary_Out<_UnaryFunc>(__unary_func)); } } // end namespace dpct #endif
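// ---------------------------------------------------------------------------
// [Editor's sketch - not part of the original header] A minimal, hedged usage
// example of the helpers defined above, written in the same comment-example
// style used earlier in this file. The array names (`data`, `out`) and the
// lambda are placeholders for illustration only, and <algorithm> /
// <functional> are assumed to be included.
//
//   int data[4] = {1, 2, 3, 4};
//   int out[4]  = {0, 0, 0, 0};
//
//   // constant_iterator: every dereference yields the same value (7 here),
//   // so this stores data[i] + 7 into out[i].
//   std::transform(data, data + 4, dpct::make_constant_iterator(7), out,
//                  std::plus<int>());
//
//   // transform_output_iterator: the unary function runs on write, so each
//   // copied value is squared before it lands in out -> {1, 4, 9, 16}.
//   auto sq_out = dpct::make_transform_output_iterator(
//       out, [](int v) { return v * v; });
//   std::copy(data, data + 4, sq_out);
//
//   // io_iterator_pair: a ping-pong pair of iterators; swap() flips which
//   // buffer first()/second() report, which is handy for multi-pass sorts.
//   dpct::io_iterator_pair<int *> bufs(data, out);
//   bufs.swap();  // first() now returns out, second() returns data
// ---------------------------------------------------------------------------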
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/algorithm.h
//==---- algorithm.h ------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_ALGORITHM_H__ #define __DPCT_ALGORITHM_H__ #include <oneapi/dpl/execution> #include <oneapi/dpl/algorithm> #include <oneapi/dpl/numeric> #include "functional.h" #include "iterators.h" #include "vector.h" namespace dpct { template <typename Policy, typename Iter1, typename Iter2, typename Pred, typename T> void replace_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p, const T &new_value) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); std::transform(std::forward<Policy>(policy), first, last, mask, first, internal::replace_if_fun<typename std::iterator_traits<Iter1>::value_type, Pred>(p, new_value)); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3, typename Pred, typename T> Iter3 replace_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Iter3 result, Pred p, const T &new_value) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); return std::transform(std::forward<Policy>(policy), first, last, mask, result, internal::replace_if_fun<typename std::iterator_traits<Iter3>::value_type, Pred>(p, new_value)); } template <typename Policy, typename Iter1, typename Iter2, typename Pred> internal::enable_if_hetero_execution_policy<Policy, Iter1> remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using oneapi::dpl::make_zip_iterator; using policy_type = typename std::decay<Policy>::type; using internal::__buffer; using ValueType = typename std::iterator_traits<Iter1>::value_type; __buffer<ValueType> _tmp(std::distance(first, last)); auto end = std::copy_if( std::forward<Policy>(policy), make_zip_iterator(first, mask), make_zip_iterator(last, mask + std::distance(first, last)), make_zip_iterator(_tmp.get(), oneapi::dpl::discard_iterator()), internal::negate_predicate_key_fun<Pred>(p)); return std::copy(std::forward<Policy>(policy), _tmp.get(), std::get<0>(end.base()), first); } template <typename Policy, typename Iter1, typename Iter2, typename Pred> typename std::enable_if<!internal::is_hetero_execution_policy< typename std::decay<Policy>::type>::value, Iter1>::type remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && 
std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using oneapi::dpl::make_zip_iterator; using policy_type = typename std::decay<Policy>::type; using ValueType = typename std::iterator_traits<Iter1>::value_type; std::vector<ValueType> _tmp(std::distance(first, last)); auto end = std::copy_if( policy, make_zip_iterator(first, mask), make_zip_iterator(last, mask + std::distance(first, last)), make_zip_iterator(_tmp.begin(), oneapi::dpl::discard_iterator()), internal::negate_predicate_key_fun<Pred>(p)); return std::copy(policy, _tmp.begin(), std::get<0>(end.base()), first); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3, typename Pred> Iter3 remove_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Iter3 result, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using oneapi::dpl::make_zip_iterator; auto ret_val = std::remove_copy_if( std::forward<Policy>(policy), make_zip_iterator(first, mask), make_zip_iterator(last, mask + std::distance(first, last)), make_zip_iterator(result, oneapi::dpl::discard_iterator()), internal::predicate_key_fun<Pred>(p)); return std::get<0>(ret_val.base()); } template <class Policy, class Iter1, class Iter2, class BinaryPred> std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first, BinaryPred binary_pred) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::unique( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first, values_first), oneapi::dpl::make_zip_iterator( keys_last, values_first + std::distance(keys_first, keys_last)), internal::compare_key_fun<BinaryPred>(binary_pred)); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_first, values_first), ret_val); return std::make_pair(keys_first + n1, values_first + n1); } template <class Policy, class Iter1, class Iter2> std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using T = typename std::iterator_traits<Iter1>::value_type; return unique(std::forward<Policy>(policy), keys_first, keys_last, values_first, std::equal_to<T>()); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class BinaryPred> std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first, Iter3 keys_result, Iter4 values_result, BinaryPred binary_pred) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, 
std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::unique_copy( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first, values_first), oneapi::dpl::make_zip_iterator( keys_last, values_first + std::distance(keys_first, keys_last)), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::unique_fun<BinaryPred>(binary_pred)); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4> std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first, Iter3 keys_result, Iter4 values_result) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using T = typename std::iterator_traits<Iter1>::value_type; auto comp = std::equal_to<T>(); return unique_copy(std::forward<Policy>(policy), keys_first, keys_last, values_first, keys_result, values_result, comp); } template <typename Policy, typename Iter, typename Pred> Iter partition_point(Policy &&policy, Iter first, Iter last, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); if (std::is_partitioned(std::forward<Policy>(policy), first, last, p)) return std::find_if_not(std::forward<Policy>(policy), first, last, p); else return first; } template <typename Policy, typename Iter1, typename Iter2, typename Iter3, typename Pred> Iter3 copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Iter3 result, Pred pred) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::copy_if( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask), oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)), oneapi::dpl::make_zip_iterator(result, oneapi::dpl::discard_iterator()), internal::predicate_key_fun<Pred>(pred)); return std::get<0>(ret_val.base()); } template <class Policy, class Iter1, class Iter2, class UnaryOperation, class Pred> Iter2 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 result, UnaryOperation unary_op, Pred pred) { static_assert( 
std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using T = typename std::iterator_traits<Iter1>::value_type; const auto n = std::distance(first, last); std::for_each( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, result), oneapi::dpl::make_zip_iterator(first, result) + n, internal::transform_if_fun<T, Pred, UnaryOperation>(pred, unary_op)); return result + n; } template <class Policy, class Iter1, class Iter2, class Iter3, class UnaryOperation, class Pred> Iter3 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Iter3 result, UnaryOperation unary_op, Pred pred) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using T = typename std::iterator_traits<Iter1>::value_type; using Ref1 = typename std::iterator_traits<Iter1>::reference; using Ref2 = typename std::iterator_traits<Iter2>::reference; const auto n = std::distance(first, last); std::for_each( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask, result), oneapi::dpl::make_zip_iterator(first, mask, result) + n, internal::transform_if_unary_zip_mask_fun<T, Pred, UnaryOperation>( pred, unary_op)); return result + n; } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class BinaryOperation, class Pred> Iter4 transform_if(Policy &&policy, Iter1 first1, Iter1 last1, Iter2 first2, Iter3 mask, Iter4 result, BinaryOperation binary_op, Pred pred) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); const auto n = std::distance(first1, last1); using ZipIterator = typename oneapi::dpl::zip_iterator<Iter1, Iter2, Iter3, Iter4>; using T = typename std::iterator_traits<ZipIterator>::value_type; std::for_each( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first1, first2, mask, result), oneapi::dpl::make_zip_iterator(last1, first2 + n, mask + n, result + n), internal::transform_if_zip_mask_fun<T, Pred, BinaryOperation>(pred, binary_op)); return result + n; } template <typename Policy, typename InputIter1, typename InputIter2, typename OutputIter> void scatter(Policy &&policy, InputIter1 first, InputIter1 last, InputIter2 map, OutputIter result) { static_assert( std::is_same<typename std::iterator_traits<InputIter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<InputIter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<OutputIter>::iterator_category, 
std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); oneapi::dpl::copy(policy, first, last, oneapi::dpl::make_permutation_iterator(result, map)); } template <typename Policy, typename InputIter1, typename InputIter2, typename OutputIter> OutputIter gather(Policy &&policy, InputIter1 map_first, InputIter1 map_last, InputIter2 input_first, OutputIter result) { static_assert( std::is_same<typename std::iterator_traits<InputIter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<InputIter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<OutputIter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto perm_begin = oneapi::dpl::make_permutation_iterator(input_first, map_first); const int n = ::std::distance(map_first, map_last); return oneapi::dpl::copy(policy, perm_begin, perm_begin + n, result); } template <typename Policy, typename InputIter1, typename InputIter2, typename InputIter3, typename OutputIter, typename Predicate> void scatter_if(Policy &&policy, InputIter1 first, InputIter1 last, InputIter2 map, InputIter3 mask, OutputIter result, Predicate pred) { static_assert( std::is_same<typename std::iterator_traits<InputIter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<InputIter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<InputIter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<OutputIter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); transform_if(policy, first, last, mask, oneapi::dpl::make_permutation_iterator(result, map), [=](auto &&v) { return v; }, [=](auto &&m) { return pred(m); }); } template <typename Policy, typename InputIter1, typename InputIter2, typename InputIter3, typename OutputIter, typename Predicate> OutputIter gather_if(Policy &&policy, InputIter1 map_first, InputIter1 map_last, InputIter2 mask, InputIter3 input_first, OutputIter result, Predicate pred) { static_assert( std::is_same<typename std::iterator_traits<InputIter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<InputIter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<InputIter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<OutputIter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto perm_begin = oneapi::dpl::make_permutation_iterator(input_first, map_first); const int n = std::distance(map_first, map_last); return transform_if(policy, perm_begin, perm_begin + n, mask, result, [=](auto &&v) { return v; }, [=](auto &&m) { return pred(m); }); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3, typename Iter4, typename Iter5, typename Iter6> std::pair<Iter5, Iter6> merge(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result) { static_assert( std::is_same<typename 
std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto n1 = std::distance(keys_first1, keys_last1); auto n2 = std::distance(keys_first2, keys_last2); std::merge(std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<>()); return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3, typename Iter4, typename Iter5, typename Iter6, typename Comp> std::pair<Iter5, Iter6> merge(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result, Comp comp) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto n1 = std::distance(keys_first1, keys_last1); auto n2 = std::distance(keys_first2, keys_last2); std::merge(std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<Comp>(comp)); return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2); } template <class Policy, class Iter, class T> void iota(Policy &&policy, Iter first, Iter last, T init, T step) { static_assert( std::is_same<typename std::iterator_traits<Iter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using DiffSize = typename std::iterator_traits<Iter>::difference_type; std::transform( std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0), oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)), first, internal::sequence_fun<T>(init, step)); } template <class 
Policy, class Iter, class T> void iota(Policy &&policy, Iter first, Iter last, T init) { static_assert( std::is_same<typename std::iterator_traits<Iter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); iota(std::forward<Policy>(policy), first, last, init, T(1)); } template <class Policy, class Iter> void iota(Policy &&policy, Iter first, Iter last) { static_assert( std::is_same<typename std::iterator_traits<Iter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using DiffSize = typename std::iterator_traits<Iter>::difference_type; iota(std::forward<Policy>(policy), first, last, DiffSize(0), DiffSize(1)); } template <class Policy, class Iter1, class Iter2, class Comp> void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first, Comp comp) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto first = oneapi::dpl::make_zip_iterator(keys_first, values_first); auto last = first + std::distance(keys_first, keys_last); std::sort(std::forward<Policy>(policy), first, last, internal::compare_key_fun<Comp>(comp)); } template <class Policy, class Iter1, class Iter2> void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); sort(std::forward<Policy>(policy), keys_first, keys_last, values_first, internal::__less()); } template <class Policy, class Iter1, class Iter2, class Comp> void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first, Comp comp) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); std::stable_sort( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first, values_first), oneapi::dpl::make_zip_iterator( keys_last, values_first + std::distance(keys_first, keys_last)), internal::compare_key_fun<Comp>(comp)); } template <class Policy, class Iter1, class Iter2> void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); stable_sort(std::forward<Policy>(policy), keys_first, keys_last, values_first, internal::__less()); } template <class Policy, class Iter, class Operator> void for_each_index(Policy &&policy, Iter first, Iter last, Operator unary_op) { static_assert( std::is_same<typename std::iterator_traits<Iter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms 
must be random-access iterators."); using DiffSize = typename std::iterator_traits<Iter>::difference_type; std::transform( std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0), oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)), first, unary_op); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5> std::pair<Iter4, Iter5> set_intersection(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 keys_result, Iter5 values_result) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_intersection( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, oneapi::dpl::discard_iterator()), oneapi::dpl::make_zip_iterator(keys_last2, oneapi::dpl::discard_iterator()), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<>()); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5, class Comp> std::pair<Iter4, Iter5> set_intersection(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 keys_result, Iter5 values_result, Comp comp) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_intersection( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, oneapi::dpl::discard_iterator()), oneapi::dpl::make_zip_iterator(keys_last2, oneapi::dpl::discard_iterator()), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<Comp>(comp)); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5, class Iter6> 
std::pair<Iter5, Iter6> set_symmetric_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_symmetric_difference( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator( keys_last2, values_first2 + std::distance(keys_first2, keys_last2)), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<>()); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5, class Iter6, class Comp> std::pair<Iter5, Iter6> set_symmetric_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result, Comp comp) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_symmetric_difference( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator( keys_last2, values_first2 + std::distance(keys_first2, keys_last2)), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<Comp>(comp)); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5, class Iter6> std::pair<Iter5, Iter6> set_difference(Policy &&policy, 
Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_difference( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator( keys_last2, values_first2 + std::distance(keys_first2, keys_last2)), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<>()); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5, class Iter6, class Comp> std::pair<Iter5, Iter6> set_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result, Comp comp) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_difference( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator( keys_last2, values_first2 + std::distance(keys_first2, keys_last2)), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<Comp>(comp)); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5, class Iter6> internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>> set_union(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, 
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_union( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator( keys_last2, values_first2 + std::distance(keys_first2, keys_last2)), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<>()); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5, class Iter6, class Comp> internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>> set_union(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result, Comp comp) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_union( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator( keys_last2, values_first2 + std::distance(keys_first2, keys_last2)), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<Comp>(comp)); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3, typename Iter4, typename Pred> internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>> stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Iter3 out_true, Iter4 
out_false, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::partition_copy( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask), oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)), oneapi::dpl::make_zip_iterator(out_true, oneapi::dpl::discard_iterator()), oneapi::dpl::make_zip_iterator(out_false, oneapi::dpl::discard_iterator()), internal::predicate_key_fun<Pred>(p)); return std::make_pair(std::get<0>(ret_val.first.base()), std::get<0>(ret_val.second.base())); } template <typename Policy, typename Iter1, typename Iter3, typename Iter4, typename Pred> internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>> stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter3 out_true, Iter4 out_false, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); return std::partition_copy(std::forward<Policy>(policy), first, last, out_true, out_false, p); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3, typename Iter4, typename Pred> internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>> partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Iter3 out_true, Iter4 out_false, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); return stable_partition_copy(std::forward<Policy>(policy), first, last, mask, out_true, out_false, p); } template <typename Policy, typename Iter1, typename Iter2, typename Pred> internal::enable_if_hetero_execution_policy<Policy, Iter1> stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); typedef typename std::decay<Policy>::type policy_type; internal::__buffer<typename std::iterator_traits<Iter1>::value_type> _tmp( std::distance(first, last)); std::copy(std::forward<Policy>(policy), mask, mask + std::distance(first, last), _tmp.get()); auto ret_val = 
std::stable_partition(std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, _tmp.get()), oneapi::dpl::make_zip_iterator( last, _tmp.get() + std::distance(first, last)), internal::predicate_key_fun<Pred>(p)); return std::get<0>(ret_val.base()); } template <typename Policy, typename Iter1, typename Iter2, typename Pred> typename std::enable_if<!internal::is_hetero_execution_policy< typename std::decay<Policy>::type>::value, Iter1>::type stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); typedef typename std::decay<Policy>::type policy_type; std::vector<typename std::iterator_traits<Iter1>::value_type> _tmp( std::distance(first, last)); std::copy(std::forward<Policy>(policy), mask, mask + std::distance(first, last), _tmp.begin()); auto ret_val = std::stable_partition( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, _tmp.begin()), oneapi::dpl::make_zip_iterator(last, _tmp.begin() + std::distance(first, last)), internal::predicate_key_fun<Pred>(p)); return std::get<0>(ret_val.base()); } template <typename Policy, typename Iter1, typename Iter2, typename Pred> internal::enable_if_execution_policy<Policy, Iter1> partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); return stable_partition(std::forward<Policy>(policy), first, last, mask, p); } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t> inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value && dpct::internal::is_iterator<key_out_t>::value && dpct::internal::is_iterator<value_t>::value && dpct::internal::is_iterator<value_out_t>::value> sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t values_in, value_out_t values_out, int64_t n, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8); template <typename _ExecutionPolicy, typename key_t, typename key_out_t> inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value && dpct::internal::is_iterator<key_out_t>::value> sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8); namespace internal { // Transforms key to a specific bit range and sorts the transformed key template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename transformed_key_t> inline void transform_and_sort(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, bool descending, int begin_bit, int end_bit) { using key_t_value_t = typename std::iterator_traits<key_t>::value_type; auto trans_key = translate_key<key_t_value_t, transformed_key_t>(begin_bit, end_bit); // Use of the comparison operator that is not simply std::greater() or // std::less() will result in // not using radix 
sort which will cost some performance. However, this is // necessary to provide the transformation of the key to the bitrange // desired. auto partial_sort_with_comp = [&](const auto &comp) { return oneapi::dpl::partial_sort_copy( std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out, keys_out + n, [=](const auto a, const auto b) { return comp(trans_key(a), trans_key(b)); }); }; if (descending) partial_sort_with_comp(::std::greater<transformed_key_t>()); else partial_sort_with_comp(::std::less<transformed_key_t>()); } template <typename _ExecutionPolicy, typename key_t, typename key_out_t> inline void sort_only(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, bool descending) { using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type; if constexpr (::std::is_floating_point<key_t_value_t>::value) { if (descending) { // Comparison operator that is not std::greater() ensures stability of // -0.0 and 0.0 // at the cost of some performance because radix sort will not be used. auto comp_descending = [=](const auto a, const auto b) { return a > b; }; oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out, keys_out + n, comp_descending); } else { // Comparison operator that is not std::less() ensures stability of -0.0 // and 0.0 // at the cost of some performance because radix sort will not be used. auto comp_ascending = [=](const auto a, const auto b) { return a < b; }; oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out, keys_out + n, comp_ascending); } } else { if (descending) { oneapi::dpl::partial_sort_copy( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out, keys_out + n, ::std::greater<key_t_value_t>()); } else { oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out, keys_out + n); } } } // Transforms key from a pair to a specific bit range and sorts the pairs by the // transformed key template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename transform_key_t, typename value_t, typename value_out_t> inline void transform_and_sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t values_in, value_out_t values_out, int64_t n, bool descending, int begin_bit, int end_bit) { using key_t_value_t = typename std::iterator_traits<key_t>::value_type; auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in); auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out); auto trans_key = translate_key<key_t_value_t, transform_key_t>(begin_bit, end_bit); // Use of the comparison operator that is not simply std::greater() or // std::less() will result in // not using radix sort which will cost some performance. However, this is // necessary to provide the transformation of the key to the bitrange desired // and also to select the key from the zipped pair. 
auto load_val = [=](const auto a) { return trans_key(std::get<0>(a)); }; auto partial_sort_with_comp = [&](const auto &comp) { return oneapi::dpl::partial_sort_copy( std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n, zip_output, zip_output + n, [=](const auto a, const auto b) { return comp(load_val(a), load_val(b)); }); }; if (descending) partial_sort_with_comp(::std::greater<key_t_value_t>()); else partial_sort_with_comp(::std::less<key_t_value_t>()); } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t> inline void sort_only_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t values_in, value_out_t values_out, int64_t n, bool descending) { using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type; auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in); auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out); // Use of the comparison operator that is not simply std::greater() or // std::less() will result in // not using radix sort which will cost some performance. However, this is // necessary to select the key from the zipped pair. auto load_val = [=](const auto a) { return std::get<0>(a); }; auto partial_sort_with_comp = [&](const auto &comp) { return oneapi::dpl::partial_sort_copy( std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n, zip_output, zip_output + n, [=](const auto a, const auto b) { return comp(load_val(a), load_val(b)); }); }; if (descending) partial_sort_with_comp(::std::greater<key_t_value_t>()); else partial_sort_with_comp(::std::less<key_t_value_t>()); } // overload for key_out_t != std::nullptr_t template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t> typename ::std::enable_if<!::std::is_null_pointer<key_out_t>::value>::type sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t values_in, value_out_t values_out, int64_t n, bool descending, int begin_bit, int end_bit) { using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type; int clipped_begin_bit = ::std::max(begin_bit, 0); int clipped_end_bit = ::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8); int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1; auto transform_and_sort_pairs_f = [&](auto x) { using T = typename ::std::decay_t<decltype(x)>; internal::transform_and_sort_pairs<decltype(policy), key_t, key_out_t, T, value_t, value_out_t>( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in, values_out, n, descending, clipped_begin_bit, clipped_end_bit); }; if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) { internal::sort_only_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in, values_out, n, descending); } else if (num_bytes == 1) { transform_and_sort_pairs_f.template operator()<uint8_t>(0); } else if (num_bytes == 2) { transform_and_sort_pairs_f.template operator()<uint16_t>(0); } else if (num_bytes <= 4) { transform_and_sort_pairs_f.template operator()<uint32_t>(0); } else // if (num_bytes <= 8) { transform_and_sort_pairs_f.template operator()<uint64_t>(0); } } // overload for key_out_t == std::nullptr_t template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t> typename ::std::enable_if<::std::is_null_pointer<key_out_t>::value>::type sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t 
values_in, value_out_t values_out, int64_t n, bool descending, int begin_bit, int end_bit) { // create temporary keys_out to discard, memory footprint could be improved by // a specialized iterator with a single // unchanging dummy key_t element using key_t_value_t = typename std::iterator_traits<key_t>::value_type; sycl::buffer<key_t_value_t, 1> temp_keys_out{sycl::range<1>(n)}; internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in, oneapi::dpl::begin(temp_keys_out), values_in, values_out, n, descending, begin_bit, end_bit); } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t, typename OffsetIteratorT> inline void segmented_sort_pairs_by_parallel_sorts( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_out_t values_in, value_t values_out, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { using offset_type = typename ::std::iterator_traits<OffsetIteratorT>::value_type; ::std::vector<offset_type> host_accessible_offset_starts(nsegments); ::std::vector<offset_type> host_accessible_offset_ends(nsegments); // make offsets accessible on host ::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets, begin_offsets + nsegments, host_accessible_offset_starts.begin()); ::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets, end_offsets + nsegments, host_accessible_offset_ends.begin()); for (::std::uint64_t i = 0; i < nsegments; i++) { uint64_t segment_begin = host_accessible_offset_starts[i]; uint64_t segment_end = ::std::min(n, (int64_t)host_accessible_offset_ends[i]); if (segment_begin < segment_end) { ::dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in + segment_begin, keys_out + segment_begin, values_in + segment_begin, values_out + segment_begin, segment_end - segment_begin, descending, begin_bit, end_bit); } } } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename OffsetIteratorT> inline void segmented_sort_keys_by_parallel_sorts( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { using offset_type = typename ::std::iterator_traits<OffsetIteratorT>::value_type; ::std::vector<offset_type> host_accessible_offset_starts(nsegments); ::std::vector<offset_type> host_accessible_offset_ends(nsegments); // make offsets accessible on host ::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets, begin_offsets + nsegments, host_accessible_offset_starts.begin()); ::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets, end_offsets + nsegments, host_accessible_offset_ends.begin()); for (::std::uint64_t i = 0; i < nsegments; i++) { uint64_t segment_begin = host_accessible_offset_starts[i]; uint64_t segment_end = ::std::min(n, (int64_t)host_accessible_offset_ends[i]); if (segment_begin < segment_end) { ::dpct::sort_keys(::std::forward<_ExecutionPolicy>(policy), keys_in + segment_begin, keys_out + segment_begin, segment_end - segment_begin, descending, begin_bit, end_bit); } } } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t, typename OffsetIteratorT> inline void 
segmented_sort_pairs_by_parallel_for_of_sorts( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { policy.queue().submit([&](sycl::handler &cgh) { cgh.parallel_for(nsegments, [=](sycl::id<1> i) { uint64_t segment_begin = begin_offsets[i]; uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]); if (segment_begin == segment_end) { return; } ::dpct::sort_pairs(::std::execution::seq, keys_in + segment_begin, keys_out + segment_begin, values_in + segment_begin, values_out + segment_begin, segment_end - segment_begin, descending, begin_bit, end_bit); }); }); policy.queue().wait(); } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename OffsetIteratorT> inline void segmented_sort_keys_by_parallel_for_of_sorts( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { policy.queue().submit([&](sycl::handler &cgh) { cgh.parallel_for(nsegments, [=](sycl::id<1> i) { uint64_t segment_begin = begin_offsets[i]; uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]); if (segment_begin == segment_end) { return; } ::dpct::sort_keys(::std::execution::seq, keys_in + segment_begin, keys_out + segment_begin, segment_end - segment_begin, descending, begin_bit, end_bit); }); }); policy.queue().wait(); } template <typename _ExecutionPolicy, typename OffsetIteratorT> inline void mark_segments(_ExecutionPolicy &&policy, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, int64_t n, int64_t nsegments, sycl::buffer<::std::size_t, 1> segments) { ::std::size_t work_group_size = policy.queue() .get_device() .template get_info<sycl::info::device::max_work_group_size>(); auto sg_sizes = policy.queue() .get_device() .template get_info<sycl::info::device::sub_group_sizes>(); ::std::size_t sub_group_size = sg_sizes.empty() ? 
0 : sg_sizes.back(); float avg_seg_size = (float)n / (float)nsegments; if (avg_seg_size > work_group_size) { // If average segment size is larger than workgroup, use workgroup to // coordinate to mark segments policy.queue() .submit([&](sycl::handler &h) { auto segments_acc = segments.get_access<sycl::access_mode::write>(h); h.parallel_for(work_group_size, ([=](sycl::id<1> id) { for (::std::size_t seg = 0; seg < nsegments; seg++) { ::std::size_t i = begin_offsets[seg]; ::std::size_t end = end_offsets[seg]; while (i + id < end) { segments_acc[i + id] = seg; i += work_group_size; } } })); }) .wait(); } else if (sub_group_size > 0 && avg_seg_size > sub_group_size / 2) { // If average segment size is larger than half a subgroup, use subgroup to // coordinate to mark segments policy.queue() .submit([&](sycl::handler &h) { auto segments_acc = segments.get_access<sycl::access_mode::write>(h); h.parallel_for( sycl::nd_range<1>{work_group_size, work_group_size}, ([=](sycl::nd_item<1> item) { auto sub_group = item.get_sub_group(); ::std::size_t num_subgroups = sub_group.get_group_range().size(); ::std::size_t local_size = sub_group.get_local_range().size(); ::std::size_t sub_group_id = sub_group.get_group_id(); while (sub_group_id < nsegments) { ::std::size_t subgroup_local_id = sub_group.get_local_id(); ::std::size_t i = begin_offsets[sub_group_id]; ::std::size_t end = end_offsets[sub_group_id]; while (i + subgroup_local_id < end) { segments_acc[i + subgroup_local_id] = sub_group_id; i += local_size; } sub_group_id += num_subgroups; } })); }) .wait(); } else { // If average segment size is small as compared to subgroup, use single // work item to mark each segment policy.queue() .submit([&](sycl::handler &h) { auto segments_acc = segments.get_access<sycl::access_mode::write>(h); h.parallel_for(nsegments, ([=](sycl::id<1> seg) { for (::std::size_t i = begin_offsets[seg]; i < end_offsets[seg]; i++) { segments_acc[i] = seg; } })); }) .wait(); } } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename OffsetIteratorT> inline void segmented_sort_keys_by_two_pair_sorts( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)}; sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)}; using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type; sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)}; mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets, end_offsets, n, nsegments, segments); // Part 1: Sort by keys keeping track of which segment were in dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in, oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(segments), oneapi::dpl::begin(segments_sorted), n, descending); // Part 2: Sort the segments with a stable sort to get back sorted segments. 
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), oneapi::dpl::begin(segments_sorted), oneapi::dpl::begin(segments), oneapi::dpl::begin(keys_temp), keys_out, n, false); } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t, typename OffsetIteratorT> inline void segmented_sort_pairs_by_two_pair_sorts( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_out_t values_in, value_t values_out, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)}; sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)}; using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type; sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)}; using value_t_value_t = typename ::std::iterator_traits<value_t>::value_type; sycl::buffer<value_t_value_t, 1> values_temp{sycl::range<1>(n)}; mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets, end_offsets, n, nsegments, segments); auto zip_seg_vals = oneapi::dpl::make_zip_iterator(oneapi::dpl::begin(segments), values_in); auto zip_seg_vals_out = oneapi::dpl::make_zip_iterator( oneapi::dpl::begin(segments_sorted), oneapi::dpl::begin(values_temp)); // Part 1: Sort by keys keeping track of which segment were in dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in, oneapi::dpl::begin(keys_temp), zip_seg_vals, zip_seg_vals_out, n, descending); auto zip_keys_vals = oneapi::dpl::make_zip_iterator( oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(values_temp)); auto zip_keys_vals_out = oneapi::dpl::make_zip_iterator(keys_out, values_out); // Part 2: Sort the segments with a stable sort to get back sorted segments. 
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), oneapi::dpl::begin(segments_sorted), oneapi::dpl::begin(segments), zip_keys_vals, zip_keys_vals_out, n, false); } } // end namespace internal template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t> inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value && dpct::internal::is_iterator<key_out_t>::value && dpct::internal::is_iterator<value_t>::value && dpct::internal::is_iterator<value_out_t>::value> sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t values_in, value_out_t values_out, int64_t n, bool descending, int begin_bit, int end_bit) { internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in, values_out, n, descending, begin_bit, end_bit); } template <typename _ExecutionPolicy, typename key_t, typename value_t> inline void sort_pairs( _ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, io_iterator_pair<value_t> &values, int64_t n, bool descending = false, bool do_swap_iters = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys.first(), keys.second(), values.first(), values.second(), n, descending, begin_bit, end_bit); if (do_swap_iters) { keys.swap(); values.swap(); } } template <typename _ExecutionPolicy, typename key_t, typename key_out_t> inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value && dpct::internal::is_iterator<key_out_t>::value> sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, bool descending, int begin_bit, int end_bit) { using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type; int clipped_begin_bit = ::std::max(begin_bit, 0); int clipped_end_bit = ::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8); int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1; auto transform_and_sort_f = [&](auto x) { using T = typename ::std::decay_t<decltype(x)>; internal::transform_and_sort<decltype(policy), key_t, key_out_t, T>( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n, descending, clipped_begin_bit, clipped_end_bit); }; if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) { internal::sort_only(::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n, descending); } else if (num_bytes == 1) { transform_and_sort_f.template operator()<uint8_t>(0); } else if (num_bytes == 2) { transform_and_sort_f.template operator()<uint16_t>(0); } else if (num_bytes <= 4) { transform_and_sort_f.template operator()<uint32_t>(0); } else // if (num_bytes <= 8) { transform_and_sort_f.template operator()<uint64_t>(0); } } template <typename _ExecutionPolicy, typename key_t> inline void sort_keys( _ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n, bool descending = false, bool do_swap_iters = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { sort_keys(std::forward<_ExecutionPolicy>(policy), keys.first(), keys.second(), n, descending, begin_bit, end_bit); if (do_swap_iters) keys.swap(); } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename OffsetIteratorT> inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value && dpct::internal::is_iterator<key_out_t>::value> segmented_sort_keys( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, int64_t 
nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { int compute_units = policy.queue() .get_device() .template get_info<sycl::info::device::max_compute_units>(); auto sg_sizes = policy.queue() .get_device() .template get_info<sycl::info::device::sub_group_sizes>(); int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back(); // parallel for of serial sorts when we have sufficient number of segments for // load balance when number of segments is large as compared to our target // compute capability if (nsegments > compute_units * (policy.queue().get_device().is_gpu() ? subgroup_size : 1)) { dpct::internal::segmented_sort_keys_by_parallel_for_of_sorts( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); } else if (nsegments < 512) // for loop of parallel sorts when we have a small // number of total sorts to limit total overhead { dpct::internal::segmented_sort_keys_by_parallel_sorts( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); } else // decent catch all using 2 full sorts { dpct::internal::segmented_sort_keys_by_two_pair_sorts( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); } } template <typename _ExecutionPolicy, typename key_t, typename OffsetIteratorT> inline void segmented_sort_keys( _ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, bool do_swap_iters = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { segmented_sort_keys(::std::forward<_ExecutionPolicy>(policy), keys.first(), keys.second(), n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); if (do_swap_iters) { keys.swap(); } } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t, typename OffsetIteratorT> inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value && dpct::internal::is_iterator<key_out_t>::value && dpct::internal::is_iterator<value_t>::value && dpct::internal::is_iterator<value_out_t>::value> segmented_sort_pairs( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { int compute_units = policy.queue() .get_device() .template get_info<sycl::info::device::max_compute_units>(); auto sg_sizes = policy.queue() .get_device() .template get_info<sycl::info::device::sub_group_sizes>(); int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back(); // parallel for of serial sorts when we have sufficient number of segments for // load balance when number of segments is large as compared to our target // compute capability if (nsegments > compute_units * (policy.queue().get_device().is_gpu() ? 
subgroup_size : 1)) { dpct::internal::segmented_sort_pairs_by_parallel_for_of_sorts( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in, values_out, n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); } else if (nsegments < 512) // for loop of parallel sorts when we have a small // number of total sorts to limit total overhead { dpct::internal::segmented_sort_pairs_by_parallel_sorts( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in, values_out, n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); } else // decent catch all using 2 full sorts { dpct::internal::segmented_sort_pairs_by_two_pair_sorts( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in, values_out, n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); } } template <typename _ExecutionPolicy, typename key_t, typename value_t, typename OffsetIteratorT> inline void segmented_sort_pairs( _ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, io_iterator_pair<value_t> &values, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, bool do_swap_iters = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { segmented_sort_pairs(std::forward<_ExecutionPolicy>(policy), keys.first(), keys.second(), values.first(), values.second(), n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); if (do_swap_iters) { keys.swap(); values.swap(); } } template <typename _ExecutionPolicy, typename Iter1, typename Iter2> inline void reduce_argmax(_ExecutionPolicy &&policy, Iter1 input, Iter2 output, ::std::size_t n) { dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input); auto ret = ::std::max_element( ::std::forward<_ExecutionPolicy>(policy), input_arg_idx, input_arg_idx + n, [](const auto &a, const auto &b) { return (a.value < b.value); }); ::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output); } template <typename _ExecutionPolicy, typename Iter1, typename Iter2> inline void reduce_argmin(_ExecutionPolicy &&policy, Iter1 input, Iter2 output, ::std::size_t n) { dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input); auto ret = ::std::min_element( ::std::forward<_ExecutionPolicy>(policy), input_arg_idx, input_arg_idx + n, [](const auto &a, const auto &b) { return (a.value < b.value); }); ::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output); } template <typename _ExecutionPolicy, typename Iter1, typename ValueLessComparable, typename StrictWeakOrdering> inline ::std::pair<Iter1, Iter1> equal_range(_ExecutionPolicy &&policy, Iter1 start, Iter1 end, const ValueLessComparable &value, StrictWeakOrdering comp) { ::std::vector<::std::int64_t> res_lower(1); ::std::vector<::std::int64_t> res_upper(1); ::std::vector<ValueLessComparable> value_vec(1, value); ::oneapi::dpl::lower_bound(policy, start, end, value_vec.begin(), value_vec.end(), res_lower.begin(), comp); ::oneapi::dpl::upper_bound(::std::forward<_ExecutionPolicy>(policy), start, end, value_vec.begin(), value_vec.end(), res_upper.begin(), comp); auto result = ::std::make_pair(start + res_lower[0], start + res_upper[0]); return result; } template <typename _ExecutionPolicy, typename Iter1, typename ValueLessComparable> inline ::std::pair<Iter1, Iter1> equal_range(_ExecutionPolicy &&policy, Iter1 start, Iter1 end, const ValueLessComparable &value) { return 
equal_range(::std::forward<_ExecutionPolicy>(policy), start, end, value, internal::__less()); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3> inline ::std::enable_if_t< dpct::internal::is_iterator<Iter1>::value && dpct::internal::is_iterator<Iter2>::value && internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value> segmented_reduce_argmin(Policy &&policy, Iter1 keys_in, Iter2 keys_out, ::std::int64_t nsegments, Iter3 begin_offsets, Iter3 end_offsets) { policy.queue().submit([&](sycl::handler &cgh) { cgh.parallel_for(nsegments, [=](sycl::id<1> i) { if (end_offsets[i] <= begin_offsets[i]) { keys_out[i] = dpct::key_value_pair( 1, ::std::numeric_limits< typename ::std::iterator_traits<Iter1>::value_type>::max()); } else { dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in + begin_offsets[i]); keys_out[i] = *::std::min_element( arg_index, arg_index + (end_offsets[i] - begin_offsets[i]), [](const auto &a, const auto &b) { return a.value < b.value; }); } }); }); policy.queue().wait(); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3> inline ::std::enable_if_t< dpct::internal::is_iterator<Iter1>::value && dpct::internal::is_iterator<Iter2>::value && internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value> segmented_reduce_argmax(Policy &&policy, Iter1 keys_in, Iter2 keys_out, ::std::int64_t nsegments, Iter3 begin_offsets, Iter3 end_offsets) { policy.queue().submit([&](sycl::handler &cgh) { cgh.parallel_for(nsegments, [=](sycl::id<1> i) { if (end_offsets[i] <= begin_offsets[i]) { keys_out[i] = dpct::key_value_pair( 1, ::std::numeric_limits< typename ::std::iterator_traits<Iter1>::value_type>::lowest()); } else { dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in + begin_offsets[i]); keys_out[i] = *::std::max_element( arg_index, arg_index + (end_offsets[i] - begin_offsets[i]), [](const auto &a, const auto &b) { return a.value < b.value; }); } }); }); policy.queue().wait(); } } // end namespace dpct #endif
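A minimal usage sketch of the sort helpers defined above may be useful; it is not part of the original header. The queue, element count, and function name below are assumptions made for illustration, and the snippet presumes the dpct headers declaring dpct::sort_keys are already included.

// Illustrative sketch only (not from the original header). Assumes USM
// shared allocations, a oneDPL device policy, and visibility of the dpct
// sort helpers defined above.
#include <oneapi/dpl/execution>
#include <sycl/sycl.hpp>

static void example_dpct_sort(sycl::queue &q) {
  auto policy = oneapi::dpl::execution::make_device_policy(q);

  const int n = 8;
  int *keys_in = sycl::malloc_shared<int>(n, q);
  int *keys_out = sycl::malloc_shared<int>(n, q);
  for (int i = 0; i < n; ++i) keys_in[i] = n - i;

  // Ascending sort over the full bit range of the key type; with the full
  // range the dispatch above takes the sort_only path rather than
  // transform_and_sort.
  dpct::sort_keys(policy, keys_in, keys_out, n,
                  /*descending=*/false, /*begin_bit=*/0,
                  /*end_bit=*/static_cast<int>(sizeof(int) * 8));

  sycl::free(keys_in, q);
  sycl::free(keys_out, q);
}

For segmented input, segmented_sort_keys above chooses among three strategies based on how the segment count compares with the device's compute units and subgroup size: a parallel_for of serial sorts, a host-side loop of parallel sorts, or a fallback built from two full pair sorts.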
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/memory.h
//==---- memory.h ---------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_MEMORY_H__ #define __DPCT_MEMORY_H__ #include <sycl/sycl.hpp> // Memory management section: // device_pointer, device_reference, swap, device_iterator, malloc_device, // device_new, free_device, device_delete namespace dpct { namespace detail { template <typename T> struct make_allocatable { using type = T; }; template <> struct make_allocatable<void> { using type = dpct::byte_t; }; #if defined(__LIBSYCL_MAJOR_VERSION) && defined(__LIBSYCL_MINOR_VERSION) && \ defined(__LIBSYCL_PATCH_VERSION) #define _DPCT_LIBSYCL_VERSION \ (__LIBSYCL_MAJOR_VERSION * 10000 + __LIBSYCL_MINOR_VERSION * 100 + \ __LIBSYCL_PATCH_VERSION) #else #define _DPCT_LIBSYCL_VERSION 0 #endif template <typename _DataT> using __buffer_allocator = #if _DPCT_LIBSYCL_VERSION >= 60000 sycl::buffer_allocator<typename make_allocatable<_DataT>::type>; #else sycl::buffer_allocator; #endif } // namespace detail #ifdef DPCT_USM_LEVEL_NONE template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write, typename Allocator = detail::__buffer_allocator<T>> class device_pointer; #else template <typename T> class device_pointer; #endif template <typename T> struct device_reference { using pointer = device_pointer<T>; using value_type = T; template <typename OtherT> device_reference(const device_reference<OtherT> &input) : value(input.value) {} device_reference(const pointer &input) : value((*input).value) {} device_reference(value_type &input) : value(input) {} template <typename OtherT> device_reference &operator=(const device_reference<OtherT> &input) { value = input; return *this; }; device_reference &operator=(const device_reference &input) { T val = input.value; value = val; return *this; }; device_reference &operator=(const value_type &x) { value = x; return *this; }; pointer operator&() const { return pointer(&value); }; operator value_type() const { return T(value); } device_reference &operator++() { ++value; return *this; }; device_reference &operator--() { --value; return *this; }; device_reference operator++(int) { device_reference ref(*this); ++(*this); return ref; }; device_reference operator--(int) { device_reference ref(*this); --(*this); return ref; }; device_reference &operator+=(const T &input) { value += input; return *this; }; device_reference &operator-=(const T &input) { value -= input; return *this; }; device_reference &operator*=(const T &input) { value *= input; return *this; }; device_reference &operator/=(const T &input) { value /= input; return *this; }; device_reference &operator%=(const T &input) { value %= input; return *this; }; device_reference &operator&=(const T &input) { value &= input; return *this; }; device_reference &operator|=(const T &input) { value |= input; return *this; }; device_reference &operator^=(const T &input) { value ^= input; return *this; }; device_reference &operator<<=(const T &input) { value <<= input; return *this; }; device_reference &operator>>=(const T &input) { value >>= input; return *this; }; void swap(device_reference &input) { T tmp = (*this); *this = (input); input = (tmp); } T &value; }; template <typename T> void swap(device_reference<T> &x, device_reference<T> &y) { x.swap(y); } template <typename T> void swap(T &x, T &y) { 
T tmp = x; x = y; y = tmp; } namespace internal { // struct for checking if iterator is heterogeneous or not template <typename Iter, typename Void = void> // for non-heterogeneous iterators struct is_hetero_iterator : std::false_type {}; template <typename Iter> // for heterogeneous iterators struct is_hetero_iterator< Iter, typename std::enable_if<Iter::is_hetero::value, void>::type> : std::true_type {}; } // namespace internal #ifdef DPCT_USM_LEVEL_NONE template <typename T, sycl::access_mode Mode, typename Allocator> class device_iterator; template <typename ValueType, typename Allocator, typename Derived> class device_pointer_base { protected: sycl::buffer<ValueType, 1, Allocator> buffer; std::size_t idx; public: using pointer = ValueType *; using difference_type = std::make_signed<std::size_t>::type; device_pointer_base(sycl::buffer<ValueType, 1> in, std::size_t i = 0) : buffer(in), idx(i) {} #ifdef __USE_DPCT template <typename OtherT> device_pointer_base(OtherT *ptr) : buffer( dpct::detail::mem_mgr::instance() .translate_ptr(ptr) .buffer.template reinterpret<ValueType, 1>(sycl::range<1>( dpct::detail::mem_mgr::instance().translate_ptr(ptr).size / sizeof(ValueType)))), idx(ptr - (ValueType*)dpct::detail::mem_mgr::instance() .translate_ptr(ptr).alloc_ptr) {} #endif device_pointer_base(const std::size_t count) : buffer(sycl::range<1>(count / sizeof(ValueType))), idx() {} // buffer has no default ctor we pass zero-range to create an empty buffer device_pointer_base() : buffer(sycl::range<1>(0)) {} device_pointer_base(const device_pointer_base &in) : buffer(in.buffer), idx(in.idx) {} pointer get() const { auto res = (const_cast<device_pointer_base *>(this) ->buffer.template get_access<sycl::access_mode::read_write>()) .get_pointer(); return res + idx; } operator ValueType *() { auto res = (buffer.template get_access<sycl::access_mode::read_write>()) .get_pointer(); return res + idx; } operator ValueType *() const { auto res = (const_cast<device_pointer_base *>(this) ->buffer.template get_access<sycl::access_mode::read_write>()) .get_pointer(); return res + idx; } Derived operator+(difference_type forward) const { return Derived{buffer, idx + forward}; } Derived operator-(difference_type backward) const { return Derived{buffer, idx - backward}; } Derived operator++(int) { Derived p(buffer, idx); idx += 1; return p; } Derived operator--(int) { Derived p(buffer, idx); idx -= 1; return p; } difference_type operator-(const Derived &it) const { return idx - it.idx; } template <typename OtherIterator> typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value, difference_type>::type operator-(const OtherIterator &it) const { return idx - std::distance(oneapi::dpl::begin(buffer), it); } std::size_t get_idx() const { return idx; } // required sycl::buffer<ValueType, 1, Allocator> get_buffer() { return buffer; } // required }; template <typename T, sycl::access_mode Mode, typename Allocator> class device_pointer : public device_pointer_base<T, Allocator, device_pointer<T, Mode, Allocator>> { private: using base_type = device_pointer_base<T, Allocator, device_pointer>; public: using value_type = T; using difference_type = std::make_signed<std::size_t>::type; using pointer = T *; using reference = T &; using iterator_category = std::random_access_iterator_tag; using is_hetero = std::true_type; // required using is_passed_directly = std::false_type; static constexpr sycl::access_mode mode = Mode; // required device_pointer(sycl::buffer<T, 1> in, std::size_t i = 0) : base_type(in, i) 
{} #ifdef __USE_DPCT template <typename OtherT> device_pointer(OtherT *ptr) : base_type(ptr) {} #endif // needed for malloc_device, count is number of bytes to allocate device_pointer(const std::size_t count) : base_type(count) {} device_pointer() : base_type() {} device_pointer(const device_pointer &in) : base_type(in) {} device_pointer &operator+=(difference_type forward) { this->idx += forward; return *this; } device_pointer &operator-=(difference_type backward) { this->idx -= backward; return *this; } // include operators from base class using base_type::operator++; using base_type::operator--; device_pointer &operator++() { this->idx += 1; return *this; } device_pointer &operator--() { this->idx -= 1; return *this; } }; template <sycl::access_mode Mode, typename Allocator> class device_pointer<void, Mode, Allocator> : public device_pointer_base<dpct::byte_t, Allocator, device_pointer<void, Mode, Allocator>> { private: using base_type = device_pointer_base<dpct::byte_t, Allocator, device_pointer>; public: using value_type = dpct::byte_t; using difference_type = std::make_signed<std::size_t>::type; using pointer = void *; using reference = value_type &; using iterator_category = std::random_access_iterator_tag; using is_hetero = std::true_type; // required using is_passed_directly = std::false_type; static constexpr sycl::access_mode mode = Mode; // required device_pointer(sycl::buffer<value_type, 1> in, std::size_t i = 0) : base_type(in, i) {} #ifdef __USE_DPCT template <typename OtherT> device_pointer(OtherT *ptr) : base_type(ptr) {} #endif // needed for malloc_device, count is number of bytes to allocate device_pointer(const std::size_t count) : base_type(count) {} device_pointer() : base_type() {} device_pointer(const device_pointer &in) : base_type(in) {} device_pointer &operator+=(difference_type forward) { this->idx += forward; return *this; } device_pointer &operator-=(difference_type backward) { this->idx -= backward; return *this; } // include operators from base class using base_type::operator++; using base_type::operator--; device_pointer &operator++() { this->idx += 1; return *this; } device_pointer &operator--() { this->idx -= 1; return *this; } }; #else template <typename T> class device_iterator; template <typename ValueType, typename Derived> class device_pointer_base { protected: ValueType *ptr; public: using pointer = ValueType *; using difference_type = std::make_signed<std::size_t>::type; device_pointer_base(ValueType *p) : ptr(p) {} device_pointer_base(const std::size_t count) { sycl::queue default_queue = dpct::get_default_queue(); ptr = static_cast<ValueType *>(sycl::malloc_shared( count, default_queue.get_device(), default_queue.get_context())); } device_pointer_base() {} pointer get() const { return ptr; } operator ValueType *() { return ptr; } operator ValueType *() const { return ptr; } ValueType &operator[](difference_type idx) { return ptr[idx]; } ValueType &operator[](difference_type idx) const { return ptr[idx]; } Derived operator+(difference_type forward) const { return Derived{ptr + forward}; } Derived operator-(difference_type backward) const { return Derived{ptr - backward}; } Derived operator++(int) { Derived p(ptr); ++ptr; return p; } Derived operator--(int) { Derived p(ptr); --ptr; return p; } difference_type operator-(const Derived &it) const { return ptr - it.ptr; } }; template <typename T> class device_pointer : public device_pointer_base<T, device_pointer<T>> { private: using base_type = device_pointer_base<T, device_pointer<T>>; public: using 
value_type = T; using difference_type = std::make_signed<std::size_t>::type; using pointer = T *; using reference = T &; using const_reference = const T &; using iterator_category = std::random_access_iterator_tag; using is_hetero = std::false_type; // required using is_passed_directly = std::true_type; // required device_pointer(T *p) : base_type(p) {} // needed for malloc_device, count is number of bytes to allocate device_pointer(const std::size_t count) : base_type(count) {} device_pointer() : base_type() {} device_pointer &operator=(const device_iterator<T> &in) { this->ptr = static_cast<device_pointer<T>>(in).ptr; return *this; } // include operators from base class using base_type::operator++; using base_type::operator--; device_pointer &operator++() { ++(this->ptr); return *this; } device_pointer &operator--() { --(this->ptr); return *this; } device_pointer &operator+=(difference_type forward) { this->ptr = this->ptr + forward; return *this; } device_pointer &operator-=(difference_type backward) { this->ptr = this->ptr - backward; return *this; } }; template <> class device_pointer<void> : public device_pointer_base<dpct::byte_t, device_pointer<void>> { private: using base_type = device_pointer_base<dpct::byte_t, device_pointer<void>>; public: using value_type = dpct::byte_t; using difference_type = std::make_signed<std::size_t>::type; using pointer = void *; using reference = value_type &; using const_reference = const value_type &; using iterator_category = std::random_access_iterator_tag; using is_hetero = std::false_type; // required using is_passed_directly = std::true_type; // required device_pointer(void *p) : base_type(static_cast<value_type *>(p)) {} // needed for malloc_device, count is number of bytes to allocate device_pointer(const std::size_t count) : base_type(count) {} device_pointer() : base_type() {} pointer get() const { return static_cast<pointer>(this->ptr); } operator void *() { return this->ptr; } operator void *() const { return this->ptr; } // include operators from base class using base_type::operator++; using base_type::operator--; device_pointer &operator++() { ++(this->ptr); return *this; } device_pointer &operator--() { --(this->ptr); return *this; } device_pointer &operator+=(difference_type forward) { this->ptr = this->ptr + forward; return *this; } device_pointer &operator-=(difference_type backward) { this->ptr = this->ptr - backward; return *this; } }; #endif #ifdef DPCT_USM_LEVEL_NONE template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write, typename Allocator = detail::__buffer_allocator<T>> class device_iterator : public device_pointer<T, Mode, Allocator> { using Base = device_pointer<T, Mode, Allocator>; public: using value_type = T; using difference_type = std::make_signed<std::size_t>::type; using pointer = T *; using reference = T &; using iterator_category = std::random_access_iterator_tag; using is_hetero = std::true_type; // required using is_passed_directly = std::false_type; // required static constexpr sycl::access_mode mode = Mode; // required device_iterator() : Base() {} device_iterator(sycl::buffer<T, 1, Allocator> vec, std::size_t index) : Base(vec, index) {} template <sycl::access_mode inMode> device_iterator(const device_iterator<T, inMode, Allocator> &in) : Base(in.buffer, in.idx) {} // required for iter_mode device_iterator &operator=(const device_iterator &in) { Base::buffer = in.buffer; Base::idx = in.idx; return *this; } reference operator*() const { return const_cast<device_iterator *>(this) 
->buffer.template get_access<mode>()[Base::idx]; } reference operator[](difference_type i) const { return *(*this + i); } device_iterator &operator++() { ++Base::idx; return *this; } device_iterator &operator--() { --Base::idx; return *this; } device_iterator operator++(int) { device_iterator it(*this); ++(*this); return it; } device_iterator operator--(int) { device_iterator it(*this); --(*this); return it; } device_iterator operator+(difference_type forward) const { const auto new_idx = Base::idx + forward; return {Base::buffer, new_idx}; } device_iterator &operator+=(difference_type forward) { Base::idx += forward; return *this; } device_iterator operator-(difference_type backward) const { return {Base::buffer, Base::idx - backward}; } device_iterator &operator-=(difference_type backward) { Base::idx -= backward; return *this; } friend device_iterator operator+(difference_type forward, const device_iterator &it) { return it + forward; } difference_type operator-(const device_iterator &it) const { return Base::idx - it.idx; } template <typename OtherIterator> typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value, difference_type>::type operator-(const OtherIterator &it) const { return Base::idx - std::distance(oneapi::dpl::begin(Base::buffer), it); } bool operator==(const device_iterator &it) const { return *this - it == 0; } bool operator!=(const device_iterator &it) const { return !(*this == it); } bool operator<(const device_iterator &it) const { return *this - it < 0; } bool operator>(const device_iterator &it) const { return it < *this; } bool operator<=(const device_iterator &it) const { return !(*this > it); } bool operator>=(const device_iterator &it) const { return !(*this < it); } std::size_t get_idx() const { return Base::idx; } // required sycl::buffer<T, 1, Allocator> get_buffer() { return Base::buffer; } // required }; #else template <typename T> class device_iterator : public device_pointer<T> { using Base = device_pointer<T>; protected: std::size_t idx; public: using value_type = T; using difference_type = std::make_signed<std::size_t>::type; using pointer = typename Base::pointer; using reference = typename Base::reference; using iterator_category = std::random_access_iterator_tag; using is_hetero = std::false_type; // required using is_passed_directly = std::true_type; // required static constexpr sycl::access_mode mode = sycl::access_mode::read_write; // required device_iterator() : Base(nullptr), idx(0) {} device_iterator(T *vec, std::size_t index) : Base(vec), idx(index) {} template <sycl::access_mode inMode> device_iterator(const device_iterator<T> &in) : Base(in.ptr), idx(in.idx) {} // required for iter_mode device_iterator &operator=(const device_iterator &in) { Base::operator=(in); idx = in.idx; return *this; } reference operator*() const { return *(Base::ptr + idx); } reference operator[](difference_type i) { return Base::ptr[idx + i]; } reference operator[](difference_type i) const { return Base::ptr[idx + i]; } device_iterator &operator++() { ++idx; return *this; } device_iterator &operator--() { --idx; return *this; } device_iterator operator++(int) { device_iterator it(*this); ++(*this); return it; } device_iterator operator--(int) { device_iterator it(*this); --(*this); return it; } device_iterator operator+(difference_type forward) const { const auto new_idx = idx + forward; return {Base::ptr, new_idx}; } device_iterator &operator+=(difference_type forward) { idx += forward; return *this; } device_iterator operator-(difference_type 
backward) const { return {Base::ptr, idx - backward}; } device_iterator &operator-=(difference_type backward) { idx -= backward; return *this; } friend device_iterator operator+(difference_type forward, const device_iterator &it) { return it + forward; } difference_type operator-(const device_iterator &it) const { return idx - it.idx; } template <typename OtherIterator> typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value, difference_type>::type operator-(const OtherIterator &it) const { return idx - it.get_idx(); } bool operator==(const device_iterator &it) const { return *this - it == 0; } bool operator!=(const device_iterator &it) const { return !(*this == it); } bool operator<(const device_iterator &it) const { return *this - it < 0; } bool operator>(const device_iterator &it) const { return it < *this; } bool operator<=(const device_iterator &it) const { return !(*this > it); } bool operator>=(const device_iterator &it) const { return !(*this < it); } std::size_t get_idx() const { return idx; } // required device_iterator &get_buffer() { return *this; } // required std::size_t size() const { return idx; } }; #endif template <typename T> device_pointer<T> malloc_device(const std::size_t num_elements) { return device_pointer<T>(num_elements * sizeof(T)); } static inline device_pointer<void> malloc_device(const std::size_t num_bytes) { return device_pointer<void>(num_bytes); } template <typename T> device_pointer<T> device_new(device_pointer<T> p, const T &value, const std::size_t count = 1) { std::vector<T> result(count, value); p.buffer = sycl::buffer<T, 1>(result.begin(), result.end()); return p + count; } template <typename T> device_pointer<T> device_new(device_pointer<T> p, const std::size_t count = 1) { return device_new(p, T{}, count); } template <typename T> device_pointer<T> device_new(const std::size_t count = 1) { return device_pointer<T>(count); } template <typename T> void free_device(device_pointer<T> ptr) {} template <typename T> typename std::enable_if<!std::is_trivially_destructible<T>::value, void>::type device_delete(device_pointer<T> p, const std::size_t count = 1) { for (std::size_t i = 0; i < count; ++i) { p[i].~T(); } } template <typename T> typename std::enable_if<std::is_trivially_destructible<T>::value, void>::type device_delete(device_pointer<T>, const std::size_t count = 1) {} template <typename T> device_pointer<T> get_device_pointer(T *ptr) { return device_pointer<T>(ptr); } template <typename T> device_pointer<T> get_device_pointer(const device_pointer<T> &ptr) { return device_pointer<T>(ptr); } template <typename T> T *get_raw_pointer(const device_pointer<T> &ptr) { return ptr.get(); } template <typename Pointer> Pointer get_raw_pointer(const Pointer &ptr) { return ptr; } template <typename T> const T &get_raw_reference(const device_reference<T> &ref) { return ref.value; } template <typename T> T &get_raw_reference(device_reference<T> &ref) { return ref.value; } template <typename T> const T &get_raw_reference(const T &ref) { return ref; } template <typename T> T &get_raw_reference(T &ref) { return ref; } } // namespace dpct #endif
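A small usage sketch for the pointer helpers above; it is not part of the original header. The element count and function name are illustrative, and it assumes the USM build (DPCT_USM_LEVEL_NONE not defined), in which device_pointer wraps a raw USM allocation.

// Illustrative sketch only (not from the original header). Assumes the USM
// configuration, where the device_pointer size constructor obtains shared
// USM memory from dpct::get_default_queue().
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>

static void example_device_pointer() {
  const int n = 16;

  // Allocate n ints; device_pointer carries random-access iterator traits.
  dpct::device_pointer<int> p = dpct::malloc_device<int>(n);

  // Fill through the underlying pointer with a oneDPL device policy.
  int *raw = dpct::get_raw_pointer(p);
  auto policy =
      oneapi::dpl::execution::make_device_policy(dpct::get_default_queue());
  std::fill(policy, raw, raw + n, 42);

  dpct::free_device(p); // declared above as the counterpart of malloc_device
}

In the DPCT_USM_LEVEL_NONE build the same device_pointer name instead wraps a sycl::buffer plus an index, so code written against these helpers stays source-compatible across the two memory models.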
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/vector.h
//==---- vector.h ---------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_VECTOR_H__ #define __DPCT_VECTOR_H__ #include <oneapi/dpl/algorithm> #include <oneapi/dpl/execution> #include <sycl/sycl.hpp> #include "memory.h" #include <algorithm> #include <iterator> #include <vector> #include "../device.hpp" namespace dpct { namespace internal { template <typename Iter, typename Void = void> // for non-iterators struct is_iterator : std::false_type {}; template <typename Iter> // For iterators struct is_iterator< Iter, typename std::enable_if< !std::is_void<typename Iter::iterator_category>::value, void>::type> : std::true_type {}; template <typename T> // For pointers struct is_iterator<T *> : std::true_type {}; } // end namespace internal #ifndef DPCT_USM_LEVEL_NONE template <typename T, typename Allocator = sycl::usm_allocator<T, sycl::usm::alloc::shared>> class device_vector { public: using iterator = device_iterator<T>; using const_iterator = const iterator; using reference = device_reference<T>; using const_reference = const reference; using value_type = T; using pointer = T *; using const_pointer = const T *; using difference_type = typename ::std::iterator_traits<iterator>::difference_type; using size_type = ::std::size_t; private: Allocator _alloc; size_type _size; size_type _capacity; pointer _storage; size_type _min_capacity() const { return size_type(1); } void _set_capacity_and_alloc() { _capacity = ::std::max(_size * 2, _min_capacity()); _storage = _alloc.allocate(_capacity); } public: template <typename OtherA> operator ::std::vector<T, OtherA>() const { auto __tmp = ::std::vector<T, OtherA>(this->size()); ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), this->begin(), this->end(), __tmp.begin()); return __tmp; } device_vector() : _alloc(get_default_queue()), _size(0), _capacity(_min_capacity()) { _set_capacity_and_alloc(); } ~device_vector() /*= default*/ { _alloc.deallocate(_storage, _capacity); }; explicit device_vector(size_type n) : device_vector(n, T()) {} explicit device_vector(size_type n, const T &value) : _alloc(get_default_queue()), _size(n) { _set_capacity_and_alloc(); if (_size > 0) { ::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()), begin(), end(), T(value)); } } device_vector(const device_vector &other) : _alloc(get_default_queue()) { _size = other.size(); _capacity = other.capacity(); _storage = _alloc.allocate(_capacity); ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), other.begin(), other.end(), begin()); } device_vector(device_vector &&other) : _alloc(get_default_queue()), _size(other.size()), _capacity(other.capacity()), _storage(other._storage) { other._size = 0; other._capacity = 0; other._storage = nullptr; } template <typename InputIterator> device_vector(InputIterator first, typename ::std::enable_if< internal::is_iterator<InputIterator>::value && !::std::is_pointer<InputIterator>::value && ::std::is_same<typename ::std::iterator_traits< InputIterator>::iterator_category, ::std::random_access_iterator_tag>::value, InputIterator>::type last) : _alloc(get_default_queue()) { _size = ::std::distance(first, last); _set_capacity_and_alloc(); if (_size > 0) { 
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), first, last, begin()); } } template <typename InputIterator> device_vector(InputIterator first, typename ::std::enable_if<::std::is_pointer<InputIterator>::value, InputIterator>::type last) : _alloc(get_default_queue()) { _size = ::std::distance(first, last); _set_capacity_and_alloc(); if (_size > 0) { auto ptr_type = sycl::get_pointer_type(first, get_default_context()); if (ptr_type != sycl::usm::alloc::host && ptr_type != sycl::usm::alloc::unknown) { ::std::copy( oneapi::dpl::execution::make_device_policy(get_default_queue()), first, last, begin()); } else { sycl::buffer<T, 1> buf(first, last); auto buf_first = oneapi::dpl::begin(buf); auto buf_last = oneapi::dpl::end(buf); ::std::copy( oneapi::dpl::execution::make_device_policy(get_default_queue()), buf_first, buf_last, begin()); } } } template <typename InputIterator> device_vector(InputIterator first, typename ::std::enable_if< internal::is_iterator<InputIterator>::value && !::std::is_pointer<InputIterator>::value && !::std::is_same<typename ::std::iterator_traits< InputIterator>::iterator_category, ::std::random_access_iterator_tag>::value, InputIterator>::type last) : _alloc(get_default_queue()), _size(::std::distance(first, last)) { _set_capacity_and_alloc(); ::std::vector<T> _tmp(first, last); if (_size > 0) { ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), _tmp.begin(), _tmp.end(), this->begin()); } } template <typename OtherAllocator> device_vector(const device_vector<T, OtherAllocator> &v) : _alloc(get_default_queue()), _storage(v.real_begin()), _size(v.size()), _capacity(v.capacity()) {} template <typename OtherAllocator> device_vector(::std::vector<T, OtherAllocator> &v) : _alloc(get_default_queue()), _size(v.size()) { _set_capacity_and_alloc(); if (_size > 0) { ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), v.begin(), v.end(), this->begin()); } } template <typename OtherAllocator> device_vector &operator=(const ::std::vector<T, OtherAllocator> &v) { resize(v.size()); if (_size > 0) { ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), v.begin(), v.end(), begin()); } return *this; } device_vector &operator=(const device_vector &other) { // Copy assignment operator: resize(other.size()); if (_size > 0) { ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), other.begin(), other.end(), begin()); } return *this; } device_vector &operator=(device_vector &&other) { // Move assignment operator: device_vector dummy(::std::move(other)); this->swap(dummy); return *this; } size_type size() const { return _size; } iterator begin() noexcept { return device_iterator<T>(_storage, 0); } iterator end() { return device_iterator<T>(_storage, size()); } const_iterator begin() const noexcept { return device_iterator<T>(_storage, 0); } const_iterator cbegin() const noexcept { return begin(); } const_iterator end() const { return device_iterator<T>(_storage, size()); } const_iterator cend() const { return end(); } T *real_begin() { return _storage; } const T *real_begin() const { return _storage; } void swap(device_vector &v) { ::std::swap(_size, v._size); ::std::swap(_capacity, v._capacity); ::std::swap(_storage, v._storage); ::std::swap(_alloc, v._alloc); } reference operator[](size_type n) { return _storage[n]; } const_reference operator[](size_type n) const { return _storage[n]; } void reserve(size_type n) { if (n > capacity()) { // allocate buffer for new size 
auto tmp = _alloc.allocate(2 * n); // copy content (old buffer to new buffer) ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), begin(), end(), tmp); // deallocate old memory _alloc.deallocate(_storage, _capacity); _storage = tmp; _capacity = 2 * n; } } void resize(size_type new_size, const T &x = T()) { reserve(new_size); if (_size < new_size) { ::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()), begin() + _size, begin() + new_size, x); } _size = new_size; } size_type max_size(void) const { return ::std::numeric_limits<size_type>::max() / sizeof(T); } size_type capacity() const { return _capacity; } const_reference front() const { return *begin(); } reference front() { return *begin(); } const_reference back(void) const { return *(end() - 1); } reference back(void) { return *(end() - 1); } pointer data(void) { return _storage; } const_pointer data(void) const { return _storage; } void shrink_to_fit(void) { if (_size != capacity()) { size_type tmp_capacity = ::std::max(_size, _min_capacity()); auto tmp = _alloc.allocate(tmp_capacity); if (_size > 0) { ::std::copy( oneapi::dpl::execution::make_device_policy(get_default_queue()), begin(), end(), tmp); } _alloc.deallocate(_storage, _capacity); _storage = tmp; _capacity = tmp_capacity; } } void assign(size_type n, const T &x) { resize(n); if (_size > 0) { ::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()), begin(), begin() + n, x); } } template <typename InputIterator> void assign(InputIterator first, typename ::std::enable_if<internal::is_iterator<InputIterator>::value, InputIterator>::type last) { auto n = ::std::distance(first, last); resize(n); if (_size > 0) { ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), first, last, begin()); } } void clear(void) { _size = 0; } bool empty(void) const { return (size() == 0); } void push_back(const T &x) { insert(end(), size_type(1), x); } void pop_back(void) { if (_size > 0) --_size; } iterator erase(iterator first, iterator last) { auto n = ::std::distance(first, last); if (last == end()) { _size = _size - n; return end(); } auto m = ::std::distance(last, end()); if (m <= 0) { return end(); } auto tmp = _alloc.allocate(m); // copy remainder to temporary buffer. ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), last, end(), tmp); // override (erase) subsequence in storage. 
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), tmp, tmp + m, first); _alloc.deallocate(tmp, m); _size -= n; return begin() + first.get_idx() + n; } iterator erase(iterator pos) { return erase(pos, pos + 1); } iterator insert(iterator position, const T &x) { auto n = ::std::distance(begin(), position); insert(position, size_type(1), x); return begin() + n; } void insert(iterator position, size_type n, const T &x) { if (position == end()) { resize(size() + n); ::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()), end() - n, end(), x); } else { auto i_n = ::std::distance(begin(), position); // allocate temporary storage auto m = ::std::distance(position, end()); // will throw if position is not inside active vector auto tmp = _alloc.allocate(m); // copy remainder ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), position, end(), tmp); resize(size() + n); // resizing might invalidate position position = begin() + position.get_idx(); ::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()), position, position + n, x); ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), tmp, tmp + m, position + n); _alloc.deallocate(tmp, m); } } template <typename InputIterator> void insert(iterator position, InputIterator first, typename ::std::enable_if<internal::is_iterator<InputIterator>::value, InputIterator>::type last) { auto n = ::std::distance(first, last); if (position == end()) { resize(size() + n); ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), first, last, end()); } else { auto m = ::std::distance(position, end()); // will throw if position is not inside active vector auto tmp = _alloc.allocate(m); ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), position, end(), tmp); resize(size() + n); // resizing might invalidate position position = begin() + position.get_idx(); ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), first, last, position); ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), tmp, tmp + m, position + n); _alloc.deallocate(tmp, m); } } Allocator get_allocator() const { return _alloc; } }; #else template <typename T, typename Allocator = detail::__buffer_allocator<T>> class device_vector { static_assert( std::is_same<Allocator, detail::__buffer_allocator<T>>::value, "device_vector doesn't support custom allocator when USM is not used."); public: using iterator = device_iterator<T>; using const_iterator = const iterator; using reference = device_reference<T>; using const_reference = const reference; using value_type = T; using pointer = T *; using const_pointer = const T *; using difference_type = typename std::iterator_traits<iterator>::difference_type; using size_type = std::size_t; private: using Buffer = sycl::buffer<T, 1>; using Range = sycl::range<1>; // Using mem_mgr to handle memory allocation void *_storage; size_type _size; size_type _min_capacity() const { return size_type(1); } void *alloc_store(size_type num_bytes) { return detail::mem_mgr::instance().mem_alloc(num_bytes); } public: template <typename OtherA> operator std::vector<T, OtherA>() const { auto __tmp = std::vector<T, OtherA>(this->size()); std::copy(oneapi::dpl::execution::dpcpp_default, this->begin(), this->end(), __tmp.begin()); return __tmp; } device_vector() : _storage(alloc_store(_min_capacity() * sizeof(T))), _size(0) {} ~device_vector() = default; explicit 
device_vector(size_type n) : device_vector(n, T()) {} explicit device_vector(size_type n, const T &value) : _storage(alloc_store(std::max(n, _min_capacity()) * sizeof(T))), _size(n) { auto buf = get_buffer(); std::fill(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(buf), oneapi::dpl::begin(buf) + n, T(value)); } device_vector(const device_vector &other) : _storage(other._storage), _size(other.size()) {} device_vector(device_vector &&other) : _storage(std::move(other._storage)), _size(other.size()) {} template <typename InputIterator> device_vector(InputIterator first, typename std::enable_if< internal::is_iterator<InputIterator>::value && !std::is_pointer<InputIterator>::value && std::is_same<typename std::iterator_traits< InputIterator>::iterator_category, std::random_access_iterator_tag>::value, InputIterator>::type last) : _storage(alloc_store(std::distance(first, last) * sizeof(T))), _size(std::distance(first, last)) { auto buf = get_buffer(); auto dst = oneapi::dpl::begin(buf); std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), first, last, dst); } template <typename InputIterator> device_vector(InputIterator first, typename std::enable_if<std::is_pointer<InputIterator>::value, InputIterator>::type last) : _storage(alloc_store(std::distance(first, last) * sizeof(T))), _size(std::distance(first, last)) { auto buf = get_buffer(); Buffer tmp_buf(first, last); auto start = oneapi::dpl::begin(tmp_buf); auto end = oneapi::dpl::end(tmp_buf); auto dst = oneapi::dpl::begin(buf); std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), start, end, dst); } template <typename InputIterator> device_vector(InputIterator first, typename std::enable_if< internal::is_iterator<InputIterator>::value && !std::is_same<typename std::iterator_traits< InputIterator>::iterator_category, std::random_access_iterator_tag>::value, InputIterator>::type last) : _storage(alloc_store(std::distance(first, last) * sizeof(T))), _size(std::distance(first, last)) { auto buf = get_buffer(); std::vector<T> tmp(first, last); Buffer tmp_buf(tmp); auto start = oneapi::dpl::begin(tmp_buf); auto end = oneapi::dpl::end(tmp_buf); auto dst = oneapi::dpl::begin(buf); std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), start, end, dst); } template <typename OtherAllocator> device_vector(const device_vector<T, OtherAllocator> &v) : _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) { auto buf = get_buffer(); auto dst = oneapi::dpl::begin(buf); std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), v.real_begin(), v.real_begin() + v.size(), dst); } template <typename OtherAllocator> device_vector(std::vector<T, OtherAllocator> &v) : _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) { std::copy(oneapi::dpl::execution::dpcpp_default, v.begin(), v.end(), oneapi::dpl::begin(get_buffer())); } device_vector &operator=(const device_vector &other) { // Copy assignment operator: _size = other.size(); void *tmp = alloc_store(_size * sizeof(T)); auto tmp_buf = detail::mem_mgr::instance() .translate_ptr(tmp) .buffer.template reinterpret<T, 1>(sycl::range<1>(_size)); std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(other.get_buffer()), oneapi::dpl::end(other.get_buffer()), oneapi::dpl::begin(tmp_buf)); detail::mem_mgr::instance().mem_free(_storage); _storage = tmp; return *this; } device_vector &operator=(device_vector &&other) { // Move assignment operator: _size = other.size(); this->_storage = 
std::move(other._storage); return *this; } template <typename OtherAllocator> device_vector &operator=(const std::vector<T, OtherAllocator> &v) { Buffer data(v.begin(), v.end()); _size = v.size(); void *tmp = alloc_store(_size * sizeof(T)); auto tmp_buf = detail::mem_mgr::instance() .translate_ptr(tmp) .buffer.template reinterpret<T, 1>(sycl::range<1>(_size)); std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(data), oneapi::dpl::end(data), oneapi::dpl::begin(tmp_buf)); detail::mem_mgr::instance().mem_free(_storage); _storage = tmp; return *this; } Buffer get_buffer() const { return detail::mem_mgr::instance() .translate_ptr(_storage) .buffer.template reinterpret<T, 1>(sycl::range<1>(capacity())); } size_type size() const { return _size; } iterator begin() noexcept { return device_iterator<T>(get_buffer(), 0); } iterator end() { return device_iterator<T>(get_buffer(), _size); } const_iterator begin() const noexcept { return device_iterator<T>(get_buffer(), 0); } const_iterator cbegin() const noexcept { return begin(); } const_iterator end() const { return device_iterator<T>(get_buffer(), _size); } const_iterator cend() const { return end(); } T *real_begin() { return (detail::mem_mgr::instance() .translate_ptr(_storage) .buffer.template get_access<sycl::access_mode::read_write>()) .get_pointer(); } const T *real_begin() const { return const_cast<device_vector *>(this) ->detail::mem_mgr::instance() .translate_ptr(_storage) .buffer.template get_access<sycl::access_mode::read_write>() .get_pointer(); } void swap(device_vector &v) { void *temp = v._storage; v._storage = this->_storage; this->_storage = temp; std::swap(_size, v._size); } reference operator[](size_type n) { return *(begin() + n); } const_reference operator[](size_type n) const { return *(begin() + n); } void reserve(size_type n) { if (n > capacity()) { // create new buffer (allocate for new size) void *a = alloc_store(n * sizeof(T)); // copy content (old buffer to new buffer) if (_storage != nullptr) { auto tmp = detail::mem_mgr::instance() .translate_ptr(a) .buffer.template reinterpret<T, 1>(sycl::range<1>(n)); auto src_buf = get_buffer(); std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(src_buf), oneapi::dpl::end(src_buf), oneapi::dpl::begin(tmp)); // deallocate old memory detail::mem_mgr::instance().mem_free(_storage); } _storage = a; } } void resize(size_type new_size, const T &x = T()) { reserve(new_size); if (_size < new_size) { auto src_buf = get_buffer(); std::fill(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(src_buf) + _size, oneapi::dpl::begin(src_buf) + new_size, x); } _size = new_size; } size_type max_size(void) const { return std::numeric_limits<size_type>::max() / sizeof(T); } size_type capacity() const { return _storage != nullptr ? 
detail::mem_mgr::instance() .translate_ptr(_storage) .buffer.size() / sizeof(T) : 0; } const_reference front() const { return *begin(); } reference front() { return *begin(); } const_reference back(void) const { return *(end() - 1); } reference back(void) { return *(end() - 1); } pointer data(void) { return reinterpret_cast<pointer>(_storage); } const_pointer data(void) const { return reinterpret_cast<const_pointer>(_storage); } void shrink_to_fit(void) { if (_size != capacity()) { void *a = alloc_store(_size * sizeof(T)); auto tmp = detail::mem_mgr::instance() .translate_ptr(a) .buffer.template reinterpret<T, 1>(sycl::range<1>(_size)); std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(get_buffer()), oneapi::dpl::begin(get_buffer()) + _size, oneapi::dpl::begin(tmp)); detail::mem_mgr::instance().mem_free(_storage); _storage = a; } } void assign(size_type n, const T &x) { resize(n); std::fill(oneapi::dpl::execution::dpcpp_default, begin(), begin() + n, x); } template <typename InputIterator> void assign(InputIterator first, typename std::enable_if<internal::is_iterator<InputIterator>::value, InputIterator>::type last) { auto n = std::distance(first, last); resize(n); if (internal::is_iterator<InputIterator>::value && !std::is_pointer<InputIterator>::value) std::copy(oneapi::dpl::execution::dpcpp_default, first, last, begin()); else { Buffer tmp(first, last); std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp), oneapi::dpl::end(tmp), begin()); } } void clear(void) { _size = 0; detail::mem_mgr::instance().mem_free(_storage); _storage = nullptr; } bool empty(void) const { return (size() == 0); } void push_back(const T &x) { insert(end(), size_type(1), x); } void pop_back(void) { if (_size > 0) --_size; } iterator erase(iterator first, iterator last) { auto n = std::distance(first, last); if (last == end()) { _size = _size - n; return end(); } Buffer tmp{Range(std::distance(last, end()))}; // copy remainder to temporary buffer. std::copy(oneapi::dpl::execution::dpcpp_default, last, end(), oneapi::dpl::begin(tmp)); // override (erase) subsequence in storage. 
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp), oneapi::dpl::end(tmp), first); resize(_size - n); return begin() + first.get_idx() + n; } iterator erase(iterator pos) { return erase(pos, pos + 1); } iterator insert(iterator position, const T &x) { auto n = std::distance(begin(), position); insert(position, size_type(1), x); return begin() + n; } void insert(iterator position, size_type n, const T &x) { if (position == end()) { resize(size() + n); std::fill(oneapi::dpl::execution::dpcpp_default, end() - n, end(), x); } else { auto i_n = std::distance(begin(), position); // allocate temporary storage Buffer tmp{Range(std::distance(position, end()))}; // copy remainder std::copy(oneapi::dpl::execution::dpcpp_default, position, end(), oneapi::dpl::begin(tmp)); resize(size() + n); // resizing might invalidate position position = begin() + position.get_idx(); std::fill(oneapi::dpl::execution::dpcpp_default, position, position + n, x); std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp), oneapi::dpl::end(tmp), position + n); } } template <typename InputIterator> void insert(iterator position, InputIterator first, typename std::enable_if<internal::is_iterator<InputIterator>::value, InputIterator>::type last) { auto n = std::distance(first, last); if (position == end()) { resize(size() + n); std::copy(oneapi::dpl::execution::dpcpp_default, first, last, end()); } else { Buffer tmp{Range(std::distance(position, end()))}; std::copy(oneapi::dpl::execution::dpcpp_default, position, end(), oneapi::dpl::begin(tmp)); resize(size() + n); // resizing might invalidate position position = begin() + position.get_idx(); std::copy(oneapi::dpl::execution::dpcpp_default, first, last, position); std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp), oneapi::dpl::end(tmp), position + n); } } }; #endif } // end namespace dpct #endif
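A minimal usage sketch of the dpct::device_vector container defined above. The include paths (dpct/dpct.hpp and dpct/dpl_extras/vector.h) and the exact project layout are assumptions; the iterator-range constructor, resize/push_back, and the implicit conversion back to std::vector are taken from the header itself.

// Hypothetical include paths; adjust to the local dpct header layout.
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <dpct/dpl_extras/vector.h>
#include <iostream>
#include <vector>

int main() {
  std::vector<float> host{1.f, 2.f, 3.f};

  // Construct from host iterators; the data is copied to the device
  // (a sycl::buffer or a USM allocation, depending on configuration).
  dpct::device_vector<float> d(host.begin(), host.end());

  // Grow the container; new elements are filled with the supplied value.
  d.resize(5, 0.f);
  d.push_back(42.f);

  // The templated conversion operator copies the device data back to host.
  std::vector<float> out = d;
  for (float v : out) std::cout << v << ' ';   // 1 2 3 0 0 42
  std::cout << '\n';
  return 0;
}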
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/dpcpp_extensions.h
//==---- dpcpp_extensions.h ------------------*- C++ -*---------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------===// #ifndef __DPCT_DPCPP_EXTENSIONS_H__ #define __DPCT_DPCPP_EXTENSIONS_H__ #include <sycl/sycl.hpp> #include <stdexcept> #ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS #include <sycl/ext/oneapi/experimental/user_defined_reductions.hpp> #endif #include "../dpct.hpp" namespace dpct { namespace group { namespace detail { template <typename... _Args> constexpr auto __reduce_over_group(_Args... __args) { return sycl::reduce_over_group(__args...); } template <typename... _Args> constexpr auto __group_broadcast(_Args... __args) { return sycl::group_broadcast(__args...); } template <typename... _Args> constexpr auto __exclusive_scan_over_group(_Args... __args) { return sycl::exclusive_scan_over_group(__args...); } template <typename... _Args> constexpr auto __inclusive_scan_over_group(_Args... __args) { return sycl::inclusive_scan_over_group(__args...); } } // end namespace detail /// Perform an exclusive scan over the values of inputs from all work-items in /// the group using the operator binary_op, which must be one of the SYCL 2020 /// group algorithms library function objects. /// /// \param item A work-item in a group. /// \param inputs Pointer to the input data for the scan operation. /// \param outputs Pointer to the location where scan results will be stored. /// \param init initial value of the scan result. /// \param binary_op functor that implements the binary operation used to /// perform the scan. template <typename Item, typename T, class BinaryOperation, int VALUES_PER_THREAD> __dpct_inline__ void exclusive_scan(const Item &item, T (&inputs)[VALUES_PER_THREAD], T (&outputs)[VALUES_PER_THREAD], T init, BinaryOperation binary_op) { T result = inputs[0]; #pragma unroll for (int i = 1; i < VALUES_PER_THREAD; ++i) { result = binary_op(result, inputs[i]); } T exclusive_result = detail::__exclusive_scan_over_group(item.get_group(), result, binary_op); T input = inputs[0]; if (item.get_local_linear_id() == 0) { outputs[0] = init; } else { outputs[0] = exclusive_result; } #pragma unroll for (int i = 1; i < VALUES_PER_THREAD; ++i) { T output = binary_op(input, outputs[i - 1]); input = inputs[i]; outputs[i] = output; } } /// Perform an exclusive scan over the values of input from all work-items in /// the group using the operator binary_op, which must be one of the SYCL 2020 /// group algorithms library function objects. /// /// \param item A work-item in a group. /// \param input Input data for the scan operation. /// \param init initial value of the scan result. /// \param binary_op functor that implements the binary operation used to /// perform the scan. \param group_aggregate group-wide aggregate of all inputs /// in the work-items of the group. \returns exclusive scan of the first i /// work-items where item is the i-th work item. 
template <typename Item, typename T, class BinaryOperation> __dpct_inline__ T exclusive_scan(const Item &item, T input, T init, BinaryOperation binary_op, T &group_aggregate) { T output = detail::__exclusive_scan_over_group(item.get_group(), input, init, binary_op); if (item.get_local_linear_id() == item.get_local_range().size() - 1) { group_aggregate = binary_op(output, input); } group_aggregate = detail::__group_broadcast( item.get_group(), group_aggregate, item.get_local_range().size() - 1); return output; } /// Perform an exclusive scan over the values of input from all work-items in /// the group using the operator binary_op, which must be one of the SYCL 2020 /// group algorithms library function objects. /// /// \param item A work-item in a group. /// \param input Input data for the scan operation. /// \param binary_op functor that implements the binary operation used to /// perform the scan. \param prefix_callback_op functor invoked by the first /// work-item in the group that returns the /// initial value in the resulting scan of the work-items in the group. /// \returns exclusive scan of the input elements assigned to work-items in the /// group. template <typename Item, typename T, class BinaryOperation, class GroupPrefixCallbackOperation> __dpct_inline__ T exclusive_scan(const Item &item, T input, BinaryOperation binary_op, GroupPrefixCallbackOperation &prefix_callback_op) { T group_aggregate; T output = detail::__exclusive_scan_over_group(item.get_group(), input, binary_op); if (item.get_local_linear_id() == item.get_local_range().size() - 1) { group_aggregate = binary_op(output, input); } group_aggregate = detail::__group_broadcast( item.get_group(), group_aggregate, item.get_local_range().size() - 1); T group_prefix = prefix_callback_op(group_aggregate); if (item.get_local_linear_id() == 0) { output = group_prefix; } else { output = binary_op(group_prefix, output); } return output; } namespace detail { typedef uint16_t digit_counter_type; typedef uint32_t packed_counter_type; template <int N, int CURRENT_VAL = N, int COUNT = 0> struct log2 { enum { VALUE = log2<N, (CURRENT_VAL >> 1), COUNT + 1>::VALUE }; }; template <int N, int COUNT> struct log2<N, 0, COUNT> { enum { VALUE = (1 << (COUNT - 1) < N) ? 
COUNT : COUNT - 1 }; }; __dpct_inline__ uint32_t bfe(uint32_t source, uint32_t bit_start, uint32_t num_bits) { const uint32_t MASK = (1 << num_bits) - 1; return (source >> bit_start) & MASK; } template <int RADIX_BITS, bool DESCENDING = false> class radix_rank { public: static size_t get_local_memory_size(size_t group_threads) { return group_threads * PADDED_COUNTER_LANES * sizeof(packed_counter_type); } radix_rank(uint8_t *local_memory) : _local_memory(local_memory) {} template <typename Item, int VALUES_PER_THREAD> __dpct_inline__ void rank_keys(const Item &item, uint32_t (&keys)[VALUES_PER_THREAD], int (&ranks)[VALUES_PER_THREAD], int current_bit, int num_bits) { digit_counter_type thread_prefixes[VALUES_PER_THREAD]; digit_counter_type *digit_counters[VALUES_PER_THREAD]; digit_counter_type *buffer = reinterpret_cast<digit_counter_type *>(_local_memory); reset_local_memory(item); item.barrier(sycl::access::fence_space::local_space); #pragma unroll for (int i = 0; i < VALUES_PER_THREAD; ++i) { uint32_t digit = bfe(keys[i], current_bit, num_bits); uint32_t sub_counter = digit >> LOG_COUNTER_LANES; uint32_t counter_lane = digit & (COUNTER_LANES - 1); if (DESCENDING) { sub_counter = PACKING_RATIO - 1 - sub_counter; counter_lane = COUNTER_LANES - 1 - counter_lane; } digit_counters[i] = &buffer[counter_lane * item.get_local_range().size() * PACKING_RATIO + item.get_local_linear_id() * PACKING_RATIO + sub_counter]; thread_prefixes[i] = *digit_counters[i]; *digit_counters[i] = thread_prefixes[i] + 1; } item.barrier(sycl::access::fence_space::local_space); scan_counters(item); item.barrier(sycl::access::fence_space::local_space); for (int i = 0; i < VALUES_PER_THREAD; ++i) { ranks[i] = thread_prefixes[i] + *digit_counters[i]; } } private: template <typename Item> __dpct_inline__ void reset_local_memory(const Item &item) { packed_counter_type *ptr = reinterpret_cast<packed_counter_type *>(_local_memory); #pragma unroll for (int i = 0; i < PADDED_COUNTER_LANES; ++i) { ptr[i * item.get_local_range().size() + item.get_local_linear_id()] = 0; } } template <typename Item> __dpct_inline__ packed_counter_type upsweep(const Item &item) { packed_counter_type sum = 0; packed_counter_type *ptr = reinterpret_cast<packed_counter_type *>(_local_memory); #pragma unroll for (int i = 0; i < PADDED_COUNTER_LANES; i++) { cached_segment[i] = ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i]; } #pragma unroll for (int i = 0; i < PADDED_COUNTER_LANES; ++i) { sum += cached_segment[i]; } return sum; } template <typename Item> __dpct_inline__ void exclusive_downsweep(const Item &item, packed_counter_type raking_partial) { packed_counter_type *ptr = reinterpret_cast<packed_counter_type *>(_local_memory); packed_counter_type sum = raking_partial; #pragma unroll for (int i = 0; i < PADDED_COUNTER_LANES; ++i) { packed_counter_type value = cached_segment[i]; cached_segment[i] = sum; sum += value; } #pragma unroll for (int i = 0; i < PADDED_COUNTER_LANES; ++i) { ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i] = cached_segment[i]; } } struct prefix_callback { __dpct_inline__ packed_counter_type operator()(packed_counter_type block_aggregate) { packed_counter_type block_prefix = 0; #pragma unroll for (int packed = 1; packed < PACKING_RATIO; packed++) { block_prefix += block_aggregate << (sizeof(digit_counter_type) * 8 * packed); } return block_prefix; } }; template <typename Item> __dpct_inline__ void scan_counters(const Item &item) { packed_counter_type raking_partial = upsweep(item); prefix_callback 
callback; packed_counter_type exclusive_partial = exclusive_scan( item, raking_partial, sycl::ext::oneapi::plus<packed_counter_type>(), callback); exclusive_downsweep(item, exclusive_partial); } private: static constexpr int PACKING_RATIO = sizeof(packed_counter_type) / sizeof(digit_counter_type); static constexpr int LOG_PACKING_RATIO = log2<PACKING_RATIO>::VALUE; static constexpr int LOG_COUNTER_LANES = RADIX_BITS - LOG_PACKING_RATIO; static constexpr int COUNTER_LANES = 1 << LOG_COUNTER_LANES; static constexpr int PADDED_COUNTER_LANES = COUNTER_LANES + 1; packed_counter_type cached_segment[PADDED_COUNTER_LANES]; uint8_t *_local_memory; }; template <typename T, typename U> struct base_traits { static __dpct_inline__ U twiddle_in(U key) { throw std::runtime_error("Not implemented"); } static __dpct_inline__ U twiddle_out(U key) { throw std::runtime_error("Not implemented"); } }; template <typename U> struct base_traits<uint32_t, U> { static __dpct_inline__ U twiddle_in(U key) { return key; } static __dpct_inline__ U twiddle_out(U key) { return key; } }; template <typename U> struct base_traits<int, U> { static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1); static __dpct_inline__ U twiddle_in(U key) { return key ^ HIGH_BIT; } static __dpct_inline__ U twiddle_out(U key) { return key ^ HIGH_BIT; } }; template <typename U> struct base_traits<float, U> { static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1); static __dpct_inline__ U twiddle_in(U key) { U mask = (key & HIGH_BIT) ? U(-1) : HIGH_BIT; return key ^ mask; } static __dpct_inline__ U twiddle_out(U key) { U mask = (key & HIGH_BIT) ? HIGH_BIT : U(-1); return key ^ mask; } }; template <typename T> struct traits : base_traits<T, T> {}; template <> struct traits<uint32_t> : base_traits<uint32_t, uint32_t> {}; template <> struct traits<int> : base_traits<int, uint32_t> {}; template <> struct traits<float> : base_traits<float, uint32_t> {}; } // namespace detail namespace detail { template <int N> struct power_of_two { enum { VALUE = ((N & (N - 1)) == 0) }; }; __dpct_inline__ uint32_t shr_add(uint32_t x, uint32_t shift, uint32_t addend) { return (x >> shift) + addend; } } // namespace detail /// Implements scatter to blocked exchange pattern used in radix sort algorithm. /// /// \tparam T type of the data elements exchanges /// \tparam VALUES_PER_THREAD number of data elements assigned to a thread template <typename T, int VALUES_PER_THREAD> class exchange { public: static size_t get_local_memory_size(size_t group_threads) { size_t padding_values = (INSERT_PADDING) ? 
((group_threads * VALUES_PER_THREAD) >> LOG_LOCAL_MEMORY_BANKS) : 0; return (group_threads * VALUES_PER_THREAD + padding_values) * sizeof(T); } exchange(uint8_t *local_memory) : _local_memory(local_memory) {} /// Rearrange elements from rank order to blocked order template <typename Item> __dpct_inline__ void scatter_to_blocked(Item item, T (&keys)[VALUES_PER_THREAD], int (&ranks)[VALUES_PER_THREAD]) { T *buffer = reinterpret_cast<T *>(_local_memory); #pragma unroll for (int i = 0; i < VALUES_PER_THREAD; i++) { int offset = ranks[i]; if (INSERT_PADDING) offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset); buffer[offset] = keys[i]; } item.barrier(sycl::access::fence_space::local_space); #pragma unroll for (int i = 0; i < VALUES_PER_THREAD; i++) { int offset = (item.get_local_id(0) * VALUES_PER_THREAD) + i; if (INSERT_PADDING) offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset); keys[i] = buffer[offset]; } } private: static constexpr int LOG_LOCAL_MEMORY_BANKS = 5; static constexpr bool INSERT_PADDING = (VALUES_PER_THREAD > 4) && (detail::power_of_two<VALUES_PER_THREAD>::VALUE); uint8_t *_local_memory; }; /// Implements radix sort to sort integer data elements assigned to all threads /// in the group. /// /// \tparam T type of the data elements exchanges /// \tparam VALUES_PER_THREAD number of data elements assigned to a thread /// \tparam DECENDING boolean value indicating if data elements are sorted in /// decending order. template <typename T, int VALUES_PER_THREAD, bool DESCENDING = false> class radix_sort { public: static size_t get_local_memory_size(size_t group_threads) { size_t ranks_size = detail::radix_rank<RADIX_BITS>::get_local_memory_size(group_threads); size_t exchange_size = exchange<T, VALUES_PER_THREAD>::get_local_memory_size(group_threads); return sycl::max(ranks_size, exchange_size); } radix_sort(uint8_t *local_memory) : _local_memory(local_memory) {} template <typename Item> __dpct_inline__ void sort(const Item &item, T (&keys)[VALUES_PER_THREAD], int begin_bit = 0, int end_bit = 8 * sizeof(T)) { uint32_t(&unsigned_keys)[VALUES_PER_THREAD] = reinterpret_cast<uint32_t(&)[VALUES_PER_THREAD]>(keys); #pragma unroll for (int i = 0; i < VALUES_PER_THREAD; ++i) { unsigned_keys[i] = detail::traits<T>::twiddle_in(unsigned_keys[i]); } while (true) { int pass_bits = sycl::min(RADIX_BITS, end_bit - begin_bit); int ranks[VALUES_PER_THREAD]; detail::radix_rank<RADIX_BITS, DESCENDING>(_local_memory) .template rank_keys(item, unsigned_keys, ranks, begin_bit, pass_bits); begin_bit += RADIX_BITS; item.barrier(sycl::access::fence_space::local_space); exchange<T, VALUES_PER_THREAD>(_local_memory) .scatter_to_blocked(item, keys, ranks); item.barrier(sycl::access::fence_space::local_space); if (begin_bit >= end_bit) break; } #pragma unroll for (int i = 0; i < VALUES_PER_THREAD; ++i) { unsigned_keys[i] = detail::traits<T>::twiddle_out(unsigned_keys[i]); } } private: static constexpr int RADIX_BITS = 4; uint8_t *_local_memory; }; /// Perform a reduction of the data elements assigned to all threads in the /// group. /// /// \param item A work-item in a group. /// \param inputs Pointer to the input data for the reduce operation. /// \param binary_op functor that implements the binary operation used to /// perform the scan. 
\returns value of the reduction using binary_op template <typename Item, typename T, class BinaryOperation, int VALUES_PER_THREAD> __dpct_inline__ T reduce(Item item, T (&inputs)[VALUES_PER_THREAD], BinaryOperation binary_op) { T result = inputs[0]; #pragma unroll for (int i = 1; i < VALUES_PER_THREAD; i++) { result = binary_op(result, inputs[i]); } return detail::__reduce_over_group(item.get_group(), result, binary_op); } /// Perform a reduction on a limited number of the work items in a subgroup /// /// \param item A work-item in a group. /// \param value value per work item which is to be reduced /// \param items_to_reduce num work items at the start of the subgroup to reduce /// \param binary_op functor that implements the binary operation used to /// perform the scan. \returns value of the reduction using binary_op template <typename Item, typename T, class BinaryOperation> __dpct_inline__ typename ::std::enable_if_t<sycl::has_known_identity_v<BinaryOperation, T>, T> reduce_over_partial_group(const Item &item, const T &value, const ::std::uint16_t &items_to_reduce, BinaryOperation binary_op) { T value_temp = (item.get_local_linear_id() < items_to_reduce) ? value : sycl::known_identity_v<BinaryOperation, T>; return detail::__reduce_over_group(item.get_sub_group(), value_temp, binary_op); } /// Perform an inclusive scan over the values of inputs from all work-items in /// the group using the operator binary_op, which must be one of the SYCL 2020 /// group algorithms library function objects. /// /// \param item A work-item in a group. /// \param inputs Pointer to the input data for the scan operation. /// \param outputs Pointer to the location where scan results will be stored. /// \param binary_op functor that implements the binary operation used to /// perform the scan. \returns inclusive scan of the input elements assigned to /// work-items in the group. template <typename Item, typename T, class BinaryOperation, int VALUES_PER_THREAD> __dpct_inline__ void inclusive_scan(const Item &item, T (&inputs)[VALUES_PER_THREAD], T (&outputs)[VALUES_PER_THREAD], BinaryOperation binary_op) { T result = inputs[0]; #pragma unroll for (int i = 1; i < VALUES_PER_THREAD; ++i) { result = binary_op(result, inputs[i]); } T exclusive_result = detail::__exclusive_scan_over_group(item.get_group(), result, binary_op); if (item.get_local_linear_id() == 0) { outputs[0] = inputs[0]; } else { outputs[0] = binary_op(inputs[0], exclusive_result); } #pragma unroll for (int i = 1; i < VALUES_PER_THREAD; ++i) { outputs[i] = binary_op(inputs[i], outputs[i - 1]); } } /// Perform an inclusive scan over the values of inputs from all work-items in /// the group using the operator binary_op, which must be one of the SYCL 2020 /// group algorithms library function objects. /// /// \param item A work-item in a group. /// \param input Pointer to the input data for the scan operation. /// \param binary_op functor that implements the binary operation used to /// perform the scan. \param group_aggregate group-wide aggregate of all inputs /// in the work-items of the group. \returns inclusive scan of the input /// elements assigned to work-items in the group. 
template <typename Item, typename T, class BinaryOperation> __dpct_inline__ T inclusive_scan(const Item &item, T input, BinaryOperation binary_op, T &group_aggregate) { T output = detail::__inclusive_scan_over_group(item.get_group(), input, binary_op); if (item.get_local_linear_id() == item.get_local_range().size() - 1) { group_aggregate = output; } group_aggregate = detail::__group_broadcast( item.get_group(), group_aggregate, item.get_local_range().size() - 1); return output; } /// Perform an inclusive scan over the values of input from all work-items in /// the group using the operator binary_op, which must be one of the SYCL 2020 /// group algorithms library function objects. /// /// \param item A work-item in a group. /// \param input Input data for the scan operation. /// \param binary_op functor that implements the binary operation used to /// perform the scan. \param prefix_callback_op functor invoked by the first /// work-item in the group that returns the /// initial value in the resulting scan of the work-items in the group. /// \returns inclusive scan of the input elements assigned to work-items in the /// group. template <typename Item, typename T, class BinaryOperation, class GroupPrefixCallbackOperation> __dpct_inline__ T inclusive_scan(const Item &item, T input, BinaryOperation binary_op, GroupPrefixCallbackOperation &prefix_callback_op) { T group_aggregate; T output = inclusive_scan(item, input, binary_op, group_aggregate); T group_prefix = prefix_callback_op(group_aggregate); return binary_op(group_prefix, output); } } // namespace group namespace device { namespace detail { template <typename... _Args> constexpr auto __joint_reduce(_Args... __args) { return sycl::joint_reduce(__args...); } } // namespace detail /// Perform a reduce on each of the segments specified within data stored on /// the device. /// /// \param queue Command queue used to access device used for reduction /// \param inputs Pointer to the data elements on the device to be reduced /// \param outputs Pointer to the storage where the reduced value for each /// segment will be stored \param segment_count number of segments to be reduced /// \param begin_offsets Pointer to the set of indices that are the first /// element in each segment \param end_offsets Pointer to the set of indices /// that are one past the last element in each segment \param binary_op functor /// that implements the binary operation used to perform the scan. \param init /// initial value of the reduction for each segment. 
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation> void segmented_reduce(sycl::queue queue, T *inputs, T *outputs, size_t segment_count, OffsetT *begin_offsets, OffsetT *end_offsets, BinaryOperation binary_op, T init) { sycl::range<1> global_size(segment_count * GROUP_SIZE); sycl::range<1> local_size(GROUP_SIZE); queue.submit([&](sycl::handler &cgh) { cgh.parallel_for( sycl::nd_range<1>(global_size, local_size), [=](sycl::nd_item<1> item) { OffsetT segment_begin = begin_offsets[item.get_group_linear_id()]; OffsetT segment_end = end_offsets[item.get_group_linear_id()]; if (segment_begin == segment_end) { if (item.get_local_linear_id() == 0) { outputs[item.get_group_linear_id()] = init; } return; } sycl::multi_ptr<T, sycl::access::address_space::global_space> input_ptr = inputs; T group_aggregate = detail::__joint_reduce( item.get_group(), input_ptr + segment_begin, input_ptr + segment_end, init, binary_op); if (item.get_local_linear_id() == 0) { outputs[item.get_group_linear_id()] = group_aggregate; } }); }); } #ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS namespace experimental { namespace detail { template <typename _Tp, typename... _Ts> struct __is_any { constexpr static bool value = std::disjunction_v< std::is_same<std::remove_cv_t<_Tp>, std::remove_cv_t<_Ts>>...>; }; template <typename _Tp, typename _Bp> struct __in_native_op_list { constexpr static bool value = __is_any<_Bp, sycl::plus<_Tp>, sycl::bit_or<_Tp>, sycl::bit_xor<_Tp>, sycl::bit_and<_Tp>, sycl::maximum<_Tp>, sycl::minimum<_Tp>, sycl::multiplies<_Tp>>::value; }; template <typename _Tp, typename _Bp> struct __is_native_op { constexpr static bool value = __in_native_op_list<_Tp, _Bp>::value || __in_native_op_list<void, _Bp>::value; }; } // namespace detail /// Perform a reduce on each of the segments specified within data stored on /// the device. Compared with dpct::device::segmented_reduce, this experimental /// feature support user define reductions. /// /// \param queue Command queue used to access device used for reduction /// \param inputs Pointer to the data elements on the device to be reduced /// \param outputs Pointer to the storage where the reduced value for each /// segment will be stored \param segment_count number of segments to be reduced /// \param begin_offsets Pointer to the set of indices that are the first /// element in each segment \param end_offsets Pointer to the set of indices /// that are one past the last element in each segment \param binary_op functor /// that implements the binary operation used to perform the scan. \param init /// initial value of the reduction for each segment. 
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation> void segmented_reduce(sycl::queue queue, T *inputs, T *outputs, size_t segment_count, OffsetT *begin_offsets, OffsetT *end_offsets, BinaryOperation binary_op, T init) { sycl::range<1> global_size(segment_count * GROUP_SIZE); sycl::range<1> local_size(GROUP_SIZE); if constexpr (!detail::__is_native_op<T, BinaryOperation>::value) { queue.submit([&](sycl::handler &cgh) { size_t temp_memory_size = GROUP_SIZE * sizeof(T); auto scratch = sycl::local_accessor<std::byte, 1>(temp_memory_size, cgh); cgh.parallel_for( sycl::nd_range<1>(global_size, local_size), [=](sycl::nd_item<1> item) { OffsetT segment_begin = begin_offsets[item.get_group_linear_id()]; OffsetT segment_end = end_offsets[item.get_group_linear_id()]; if (segment_begin == segment_end) { if (item.get_local_linear_id() == 0) { outputs[item.get_group_linear_id()] = init; } return; } // Create a handle that associates the group with an allocation it // can use auto handle = sycl::ext::oneapi::experimental::group_with_scratchpad( item.get_group(), sycl::span(&scratch[0], temp_memory_size)); T group_aggregate = sycl::ext::oneapi::experimental::joint_reduce( handle, inputs + segment_begin, inputs + segment_end, init, binary_op); if (item.get_local_linear_id() == 0) { outputs[item.get_group_linear_id()] = group_aggregate; } }); }); } else { dpct::device::segmented_reduce<GROUP_SIZE>(queue, inputs, outputs, segment_count, begin_offsets, end_offsets, binary_op, init); } } } // namespace experimental #endif // SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS } // namespace device } // namespace dpct #endif
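A hedged sketch of calling dpct::device::segmented_reduce as declared above: one work-group of GROUP_SIZE work-items is launched per segment, and each group writes its reduction result to the corresponding output slot. The include path for dpcpp_extensions.h is an assumption; the function signature and semantics follow the header.

#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <dpct/dpl_extras/dpcpp_extensions.h>  // path is an assumption
#include <iostream>

int main() {
  sycl::queue q;

  constexpr size_t n = 8, segments = 2;
  int host_in[n]      = {1, 2, 3, 4, 10, 20, 30, 40};
  int begin[segments] = {0, 4};  // first index of each segment
  int end[segments]   = {4, 8};  // one past the last index of each segment

  int *d_in  = sycl::malloc_device<int>(n, q);
  int *d_out = sycl::malloc_device<int>(segments, q);
  int *d_beg = sycl::malloc_device<int>(segments, q);
  int *d_end = sycl::malloc_device<int>(segments, q);
  q.memcpy(d_in, host_in, n * sizeof(int));
  q.memcpy(d_beg, begin, segments * sizeof(int));
  q.memcpy(d_end, end, segments * sizeof(int));
  q.wait();

  // GROUP_SIZE = 128: one 128-work-item group reduces each segment.
  dpct::device::segmented_reduce<128>(q, d_in, d_out, segments, d_beg, d_end,
                                      sycl::plus<int>(), 0);
  q.wait();

  int host_out[segments];
  q.memcpy(host_out, d_out, segments * sizeof(int)).wait();
  std::cout << host_out[0] << ' ' << host_out[1] << '\n';  // 10 100

  sycl::free(d_in, q);  sycl::free(d_out, q);
  sycl::free(d_beg, q); sycl::free(d_end, q);
  return 0;
}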
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/functional.h
//==---- functional.h -----------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_FUNCTIONAL_H__ #define __DPCT_FUNCTIONAL_H__ #include <functional> #include <oneapi/dpl/functional> #include <oneapi/dpl/iterator> #if ONEDPL_USE_DPCPP_BACKEND #include <oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_utils.h> #endif #include <tuple> #include <utility> namespace dpct { struct null_type {}; namespace internal { template <class _ExecPolicy, class _T> using enable_if_execution_policy = typename std::enable_if<oneapi::dpl::execution::is_execution_policy< typename std::decay<_ExecPolicy>::type>::value, _T>::type; template <typename _T> struct is_hetero_execution_policy : ::std::false_type {}; template <typename... PolicyParams> struct is_hetero_execution_policy< oneapi::dpl::execution::device_policy<PolicyParams...>> : ::std::true_type { }; template <typename _T> struct is_fpga_execution_policy : ::std::false_type {}; #if _ONEDPL_FPGA_DEVICE template <unsigned int unroll_factor, typename... PolicyParams> struct is_hetero_execution_policy< execution::fpga_policy<unroll_factor, PolicyParams...>> : ::std::true_type { }; #endif template <class _ExecPolicy, class _T> using enable_if_hetero_execution_policy = typename std::enable_if< is_hetero_execution_policy<typename std::decay<_ExecPolicy>::type>::value, _T>::type; #if _ONEDPL_CPP14_INTEGER_SEQUENCE_PRESENT template <std::size_t... _Sp> using index_sequence = std::index_sequence<_Sp...>; template <std::size_t _Np> using make_index_sequence = std::make_index_sequence<_Np>; #else template <std::size_t... _Sp> class index_sequence {}; template <std::size_t _Np, std::size_t... _Sp> struct make_index_sequence_impl : make_index_sequence_impl<_Np - 1, _Np - 1, _Sp...> {}; template <std::size_t... _Sp> struct make_index_sequence_impl<0, _Sp...> { using type = index_sequence<_Sp...>; }; template <std::size_t _Np> using make_index_sequence = typename make_index_sequence_impl<_Np>::type; #endif // Minimal buffer implementations for temporary storage in mapping rules // Some of our algorithms need to start with raw memory buffer, // not an initialized array, because initialization/destruction // would make the span be at least O(N). #if ONEDPL_USE_DPCPP_BACKEND template <typename _Tp> class __buffer { sycl::buffer<_Tp, 1> __buf; __buffer(const __buffer &) = delete; void operator=(const __buffer &) = delete; public: // Try to obtain buffer of given size to store objects of _Tp type __buffer(std::size_t __n) : __buf(sycl::range<1>(__n)) {} // Return pointer to buffer, or NULL if buffer could not be obtained. auto get() -> decltype(oneapi::dpl::begin(__buf)) const { return oneapi::dpl::begin(__buf); } }; #else template <typename _Tp> class __buffer { std::unique_ptr<_Tp> _M_ptr; __buffer(const __buffer &) = delete; void operator=(const __buffer &) = delete; public: // Try to obtain buffer of given size to store objects of _Tp type __buffer(const std::size_t __n) : _M_ptr(new _Tp[__n]) {} // Return pointer to buffer, or NULL if buffer could not be obtained. _Tp *get() const { return _M_ptr.get(); } }; #endif // Implements C++14 std::less<void> specialization to allow parameter type // deduction. 
class __less { public: template <typename _Xp, typename _Yp> bool operator()(_Xp &&__x, _Yp &&__y) const { return std::forward<_Xp>(__x) < std::forward<_Yp>(__y); } }; template <typename Policy, typename NewName> struct rebind_policy { using type = Policy; }; template <typename KernelName, typename NewName> struct rebind_policy<oneapi::dpl::execution::device_policy<KernelName>, NewName> { using type = oneapi::dpl::execution::device_policy<NewName>; }; #if _ONEDPL_FPGA_DEVICE template <unsigned int factor, typename KernelName, typename NewName> struct rebind_policy<oneapi::dpl::execution::fpga_policy<factor, KernelName>, NewName> { using type = oneapi::dpl::execution::fpga_policy<factor, NewName>; }; #endif template <typename T1, typename T2, typename R1 = typename std::iterator_traits<T1>::reference, typename R2 = typename std::iterator_traits<T2>::reference> struct perm_fun { typedef R2 result_of; perm_fun(T1 input) : source(input) {} R2 operator()(R1 x) const { return *(source + x); } private: T1 source; }; // Functor compares first element (key) from tied sequence. template <typename Compare = class internal::__less> struct compare_key_fun { typedef bool result_of; compare_key_fun(Compare _comp = internal::__less()) : comp(_comp) {} template <typename _T1, typename _T2> result_of operator()(_T1 &&a, _T2 &&b) const { using std::get; return comp(get<0>(a), get<0>(b)); } private: mutable Compare comp; }; // Functor evaluates second element of tied sequence with predicate. // Used by: copy_if, remove_copy_if, stable_partition_copy // Lambda: template <typename Predicate> struct predicate_key_fun { typedef bool result_of; predicate_key_fun(Predicate _pred) : pred(_pred) {} template <typename _T1> result_of operator()(_T1 &&a) const { using std::get; return pred(get<1>(a)); } private: mutable Predicate pred; }; // Used by: remove_if template <typename Predicate> struct negate_predicate_key_fun { typedef bool result_of; negate_predicate_key_fun(Predicate _pred) : pred(_pred) {} template <typename _T1> result_of operator()(_T1 &&a) const { using std::get; return !pred(get<1>(a)); } private: mutable Predicate pred; }; template <typename T> struct sequence_fun { using result_type = T; sequence_fun(T _init, T _step) : init(_init), step(_step) {} template <typename _T> result_type operator()(_T &&i) const { return static_cast<T>(init + step * i); } private: const T init; const T step; }; //[binary_pred](Ref a, Ref b){ return(binary_pred(get<0>(a),get<0>(b))); template <typename Predicate> struct unique_fun { typedef bool result_of; unique_fun(Predicate _pred) : pred(_pred) {} template <typename _T> result_of operator()(_T &&a, _T &&b) const { using std::get; return pred(get<0>(a), get<0>(b)); } private: mutable Predicate pred; }; // Lambda: [pred, &new_value](Ref1 a, Ref2 s) {return pred(s) ? new_value : a; // }); template <typename T, typename Predicate> struct replace_if_fun { public: typedef T result_of; replace_if_fun(Predicate _pred, T _new_value) : pred(_pred), new_value(_new_value) {} template <typename _T1, typename _T2> T operator()(_T1 &&a, _T2 &&s) const { return pred(s) ? new_value : a; } private: mutable Predicate pred; const T new_value; }; //[pred,op](Ref a){return pred(a) ? 
op(a) : a; } template <typename T, typename Predicate, typename Operator> struct transform_if_fun { transform_if_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {} template <typename _T> void operator()(_T&& t) const { using std::get; if (pred(get<0>(t))) get<1>(t) = op(get<0>(t)); } private: mutable Predicate pred; mutable Operator op; }; //[pred, op](Ref1 a, Ref2 s) { return pred(s) ? op(a) : a; }); template <typename T, typename Predicate, typename Operator> struct transform_if_unary_zip_mask_fun { transform_if_unary_zip_mask_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {} template <typename _T> void operator()(_T&& t) const { using std::get; if (pred(get<1>(t))) get<2>(t) = op(get<0>(t)); } private: mutable Predicate pred; mutable Operator op; }; template <typename T, typename Predicate, typename BinaryOperation> class transform_if_zip_mask_fun { public: transform_if_zip_mask_fun(Predicate _pred = oneapi::dpl::identity(), BinaryOperation _op = oneapi::dpl::identity()) : pred(_pred), op(_op) {} template <typename _T> void operator()(_T &&t) const { using std::get; if (pred(get<2>(t))) get<3>(t) = op(get<0>(t), get<1>(t)); } private: mutable Predicate pred; mutable BinaryOperation op; }; // This following code is similar to a section of code in // oneDPL/include/oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_radix_sort.h // It has a similar approach, and could be consolidated. // Outside of some differences in approach, there are two significant // differences in function. // // 1) This code allows the output type of the bit range translation to be fit // into to the minimal type required to provide that many bits. The code in // oneDPL to calculate the bucket for the radix is similar but its output is // always std::uint32_t. The assumption that the bit range desired will fit in // 32 bits is not true for this code. // // 2) This code ensures that for floating point type, -0.0f and 0.0f map to the // same value. This allows the output of this translation to be used to provide // a sort which ensures the stability of these values for floating point types. 
template <int N> struct uint_byte_map {}; template <> struct uint_byte_map<1> { using type = uint8_t; }; template <> struct uint_byte_map<2> { using type = uint16_t; }; template <> struct uint_byte_map<4> { using type = uint32_t; }; template <> struct uint_byte_map<8> { using type = uint64_t; }; template <typename T> struct uint_map { using type = typename uint_byte_map<sizeof(T)>::type; }; template <typename T, typename OutKeyT> class translate_key { using uint_type_t = typename uint_map<T>::type; public: translate_key(int begin_bit, int end_bit) { shift = begin_bit; mask = ~OutKeyT(0); // all ones mask = mask >> (sizeof(OutKeyT) * 8 - (end_bit - begin_bit)); // setup appropriate mask flip_sign = uint_type_t(1) << (sizeof(uint_type_t) * 8 - 1); // sign bit flip_key = ~uint_type_t(0); // 0xF...F } inline OutKeyT operator()(const T &key) const { uint_type_t intermediate; if constexpr (std::is_floating_point<T>::value) { // normal case (both -0.0f and 0.0f equal -0.0f) if (key != T(-0.0f)) { uint_type_t is_negative = reinterpret_cast<const uint_type_t &>(key) >> (sizeof(uint_type_t) * 8 - 1); intermediate = reinterpret_cast<const uint_type_t &>(key) ^ ((is_negative * flip_key) | flip_sign); } else // special case for -0.0f to keep stability with 0.0f { T negzero = T(-0.0f); intermediate = reinterpret_cast<const uint_type_t &>(negzero); } } else if constexpr (std::is_signed<T>::value) { intermediate = reinterpret_cast<const uint_type_t &>(key) ^ flip_sign; } else { intermediate = key; } return static_cast<OutKeyT>(intermediate >> shift) & mask; // shift, cast, and mask } private: uint8_t shift; OutKeyT mask; uint_type_t flip_sign; uint_type_t flip_key; }; } // end namespace internal } // end namespace dpct #endif
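The comment block above motivates the key-translation step used by the radix sort: keys are mapped to an unsigned integer type whose plain unsigned ordering matches the original key ordering, with -0.0f and 0.0f deliberately collapsing to the same value. A small, hedged illustration follows; dpct::internal is an implementation-detail namespace and the include path is an assumption, so this is only a conceptual check, not a supported public API.

#include <cstdint>
#include <iostream>
#include <dpct/dpl_extras/functional.h>  // path is an assumption

int main() {
  // Translate the full 32-bit key range [0, 32) of a float into a uint32_t.
  dpct::internal::translate_key<float, uint32_t> tr(0, 32);

  float keys[] = {-2.5f, -0.0f, 0.0f, 1.0f, 3.5f};
  uint32_t prev = 0;
  for (int i = 0; i < 5; ++i) {
    uint32_t t = tr(keys[i]);
    std::cout << keys[i] << " -> 0x" << std::hex << t << std::dec << '\n';
    // Unsigned ordering of the translated keys is non-decreasing here,
    // matching the ordering of the original floats.
    if (i > 0 && t < prev) std::cout << "  ordering not preserved!\n";
    prev = t;
  }
  // -0.0f and 0.0f map to the same translated key, which keeps the sort
  // stable across the two zero representations.
  return 0;
}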
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/Common/helper_timer.h
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Helper Timing Functions #ifndef COMMON_HELPER_TIMER_H_ #define COMMON_HELPER_TIMER_H_ #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif // includes, system #include <vector> // includes, project #include <exception.h> // Definition of the StopWatch Interface, this is used if we don't want to use // the CUT functions But rather in a self contained class interface class StopWatchInterface { public: StopWatchInterface() {} virtual ~StopWatchInterface() {} public: //! Start time measurement virtual void start() = 0; //! Stop time measurement virtual void stop() = 0; //! Reset time counters to zero virtual void reset() = 0; //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned, otherwise the //! time between the last start() and stop call is returned virtual float getTime() = 0; //! Mean time to date based on the number of times the stopwatch has been //! _stopped_ (ie finished sessions) and the current total time virtual float getAverageTime() = 0; }; ////////////////////////////////////////////////////////////////// // Begin Stopwatch timer class definitions for all OS platforms // ////////////////////////////////////////////////////////////////// #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) // includes, system #define WINDOWS_LEAN_AND_MEAN #include <windows.h> #undef min #undef max //! Windows specific implementation of StopWatch class StopWatchWin : public StopWatchInterface { public: //! Constructor, default StopWatchWin() : start_time(), end_time(), diff_time(0.0f), total_time(0.0f), running(false), clock_sessions(0), freq(0), freq_set(false) { if (!freq_set) { // helper variable LARGE_INTEGER temp; // get the tick frequency from the OS QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER *>(&temp)); // convert to type in which it is needed freq = (static_cast<double>(temp.QuadPart)) / 1000.0; // rememeber query freq_set = true; } } // Destructor ~StopWatchWin() {} public: //! 
Start time measurement inline void start(); //! Stop time measurement inline void stop(); //! Reset time counters to zero inline void reset(); //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned, otherwise the //! time between the last start() and stop call is returned inline float getTime(); //! Mean time to date based on the number of times the stopwatch has been //! _stopped_ (ie finished sessions) and the current total time inline float getAverageTime(); private: // member variables //! Start of measurement LARGE_INTEGER start_time; //! End of measurement LARGE_INTEGER end_time; //! Time difference between the last start and stop float diff_time; //! TOTAL time difference between starts and stops float total_time; //! flag if the stop watch is running bool running; //! Number of times clock has been started //! and stopped to allow averaging int clock_sessions; //! tick frequency double freq; //! flag if the frequency has been set bool freq_set; }; // functions, inlined //////////////////////////////////////////////////////////////////////////////// //! Start time measurement //////////////////////////////////////////////////////////////////////////////// inline void StopWatchWin::start() { QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time)); running = true; } //////////////////////////////////////////////////////////////////////////////// //! Stop time measurement and increment add to the current diff_time summation //! variable. Also increment the number of times this clock has been run. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchWin::stop() { QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&end_time)); diff_time = static_cast<float>(((static_cast<double>(end_time.QuadPart) - static_cast<double>(start_time.QuadPart)) / freq)); total_time += diff_time; clock_sessions++; running = false; } //////////////////////////////////////////////////////////////////////////////// //! Reset the timer to 0. Does not change the timer running state but does //! recapture this point in time as the current start time if it is running. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchWin::reset() { diff_time = 0; total_time = 0; clock_sessions = 0; if (running) { QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time)); } } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned added to the //! current diff_time sum, otherwise the current summed time difference alone //! is returned. //////////////////////////////////////////////////////////////////////////////// inline float StopWatchWin::getTime() { // Return the TOTAL time to date float retval = total_time; if (running) { LARGE_INTEGER temp; QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&temp)); retval += static_cast<float>(((static_cast<double>(temp.QuadPart) - static_cast<double>(start_time.QuadPart)) / freq)); } return retval; } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. for a single run based on the total number of COMPLETED runs //! and the total time. 
//////////////////////////////////////////////////////////////////////////////// inline float StopWatchWin::getAverageTime() { return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f; } #else // Declarations for Stopwatch on Linux and Mac OSX // includes, system #include <sys/time.h> #include <ctime> //! Windows specific implementation of StopWatch class StopWatchLinux : public StopWatchInterface { public: //! Constructor, default StopWatchLinux() : start_time(), diff_time(0.0), total_time(0.0), running(false), clock_sessions(0) {} // Destructor virtual ~StopWatchLinux() {} public: //! Start time measurement inline void start(); //! Stop time measurement inline void stop(); //! Reset time counters to zero inline void reset(); //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned, otherwise the //! time between the last start() and stop call is returned inline float getTime(); //! Mean time to date based on the number of times the stopwatch has been //! _stopped_ (ie finished sessions) and the current total time inline float getAverageTime(); private: // helper functions //! Get difference between start time and current time inline float getDiffTime(); private: // member variables //! Start of measurement struct timeval start_time; //! Time difference between the last start and stop float diff_time; //! TOTAL time difference between starts and stops float total_time; //! flag if the stop watch is running bool running; //! Number of times clock has been started //! and stopped to allow averaging int clock_sessions; }; // functions, inlined //////////////////////////////////////////////////////////////////////////////// //! Start time measurement //////////////////////////////////////////////////////////////////////////////// inline void StopWatchLinux::start() { gettimeofday(&start_time, 0); running = true; } //////////////////////////////////////////////////////////////////////////////// //! Stop time measurement and increment add to the current diff_time summation //! variable. Also increment the number of times this clock has been run. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchLinux::stop() { diff_time = getDiffTime(); total_time += diff_time; running = false; clock_sessions++; } //////////////////////////////////////////////////////////////////////////////// //! Reset the timer to 0. Does not change the timer running state but does //! recapture this point in time as the current start time if it is running. //////////////////////////////////////////////////////////////////////////////// inline void StopWatchLinux::reset() { diff_time = 0; total_time = 0; clock_sessions = 0; if (running) { gettimeofday(&start_time, 0); } } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. after start. If the stop watch is still running (i.e. there //! was no call to stop()) then the elapsed time is returned added to the //! current diff_time sum, otherwise the current summed time difference alone //! is returned. //////////////////////////////////////////////////////////////////////////////// inline float StopWatchLinux::getTime() { // Return the TOTAL time to date float retval = total_time; if (running) { retval += getDiffTime(); } return retval; } //////////////////////////////////////////////////////////////////////////////// //! Time in msec. 
for a single run based on the total number of COMPLETED runs //! and the total time. //////////////////////////////////////////////////////////////////////////////// inline float StopWatchLinux::getAverageTime() { return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f; } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// inline float StopWatchLinux::getDiffTime() { struct timeval t_time; gettimeofday(&t_time, 0); // time difference in milli-seconds return static_cast<float>(1000.0 * (t_time.tv_sec - start_time.tv_sec) + (0.001 * (t_time.tv_usec - start_time.tv_usec))); } #endif // WIN32 //////////////////////////////////////////////////////////////////////////////// //! Timer functionality exported //////////////////////////////////////////////////////////////////////////////// //! Create a new timer //! @return true if a time has been created, otherwise false //! @param name of the new timer, 0 if the creation failed //////////////////////////////////////////////////////////////////////////////// inline bool sdkCreateTimer(StopWatchInterface **timer_interface) { // printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface); #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) *timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchWin()); #else *timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchLinux()); #endif return (*timer_interface != NULL) ? true : false; } //////////////////////////////////////////////////////////////////////////////// //! Delete a timer //! @return true if a time has been deleted, otherwise false //! @param name of the timer to delete //////////////////////////////////////////////////////////////////////////////// inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) { // printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { delete *timer_interface; *timer_interface = NULL; } return true; } //////////////////////////////////////////////////////////////////////////////// //! Start the time with name \a name //! @param name name of the timer to start //////////////////////////////////////////////////////////////////////////////// inline bool sdkStartTimer(StopWatchInterface **timer_interface) { // printf("sdkStartTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { (*timer_interface)->start(); } return true; } //////////////////////////////////////////////////////////////////////////////// //! Stop the time with name \a name. Does not reset. //! @param name name of the timer to stop //////////////////////////////////////////////////////////////////////////////// inline bool sdkStopTimer(StopWatchInterface **timer_interface) { // printf("sdkStopTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { (*timer_interface)->stop(); } return true; } //////////////////////////////////////////////////////////////////////////////// //! Resets the timer's counter. //! @param name name of the timer to reset. //////////////////////////////////////////////////////////////////////////////// inline bool sdkResetTimer(StopWatchInterface **timer_interface) { // printf("sdkResetTimer called object %08x\n", (void *)*timer_interface); if (*timer_interface) { (*timer_interface)->reset(); } return true; } //////////////////////////////////////////////////////////////////////////////// //! 
Return the average time for timer execution as the total time //! for the timer divided by the number of completed (stopped) runs the timer //! has made. //! Excludes the current running time if the timer is currently running. //! @param name name of the timer to return the time of //////////////////////////////////////////////////////////////////////////////// inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) { // printf("sdkGetAverageTimerValue called object %08x\n", (void // *)*timer_interface); if (*timer_interface) { return (*timer_interface)->getAverageTime(); } else { return 0.0f; } } //////////////////////////////////////////////////////////////////////////////// //! Total execution time for the timer over all runs since the last reset //! or timer creation. //! @param name name of the timer to obtain the value of. //////////////////////////////////////////////////////////////////////////////// inline float sdkGetTimerValue(StopWatchInterface **timer_interface) { // printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface); if (*timer_interface) { return (*timer_interface)->getTime(); } else { return 0.0f; } } #endif // COMMON_HELPER_TIMER_H_
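// ----------------------------------------------------------------------------
// Minimal usage sketch for the sdk*Timer helpers above, written as a separate
// standalone .cpp and assuming this header is reachable on the include path as
// <helper_timer.h>. The timed loop is only a placeholder workload; the
// reported values are wall-clock milliseconds.
// ----------------------------------------------------------------------------
#include <cstdio>
#include <helper_timer.h>

int main() {
  StopWatchInterface *timer = NULL;
  sdkCreateTimer(&timer);            // allocates StopWatchWin or StopWatchLinux

  for (int run = 0; run < 3; ++run) {
    sdkStartTimer(&timer);
    volatile double acc = 0.0;       // placeholder workload being timed
    for (int i = 0; i < 1000000; ++i) acc += 0.5 * i;
    sdkStopTimer(&timer);            // closes one "session" used for averaging
  }

  printf("total   : %.3f ms\n", sdkGetTimerValue(&timer));
  printf("average : %.3f ms\n", sdkGetAverageTimerValue(&timer));

  sdkResetTimer(&timer);             // zeroes totals and the session count
  sdkDeleteTimer(&timer);
  return 0;
}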
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/Common/helper_string.h
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // These are helper functions for the SDK samples (string parsing, timers, etc) #ifndef COMMON_HELPER_STRING_H_ #define COMMON_HELPER_STRING_H_ #include <stdio.h> #include <stdlib.h> #include <fstream> #include <string> #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) #ifndef _CRT_SECURE_NO_DEPRECATE #define _CRT_SECURE_NO_DEPRECATE #endif #ifndef STRCASECMP #define STRCASECMP _stricmp #endif #ifndef STRNCASECMP #define STRNCASECMP _strnicmp #endif #ifndef STRCPY #define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath) #endif #ifndef FOPEN #define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode) #endif #ifndef FOPEN_FAIL #define FOPEN_FAIL(result) (result != 0) #endif #ifndef SSCANF #define SSCANF sscanf_s #endif #ifndef SPRINTF #define SPRINTF sprintf_s #endif #else // Linux Includes #include <string.h> #include <strings.h> #ifndef STRCASECMP #define STRCASECMP strcasecmp #endif #ifndef STRNCASECMP #define STRNCASECMP strncasecmp #endif #ifndef STRCPY #define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath) #endif #ifndef FOPEN #define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode)) #endif #ifndef FOPEN_FAIL #define FOPEN_FAIL(result) (result == NULL) #endif #ifndef SSCANF #define SSCANF sscanf #endif #ifndef SPRINTF #define SPRINTF sprintf #endif #endif #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif // CUDA Utility Helper Functions inline int stringRemoveDelimiter(char delimiter, const char *string) { int string_start = 0; while (string[string_start] == delimiter) { string_start++; } if (string_start >= static_cast<int>(strlen(string) - 1)) { return 0; } return string_start; } inline int getFileExtension(char *filename, char **extension) { int string_length = static_cast<int>(strlen(filename)); while (filename[string_length--] != '.') { if (string_length == 0) break; } if (string_length > 0) string_length += 2; if (string_length == 0) *extension = NULL; else *extension = &filename[string_length]; return 
string_length; } inline bool checkCmdLineFlag(const int argc, const char **argv, const char *string_ref) { bool bFound = false; if (argc >= 1) { for (int i = 1; i < argc; i++) { int string_start = stringRemoveDelimiter('-', argv[i]); const char *string_argv = &argv[i][string_start]; const char *equal_pos = strchr(string_argv, '='); int argv_length = static_cast<int>( equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv); int length = static_cast<int>(strlen(string_ref)); if (length == argv_length && !STRNCASECMP(string_argv, string_ref, length)) { bFound = true; continue; } } } return bFound; } // This function wraps the CUDA Driver API into a template function template <class T> inline bool getCmdLineArgumentValue(const int argc, const char **argv, const char *string_ref, T *value) { bool bFound = false; if (argc >= 1) { for (int i = 1; i < argc; i++) { int string_start = stringRemoveDelimiter('-', argv[i]); const char *string_argv = &argv[i][string_start]; int length = static_cast<int>(strlen(string_ref)); if (!STRNCASECMP(string_argv, string_ref, length)) { if (length + 1 <= static_cast<int>(strlen(string_argv))) { int auto_inc = (string_argv[length] == '=') ? 1 : 0; *value = (T)atoi(&string_argv[length + auto_inc]); } bFound = true; i = argc; } } } return bFound; } inline int getCmdLineArgumentInt(const int argc, const char **argv, const char *string_ref) { bool bFound = false; int value = -1; if (argc >= 1) { for (int i = 1; i < argc; i++) { int string_start = stringRemoveDelimiter('-', argv[i]); const char *string_argv = &argv[i][string_start]; int length = static_cast<int>(strlen(string_ref)); if (!STRNCASECMP(string_argv, string_ref, length)) { if (length + 1 <= static_cast<int>(strlen(string_argv))) { int auto_inc = (string_argv[length] == '=') ? 1 : 0; value = atoi(&string_argv[length + auto_inc]); } else { value = 0; } bFound = true; continue; } } } if (bFound) { return value; } else { return 0; } } inline float getCmdLineArgumentFloat(const int argc, const char **argv, const char *string_ref) { bool bFound = false; float value = -1; if (argc >= 1) { for (int i = 1; i < argc; i++) { int string_start = stringRemoveDelimiter('-', argv[i]); const char *string_argv = &argv[i][string_start]; int length = static_cast<int>(strlen(string_ref)); if (!STRNCASECMP(string_argv, string_ref, length)) { if (length + 1 <= static_cast<int>(strlen(string_argv))) { int auto_inc = (string_argv[length] == '=') ? 1 : 0; value = static_cast<float>(atof(&string_argv[length + auto_inc])); } else { value = 0.f; } bFound = true; continue; } } } if (bFound) { return value; } else { return 0; } } inline bool getCmdLineArgumentString(const int argc, const char **argv, const char *string_ref, char **string_retval) { bool bFound = false; if (argc >= 1) { for (int i = 1; i < argc; i++) { int string_start = stringRemoveDelimiter('-', argv[i]); char *string_argv = const_cast<char *>(&argv[i][string_start]); int length = static_cast<int>(strlen(string_ref)); if (!STRNCASECMP(string_argv, string_ref, length)) { *string_retval = &string_argv[length + 1]; bFound = true; continue; } } } if (!bFound) { *string_retval = NULL; } return bFound; } ////////////////////////////////////////////////////////////////////////////// //! Find the path for a file assuming that //! files are found in the searchPath. //! //! @return the path if succeeded, otherwise 0 //! @param filename name of the file //! 
@param executable_path optional absolute path of the executable ////////////////////////////////////////////////////////////////////////////// inline char *sdkFindFilePath(const char *filename, const char *executable_path) { // <executable_name> defines a variable that is replaced with the name of the // executable // Typical relative search paths to locate needed companion files (e.g. sample // input data, or JIT source files) The origin for the relative search may be // the .exe file, a .bat file launching an .exe, a browser .exe launching the // .exe or .bat, etc const char *searchPath[] = { "./", // same dir "./data/", // same dir "../../../../Samples/<executable_name>/", // up 4 in tree "../../../Samples/<executable_name>/", // up 3 in tree "../../Samples/<executable_name>/", // up 2 in tree "../../../../Samples/<executable_name>/data/", // up 4 in tree "../../../Samples/<executable_name>/data/", // up 3 in tree "../../Samples/<executable_name>/data/", // up 2 in tree "../../../../Samples/0_Introduction/<executable_name>/", // up 4 in tree "../../../Samples/0_Introduction/<executable_name>/", // up 3 in tree "../../Samples/0_Introduction/<executable_name>/", // up 2 in tree "../../../../Samples/1_Utilities/<executable_name>/", // up 4 in tree "../../../Samples/1_Utilities/<executable_name>/", // up 3 in tree "../../Samples/1_Utilities/<executable_name>/", // up 2 in tree "../../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 4 in tree "../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 3 in tree "../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 2 in tree "../../../../Samples/3_CUDA_Features/<executable_name>/", // up 4 in tree "../../../Samples/3_CUDA_Features/<executable_name>/", // up 3 in tree "../../Samples/3_CUDA_Features/<executable_name>/", // up 2 in tree "../../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 4 in tree "../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 3 in tree "../../Samples/4_CUDA_Libraries/<executable_name>/", // up 2 in tree "../../../../Samples/5_Domain_Specific/<executable_name>/", // up 4 in tree "../../../Samples/5_Domain_Specific/<executable_name>/", // up 3 in tree "../../Samples/5_Domain_Specific/<executable_name>/", // up 2 in tree "../../../../Samples/6_Performance/<executable_name>/", // up 4 in tree "../../../Samples/6_Performance/<executable_name>/", // up 3 in tree "../../Samples/6_Performance/<executable_name>/", // up 2 in tree "../../../../Samples/0_Introduction/<executable_name>/data/", // up 4 in tree "../../../Samples/0_Introduction/<executable_name>/data/", // up 3 in tree "../../Samples/0_Introduction/<executable_name>/data/", // up 2 in tree "../../../../Samples/1_Utilities/<executable_name>/data/", // up 4 in tree "../../../Samples/1_Utilities/<executable_name>/data/", // up 3 in tree "../../Samples/1_Utilities/<executable_name>/data/", // up 2 in tree "../../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 4 in tree "../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 3 in tree "../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 2 in tree "../../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 4 in tree "../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 3 in tree "../../Samples/3_CUDA_Features/<executable_name>/data/", // up 2 in tree "../../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 4 in tree 
"../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 3 in tree "../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 2 in tree "../../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 4 in tree "../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 3 in tree "../../Samples/5_Domain_Specific/<executable_name>/data/", // up 2 in tree "../../../../Samples/6_Performance/<executable_name>/data/", // up 4 in tree "../../../Samples/6_Performance/<executable_name>/data/", // up 3 in tree "../../Samples/6_Performance/<executable_name>/data/", // up 2 in tree "../../../../Common/data/", // up 4 in tree "../../../Common/data/", // up 3 in tree "../../Common/data/" // up 2 in tree }; // Extract the executable name std::string executable_name; if (executable_path != 0) { executable_name = std::string(executable_path); #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) // Windows path delimiter size_t delimiter_pos = executable_name.find_last_of('\\'); executable_name.erase(0, delimiter_pos + 1); if (executable_name.rfind(".exe") != std::string::npos) { // we strip .exe, only if the .exe is found executable_name.resize(executable_name.size() - 4); } #else // Linux & OSX path delimiter size_t delimiter_pos = executable_name.find_last_of('/'); executable_name.erase(0, delimiter_pos + 1); #endif } // Loop over all search paths and return the first hit for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) { std::string path(searchPath[i]); size_t executable_name_pos = path.find("<executable_name>"); // If there is executable_name variable in the searchPath // replace it with the value if (executable_name_pos != std::string::npos) { if (executable_path != 0) { path.replace(executable_name_pos, strlen("<executable_name>"), executable_name); } else { // Skip this path entry if no executable argument is given continue; } } #ifdef _DEBUG printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str()); #endif // Test if the file exists path.append(filename); FILE *fp; FOPEN(fp, path.c_str(), "rb"); if (fp != NULL) { fclose(fp); // File found // returning an allocated array here for backwards compatibility reasons char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1)); STRCPY(file_path, path.length() + 1, path.c_str()); return file_path; } if (fp) { fclose(fp); } } // File not found printf("\nerror: sdkFindFilePath: file <%s> not found!\n", filename); return 0; } #endif // COMMON_HELPER_STRING_H_
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/Common/exception.h
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* CUda UTility Library */ #ifndef COMMON_EXCEPTION_H_ #define COMMON_EXCEPTION_H_ // includes, system #include <stdlib.h> #include <exception> #include <iostream> #include <stdexcept> #include <string> //! Exception wrapper. //! @param Std_Exception Exception out of namespace std for easy typing. template <class Std_Exception> class Exception : public Std_Exception { public: //! @brief Static construction interface //! @return Alwayss throws ( Located_Exception<Exception>) //! @param file file in which the Exception occurs //! @param line line in which the Exception occurs //! @param detailed details on the code fragment causing the Exception static void throw_it(const char *file, const int line, const char *detailed = "-"); //! Static construction interface //! @return Alwayss throws ( Located_Exception<Exception>) //! @param file file in which the Exception occurs //! @param line line in which the Exception occurs //! @param detailed details on the code fragment causing the Exception static void throw_it(const char *file, const int line, const std::string &detailed); //! Destructor virtual ~Exception() throw(); private: //! Constructor, default (private) Exception(); //! Constructor, standard //! @param str string returned by what() explicit Exception(const std::string &str); }; //////////////////////////////////////////////////////////////////////////////// //! Exception handler function for arbitrary exceptions //! @param ex exception to handle //////////////////////////////////////////////////////////////////////////////// template <class Exception_Typ> inline void handleException(const Exception_Typ &ex) { std::cerr << ex.what() << std::endl; exit(EXIT_FAILURE); } //! Convenience macros //! Exception caused by dynamic program behavior, e.g. file does not exist #define RUNTIME_EXCEPTION(msg) \ Exception<std::runtime_error>::throw_it(__FILE__, __LINE__, msg) //! Logic exception in program, e.g. 
an assert failed #define LOGIC_EXCEPTION(msg) \ Exception<std::logic_error>::throw_it(__FILE__, __LINE__, msg) //! Out of range exception #define RANGE_EXCEPTION(msg) \ Exception<std::range_error>::throw_it(__FILE__, __LINE__, msg) //////////////////////////////////////////////////////////////////////////////// //! Implementation // includes, system #include <sstream> //////////////////////////////////////////////////////////////////////////////// //! Static construction interface. //! @param Exception causing code fragment (file and line) and detailed info. //////////////////////////////////////////////////////////////////////////////// /*static*/ template <class Std_Exception> void Exception<Std_Exception>::throw_it(const char *file, const int line, const char *detailed) { std::stringstream s; // Quite heavyweight, but exceptions are not for // performance / release versions s << "Exception in file '" << file << "' in line " << line << "\n" << "Detailed description: " << detailed << "\n"; throw Exception(s.str()); } //////////////////////////////////////////////////////////////////////////////// //! Static construction interface. //! @param Exception causing code fragment (file and line) and detailed info. //////////////////////////////////////////////////////////////////////////////// /*static*/ template <class Std_Exception> void Exception<Std_Exception>::throw_it(const char *file, const int line, const std::string &msg) { throw_it(file, line, msg.c_str()); } //////////////////////////////////////////////////////////////////////////////// //! Constructor, default (private). //////////////////////////////////////////////////////////////////////////////// template <class Std_Exception> Exception<Std_Exception>::Exception() : Std_Exception("Unknown Exception.\n") {} //////////////////////////////////////////////////////////////////////////////// //! Constructor, standard (private). //! String returned by what(). //////////////////////////////////////////////////////////////////////////////// template <class Std_Exception> Exception<Std_Exception>::Exception(const std::string &s) : Std_Exception(s) {} //////////////////////////////////////////////////////////////////////////////// //! Destructor //////////////////////////////////////////////////////////////////////////////// template <class Std_Exception> Exception<Std_Exception>::~Exception() throw() {} // functions, exported #endif // COMMON_EXCEPTION_H_
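// ----------------------------------------------------------------------------
// Minimal usage sketch for the Exception wrapper and convenience macros above,
// written as a separate standalone .cpp and assuming this header is reachable
// as <exception.h>. The file name passed to fopen() is made up; the point is
// the throw/catch path.
// ----------------------------------------------------------------------------
#include <cstdio>
#include <stdexcept>
#include <exception.h>

static void loadInput(const char *path) {
  FILE *fp = fopen(path, "rb");
  if (fp == NULL) {
    // Throws Exception<std::runtime_error>, annotated with __FILE__/__LINE__.
    RUNTIME_EXCEPTION("input file could not be opened");
  }
  fclose(fp);
}

int main() {
  try {
    loadInput("does_not_exist.bin");
  } catch (const std::runtime_error &ex) {
    // Exception<std::runtime_error> derives from std::runtime_error, so it is
    // caught here; handleException prints ex.what() and exits with failure.
    handleException(ex);
  }
  return 0;
}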
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/Common/helper_cuda.h
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions for initialization and error checking #ifndef COMMON_HELPER_CUDA_H_ #define COMMON_HELPER_CUDA_H_ #pragma once #include <sycl/sycl.hpp> #include <dpct/dpct.hpp> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <helper_string.h> #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif // Note, it is required that your SDK sample to include the proper header // files, please refer the CUDA examples for examples of the needed CUDA // headers, which may change depending on which CUDA functions are used. // CUDA Runtime error messages #ifdef __DPCT_HPP__ static const char *_cudaGetErrorEnum(dpct::err0 error) { /* DPCT1009:5: SYCL uses exceptions to report errors and does not use the error codes. The original code was commented out and a warning string was inserted. You need to rewrite this code. */ return "cudaGetErrorName is not supported" /*cudaGetErrorName(error)*/; } #endif #ifdef CUDA_DRIVER_API // CUDA Driver API errors static const char *_cudaGetErrorEnum(CUresult error) { static char unknown[] = "<unknown>"; const char *ret = NULL; cuGetErrorName(error, &ret); return ret ? 
ret : unknown; } #endif #ifdef CUBLAS_API_H_ // cuBLAS API errors static const char *_cudaGetErrorEnum(cublasStatus_t error) { switch (error) { case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; } return "<unknown>"; } #endif #ifdef _CUFFT_H_ // cuFFT API errors static const char *_cudaGetErrorEnum(cufftResult error) { switch (error) { case CUFFT_SUCCESS: return "CUFFT_SUCCESS"; case CUFFT_INVALID_PLAN: return "CUFFT_INVALID_PLAN"; case CUFFT_ALLOC_FAILED: return "CUFFT_ALLOC_FAILED"; case CUFFT_INVALID_TYPE: return "CUFFT_INVALID_TYPE"; case CUFFT_INVALID_VALUE: return "CUFFT_INVALID_VALUE"; case CUFFT_INTERNAL_ERROR: return "CUFFT_INTERNAL_ERROR"; case CUFFT_EXEC_FAILED: return "CUFFT_EXEC_FAILED"; case CUFFT_SETUP_FAILED: return "CUFFT_SETUP_FAILED"; case CUFFT_INVALID_SIZE: return "CUFFT_INVALID_SIZE"; case CUFFT_UNALIGNED_DATA: return "CUFFT_UNALIGNED_DATA"; case CUFFT_INCOMPLETE_PARAMETER_LIST: return "CUFFT_INCOMPLETE_PARAMETER_LIST"; case CUFFT_INVALID_DEVICE: return "CUFFT_INVALID_DEVICE"; case CUFFT_PARSE_ERROR: return "CUFFT_PARSE_ERROR"; case CUFFT_NO_WORKSPACE: return "CUFFT_NO_WORKSPACE"; case CUFFT_NOT_IMPLEMENTED: return "CUFFT_NOT_IMPLEMENTED"; case CUFFT_LICENSE_ERROR: return "CUFFT_LICENSE_ERROR"; case CUFFT_NOT_SUPPORTED: return "CUFFT_NOT_SUPPORTED"; } return "<unknown>"; } #endif #ifdef CUSPARSEAPI // cuSPARSE API errors static const char *_cudaGetErrorEnum(cusparseStatus_t error) { switch (error) { case CUSPARSE_STATUS_SUCCESS: return "CUSPARSE_STATUS_SUCCESS"; case CUSPARSE_STATUS_NOT_INITIALIZED: return "CUSPARSE_STATUS_NOT_INITIALIZED"; case CUSPARSE_STATUS_ALLOC_FAILED: return "CUSPARSE_STATUS_ALLOC_FAILED"; case CUSPARSE_STATUS_INVALID_VALUE: return "CUSPARSE_STATUS_INVALID_VALUE"; case CUSPARSE_STATUS_ARCH_MISMATCH: return "CUSPARSE_STATUS_ARCH_MISMATCH"; case CUSPARSE_STATUS_MAPPING_ERROR: return "CUSPARSE_STATUS_MAPPING_ERROR"; case CUSPARSE_STATUS_EXECUTION_FAILED: return "CUSPARSE_STATUS_EXECUTION_FAILED"; case CUSPARSE_STATUS_INTERNAL_ERROR: return "CUSPARSE_STATUS_INTERNAL_ERROR"; case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; } return "<unknown>"; } #endif #ifdef CUSOLVER_COMMON_H_ // cuSOLVER API errors static const char *_cudaGetErrorEnum(cusolverStatus_t error) { switch (error) { case CUSOLVER_STATUS_SUCCESS: return "CUSOLVER_STATUS_SUCCESS"; case CUSOLVER_STATUS_NOT_INITIALIZED: return "CUSOLVER_STATUS_NOT_INITIALIZED"; case CUSOLVER_STATUS_ALLOC_FAILED: return "CUSOLVER_STATUS_ALLOC_FAILED"; case CUSOLVER_STATUS_INVALID_VALUE: return "CUSOLVER_STATUS_INVALID_VALUE"; case CUSOLVER_STATUS_ARCH_MISMATCH: return "CUSOLVER_STATUS_ARCH_MISMATCH"; case CUSOLVER_STATUS_MAPPING_ERROR: return "CUSOLVER_STATUS_MAPPING_ERROR"; case CUSOLVER_STATUS_EXECUTION_FAILED: return "CUSOLVER_STATUS_EXECUTION_FAILED"; case CUSOLVER_STATUS_INTERNAL_ERROR: return 
"CUSOLVER_STATUS_INTERNAL_ERROR"; case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; case CUSOLVER_STATUS_NOT_SUPPORTED: return "CUSOLVER_STATUS_NOT_SUPPORTED "; case CUSOLVER_STATUS_ZERO_PIVOT: return "CUSOLVER_STATUS_ZERO_PIVOT"; case CUSOLVER_STATUS_INVALID_LICENSE: return "CUSOLVER_STATUS_INVALID_LICENSE"; } return "<unknown>"; } #endif #ifdef CURAND_H_ // cuRAND API errors static const char *_cudaGetErrorEnum(int error) { switch (error) { case 0: return "CURAND_STATUS_SUCCESS"; case 100: return "CURAND_STATUS_VERSION_MISMATCH"; case 101: return "CURAND_STATUS_NOT_INITIALIZED"; case 102: return "CURAND_STATUS_ALLOCATION_FAILED"; case 103: return "CURAND_STATUS_TYPE_ERROR"; case 104: return "CURAND_STATUS_OUT_OF_RANGE"; case 105: return "CURAND_STATUS_LENGTH_NOT_MULTIPLE"; case 106: return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED"; case 201: return "CURAND_STATUS_LAUNCH_FAILURE"; case 202: return "CURAND_STATUS_PREEXISTING_FAILURE"; case 203: return "CURAND_STATUS_INITIALIZATION_FAILED"; case 204: return "CURAND_STATUS_ARCH_MISMATCH"; case 999: return "CURAND_STATUS_INTERNAL_ERROR"; } return "<unknown>"; } #endif #ifdef NVJPEGAPI // nvJPEG API errors static const char *_cudaGetErrorEnum(nvjpegStatus_t error) { switch (error) { case NVJPEG_STATUS_SUCCESS: return "NVJPEG_STATUS_SUCCESS"; case NVJPEG_STATUS_NOT_INITIALIZED: return "NVJPEG_STATUS_NOT_INITIALIZED"; case NVJPEG_STATUS_INVALID_PARAMETER: return "NVJPEG_STATUS_INVALID_PARAMETER"; case NVJPEG_STATUS_BAD_JPEG: return "NVJPEG_STATUS_BAD_JPEG"; case NVJPEG_STATUS_JPEG_NOT_SUPPORTED: return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED"; case NVJPEG_STATUS_ALLOCATOR_FAILURE: return "NVJPEG_STATUS_ALLOCATOR_FAILURE"; case NVJPEG_STATUS_EXECUTION_FAILED: return "NVJPEG_STATUS_EXECUTION_FAILED"; case NVJPEG_STATUS_ARCH_MISMATCH: return "NVJPEG_STATUS_ARCH_MISMATCH"; case NVJPEG_STATUS_INTERNAL_ERROR: return "NVJPEG_STATUS_INTERNAL_ERROR"; } return "<unknown>"; } #endif #ifdef NV_NPPIDEFS_H // NPP API errors static const char *_cudaGetErrorEnum(NppStatus error) { switch (error) { case NPP_NOT_SUPPORTED_MODE_ERROR: return "NPP_NOT_SUPPORTED_MODE_ERROR"; case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR: return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR"; case NPP_RESIZE_NO_OPERATION_ERROR: return "NPP_RESIZE_NO_OPERATION_ERROR"; case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY: return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY"; #if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000 case NPP_BAD_ARG_ERROR: return "NPP_BAD_ARGUMENT_ERROR"; case NPP_COEFF_ERROR: return "NPP_COEFFICIENT_ERROR"; case NPP_RECT_ERROR: return "NPP_RECTANGLE_ERROR"; case NPP_QUAD_ERROR: return "NPP_QUADRANGLE_ERROR"; case NPP_MEM_ALLOC_ERR: return "NPP_MEMORY_ALLOCATION_ERROR"; case NPP_HISTO_NUMBER_OF_LEVELS_ERROR: return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR"; case NPP_INVALID_INPUT: return "NPP_INVALID_INPUT"; case NPP_POINTER_ERROR: return "NPP_POINTER_ERROR"; case NPP_WARNING: return "NPP_WARNING"; case NPP_ODD_ROI_WARNING: return "NPP_ODD_ROI_WARNING"; #else // These are for CUDA 5.5 or higher case NPP_BAD_ARGUMENT_ERROR: return "NPP_BAD_ARGUMENT_ERROR"; case NPP_COEFFICIENT_ERROR: return "NPP_COEFFICIENT_ERROR"; case NPP_RECTANGLE_ERROR: return "NPP_RECTANGLE_ERROR"; case NPP_QUADRANGLE_ERROR: return "NPP_QUADRANGLE_ERROR"; case NPP_MEMORY_ALLOCATION_ERR: return "NPP_MEMORY_ALLOCATION_ERROR"; case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR: return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR"; case NPP_INVALID_HOST_POINTER_ERROR: return 
"NPP_INVALID_HOST_POINTER_ERROR"; case NPP_INVALID_DEVICE_POINTER_ERROR: return "NPP_INVALID_DEVICE_POINTER_ERROR"; #endif case NPP_LUT_NUMBER_OF_LEVELS_ERROR: return "NPP_LUT_NUMBER_OF_LEVELS_ERROR"; case NPP_TEXTURE_BIND_ERROR: return "NPP_TEXTURE_BIND_ERROR"; case NPP_WRONG_INTERSECTION_ROI_ERROR: return "NPP_WRONG_INTERSECTION_ROI_ERROR"; case NPP_NOT_EVEN_STEP_ERROR: return "NPP_NOT_EVEN_STEP_ERROR"; case NPP_INTERPOLATION_ERROR: return "NPP_INTERPOLATION_ERROR"; case NPP_RESIZE_FACTOR_ERROR: return "NPP_RESIZE_FACTOR_ERROR"; case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR: return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR"; #if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000 case NPP_MEMFREE_ERR: return "NPP_MEMFREE_ERR"; case NPP_MEMSET_ERR: return "NPP_MEMSET_ERR"; case NPP_MEMCPY_ERR: return "NPP_MEMCPY_ERROR"; case NPP_MIRROR_FLIP_ERR: return "NPP_MIRROR_FLIP_ERR"; #else case NPP_MEMFREE_ERROR: return "NPP_MEMFREE_ERROR"; case NPP_MEMSET_ERROR: return "NPP_MEMSET_ERROR"; case NPP_MEMCPY_ERROR: return "NPP_MEMCPY_ERROR"; case NPP_MIRROR_FLIP_ERROR: return "NPP_MIRROR_FLIP_ERROR"; #endif case NPP_ALIGNMENT_ERROR: return "NPP_ALIGNMENT_ERROR"; case NPP_STEP_ERROR: return "NPP_STEP_ERROR"; case NPP_SIZE_ERROR: return "NPP_SIZE_ERROR"; case NPP_NULL_POINTER_ERROR: return "NPP_NULL_POINTER_ERROR"; case NPP_CUDA_KERNEL_EXECUTION_ERROR: return "NPP_CUDA_KERNEL_EXECUTION_ERROR"; case NPP_NOT_IMPLEMENTED_ERROR: return "NPP_NOT_IMPLEMENTED_ERROR"; case NPP_ERROR: return "NPP_ERROR"; case NPP_SUCCESS: return "NPP_SUCCESS"; case NPP_WRONG_INTERSECTION_QUAD_WARNING: return "NPP_WRONG_INTERSECTION_QUAD_WARNING"; case NPP_MISALIGNED_DST_ROI_WARNING: return "NPP_MISALIGNED_DST_ROI_WARNING"; case NPP_AFFINE_QUAD_INCORRECT_WARNING: return "NPP_AFFINE_QUAD_INCORRECT_WARNING"; case NPP_DOUBLE_SIZE_WARNING: return "NPP_DOUBLE_SIZE_WARNING"; case NPP_WRONG_INTERSECTION_ROI_WARNING: return "NPP_WRONG_INTERSECTION_ROI_WARNING"; #if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000 /* These are 6.0 or higher */ case NPP_LUT_PALETTE_BITSIZE_ERROR: return "NPP_LUT_PALETTE_BITSIZE_ERROR"; case NPP_ZC_MODE_NOT_SUPPORTED_ERROR: return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR"; case NPP_QUALITY_INDEX_ERROR: return "NPP_QUALITY_INDEX_ERROR"; case NPP_CHANNEL_ORDER_ERROR: return "NPP_CHANNEL_ORDER_ERROR"; case NPP_ZERO_MASK_VALUE_ERROR: return "NPP_ZERO_MASK_VALUE_ERROR"; case NPP_NUMBER_OF_CHANNELS_ERROR: return "NPP_NUMBER_OF_CHANNELS_ERROR"; case NPP_COI_ERROR: return "NPP_COI_ERROR"; case NPP_DIVISOR_ERROR: return "NPP_DIVISOR_ERROR"; case NPP_CHANNEL_ERROR: return "NPP_CHANNEL_ERROR"; case NPP_STRIDE_ERROR: return "NPP_STRIDE_ERROR"; case NPP_ANCHOR_ERROR: return "NPP_ANCHOR_ERROR"; case NPP_MASK_SIZE_ERROR: return "NPP_MASK_SIZE_ERROR"; case NPP_MOMENT_00_ZERO_ERROR: return "NPP_MOMENT_00_ZERO_ERROR"; case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR: return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR"; case NPP_THRESHOLD_ERROR: return "NPP_THRESHOLD_ERROR"; case NPP_CONTEXT_MATCH_ERROR: return "NPP_CONTEXT_MATCH_ERROR"; case NPP_FFT_FLAG_ERROR: return "NPP_FFT_FLAG_ERROR"; case NPP_FFT_ORDER_ERROR: return "NPP_FFT_ORDER_ERROR"; case NPP_SCALE_RANGE_ERROR: return "NPP_SCALE_RANGE_ERROR"; case NPP_DATA_TYPE_ERROR: return "NPP_DATA_TYPE_ERROR"; case NPP_OUT_OFF_RANGE_ERROR: return "NPP_OUT_OFF_RANGE_ERROR"; case NPP_DIVIDE_BY_ZERO_ERROR: return "NPP_DIVIDE_BY_ZERO_ERROR"; case NPP_RANGE_ERROR: return "NPP_RANGE_ERROR"; case NPP_NO_MEMORY_ERROR: return "NPP_NO_MEMORY_ERROR"; case NPP_ERROR_RESERVED: return 
"NPP_ERROR_RESERVED"; case NPP_NO_OPERATION_WARNING: return "NPP_NO_OPERATION_WARNING"; case NPP_DIVIDE_BY_ZERO_WARNING: return "NPP_DIVIDE_BY_ZERO_WARNING"; #endif #if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000 /* These are 7.0 or higher */ case NPP_OVERFLOW_ERROR: return "NPP_OVERFLOW_ERROR"; case NPP_CORRUPTED_DATA_ERROR: return "NPP_CORRUPTED_DATA_ERROR"; #endif } return "<unknown>"; } #endif template <typename T> void check(T result, char const *const func, const char *const file, int const line) { } #ifdef __DPCT_HPP__ // This will output the proper CUDA error strings in the event // that a CUDA host call returns an error #define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__) // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line) { /* DPCT1010:6: SYCL uses exceptions to report errors and does not use the error codes. The call was replaced with 0. You need to rewrite this code. */ dpct::err0 err = 0; } // This will only print the proper error string when calling cudaGetLastError // but not exit program incase error detected. #define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__) inline void __printLastCudaError(const char *errorMessage, const char *file, const int line) { /* DPCT1010:8: SYCL uses exceptions to report errors and does not use the error codes. The call was replaced with 0. You need to rewrite this code. */ dpct::err0 err = 0; } #endif #ifndef MAX #define MAX(a, b) (a > b ? a : b) #endif // Float To Int conversion inline int ftoi(float value) { return (value >= 0 ? static_cast<int>(value + 0.5) : static_cast<int>(value - 0.5)); } // Beginning of GPU Architecture definitions inline int _ConvertSMVer2Cores(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine // the # of cores per SM typedef struct dpct_type_113531 { int SM; // 0xMm (hexidecimal notation), M = SM Major version, // and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { {0x30, 192}, {0x32, 192}, {0x35, 192}, {0x37, 192}, {0x50, 128}, {0x52, 128}, {0x53, 128}, {0x60, 64}, {0x61, 128}, {0x62, 128}, {0x70, 64}, {0x72, 64}, {0x75, 64}, {0x80, 64}, {0x86, 128}, {0x87, 128}, {0x89, 128}, {0x90, 128}, {-1, -1}}; int index = 0; while (nGpuArchCoresPerSM[index].SM != -1) { if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) { return nGpuArchCoresPerSM[index].Cores; } index++; } // If we don't find the values, we default use the previous one // to run properly printf( "MapSMtoCores for SM %d.%d is undefined." 
" Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index - 1].Cores); return nGpuArchCoresPerSM[index - 1].Cores; } inline const char* _ConvertSMVer2ArchName(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine // the GPU Arch name) typedef struct dpct_type_281558 { int SM; // 0xMm (hexidecimal notation), M = SM Major version, // and m = SM minor version const char* name; } sSMtoArchName; sSMtoArchName nGpuArchNameSM[] = { {0x30, "Kepler"}, {0x32, "Kepler"}, {0x35, "Kepler"}, {0x37, "Kepler"}, {0x50, "Maxwell"}, {0x52, "Maxwell"}, {0x53, "Maxwell"}, {0x60, "Pascal"}, {0x61, "Pascal"}, {0x62, "Pascal"}, {0x70, "Volta"}, {0x72, "Xavier"}, {0x75, "Turing"}, {0x80, "Ampere"}, {0x86, "Ampere"}, {0x87, "Ampere"}, {0x89, "Ada"}, {0x90, "Hopper"}, {-1, "Graphics Device"}}; int index = 0; while (nGpuArchNameSM[index].SM != -1) { if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) { return nGpuArchNameSM[index].name; } index++; } // If we don't find the values, we default use the previous one // to run properly printf( "MapSMtoArchName for SM %d.%d is undefined." " Default to use %s\n", major, minor, nGpuArchNameSM[index - 1].name); return nGpuArchNameSM[index - 1].name; } // end of GPU Architecture definitions #ifdef __DPCT_HPP__ // General GPU Device CUDA Initialization inline int gpuDeviceInit(int devID) { int device_count; checkCudaErrors(DPCT_CHECK_ERROR( device_count = dpct::dev_mgr::instance().device_count())); if (device_count == 0) { fprintf(stderr, "gpuDeviceInit() CUDA error: " "no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } if (devID < 0) { devID = 0; } if (devID > device_count - 1) { fprintf(stderr, "\n"); fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", device_count); fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid" " GPU device. <<\n", devID); fprintf(stderr, "\n"); return -devID; } int computeMode = -1, major = 0, minor = 0; /* DPCT1035:10: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1)); checkCudaErrors(DPCT_CHECK_ERROR( major = dpct::dev_mgr::instance().get_device(devID).get_major_version())); checkCudaErrors(DPCT_CHECK_ERROR( minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version())); /* DPCT1035:11: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ if (computeMode == 0) { fprintf(stderr, "Error: device is running in <Compute Mode " "Prohibited>, no threads can use cudaSetDevice().\n"); return -1; } if (major < 1) { fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n"); exit(EXIT_FAILURE); } /* DPCT1093:12: The "devID" device may be not the one intended for use. Adjust the selected device if needed. 
*/ checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID))); printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, _ConvertSMVer2ArchName(major, minor)); return devID; } // This function returns the best GPU (with maximum GFLOPS) inline int gpuGetMaxGflopsDeviceId() try { int current_device = 0, sm_per_multiproc = 0; int max_perf_device = 0; int device_count = 0; int devices_prohibited = 0; uint64_t max_compute_perf = 0; checkCudaErrors(DPCT_CHECK_ERROR( device_count = dpct::dev_mgr::instance().device_count())); if (device_count == 0) { fprintf(stderr, "gpuGetMaxGflopsDeviceId() CUDA error:" " no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } // Find the best CUDA capable GPU device current_device = 0; while (current_device < device_count) { int computeMode = -1, major = 0, minor = 0; /* DPCT1035:13: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1)); checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance() .get_device(current_device) .get_major_version())); checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance() .get_device(current_device) .get_minor_version())); // If this GPU is not running on Compute Mode prohibited, // then we can add it to the list /* DPCT1035:14: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ if (computeMode != 0) { if (major == 9999 && minor == 9999) { sm_per_multiproc = 1; } else { sm_per_multiproc = _ConvertSMVer2Cores(major, minor); } int multiProcessorCount = 0, clockRate = 0; checkCudaErrors( DPCT_CHECK_ERROR(multiProcessorCount = dpct::dev_mgr::instance() .get_device(current_device) .get_max_compute_units())); dpct::err0 result = DPCT_CHECK_ERROR(clockRate = dpct::dev_mgr::instance() .get_device(current_device) .get_max_clock_frequency()); uint64_t compute_perf = (uint64_t)multiProcessorCount * sm_per_multiproc * clockRate; if (compute_perf > max_compute_perf) { max_compute_perf = compute_perf; max_perf_device = current_device; } } else { devices_prohibited++; } ++current_device; } if (devices_prohibited == device_count) { fprintf(stderr, "gpuGetMaxGflopsDeviceId() CUDA error:" " all devices have compute mode prohibited.\n"); exit(EXIT_FAILURE); } return max_perf_device; } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } // Initialization code to find the best CUDA Device inline int findCudaDevice(int argc, const char **argv) { int devID = 0; // If the command-line has a device number specified, use it if (checkCmdLineFlag(argc, argv, "device")) { devID = getCmdLineArgumentInt(argc, argv, "device="); if (devID < 0) { printf("Invalid command line parameter\n "); exit(EXIT_FAILURE); } else { devID = gpuDeviceInit(devID); if (devID < 0) { printf("exiting...\n"); exit(EXIT_FAILURE); } } } else { // Otherwise pick the device with highest Gflops/s devID = gpuGetMaxGflopsDeviceId(); /* DPCT1093:15: The "devID" device may be not the one intended for use. Adjust the selected device if needed. 
*/ checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID))); int major = 0, minor = 0; checkCudaErrors(DPCT_CHECK_ERROR( major = dpct::dev_mgr::instance().get_device(devID).get_major_version())); checkCudaErrors(DPCT_CHECK_ERROR( minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version())); printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, _ConvertSMVer2ArchName(major, minor), major, minor); } return devID; } inline int findIntegratedGPU() { int current_device = 0; int device_count = 0; int devices_prohibited = 0; checkCudaErrors(DPCT_CHECK_ERROR( device_count = dpct::dev_mgr::instance().device_count())); if (device_count == 0) { fprintf(stderr, "CUDA error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } // Find the integrated GPU which is compute capable while (current_device < device_count) { int computeMode = -1, integrated = -1; /* DPCT1035:16: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1)); checkCudaErrors( DPCT_CHECK_ERROR(integrated = dpct::dev_mgr::instance() .get_device(current_device) .get_integrated())); // If GPU is integrated and is not running on Compute Mode prohibited, // then cuda can map to GLES resource /* DPCT1035:17: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ if (integrated && (computeMode != 0)) { /* DPCT1093:18: The "current_device" device may be not the one intended for use. Adjust the selected device if needed. */ checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(current_device))); int major = 0, minor = 0; checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance() .get_device(current_device) .get_major_version())); checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance() .get_device(current_device) .get_minor_version())); printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", current_device, _ConvertSMVer2ArchName(major, minor), major, minor); return current_device; } else { devices_prohibited++; } current_device++; } if (devices_prohibited == device_count) { fprintf(stderr, "CUDA error:" " No GLES-CUDA Interop capable GPU found.\n"); exit(EXIT_FAILURE); } return -1; } // General check for CUDA GPU SM Capabilities inline bool checkCudaCapabilities(int major_version, int minor_version) { int dev; int major = 0, minor = 0; checkCudaErrors(dev = dpct::dev_mgr::instance().current_device_id()); checkCudaErrors(DPCT_CHECK_ERROR( major = dpct::dev_mgr::instance().get_device(dev).get_major_version())); checkCudaErrors(DPCT_CHECK_ERROR( minor = dpct::dev_mgr::instance().get_device(dev).get_minor_version())); if ((major > major_version) || (major == major_version && minor >= minor_version)) { printf(" Device %d: <%16s >, Compute SM %d.%d detected\n", dev, _ConvertSMVer2ArchName(major, minor), major, minor); return true; } else { printf( " No GPU device was found that can support " "CUDA compute capability %d.%d.\n", major_version, minor_version); return false; } } #endif // end of CUDA Helper Functions #endif // COMMON_HELPER_CUDA_H_
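// ----------------------------------------------------------------------------
// Minimal usage sketch for the device-selection helpers above in their
// SYCL/dpct-migrated form, written as a separate standalone .cpp and assuming
// this header is reachable as <helper_cuda.h>. Only calls that appear in this
// header are used; the "3.0" capability gate is an arbitrary example value.
// ----------------------------------------------------------------------------
#include <cstdio>
#include <helper_cuda.h>

int main(int argc, char **argv) {
  // Honors "-device=<n>" if given, otherwise picks the highest-GFLOPS device.
  int devID = findCudaDevice(argc, (const char **)argv);

  int major = dpct::dev_mgr::instance().get_device(devID).get_major_version();
  int minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version();
  printf("Selected device %d, reported version %d.%d\n", devID, major, minor);

  // Mirrors the capability gate used by many of the samples.
  if (!checkCudaCapabilities(3, 0)) {
    return EXIT_WAIVED;
  }
  return EXIT_SUCCESS;
}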
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/Samples/2_Concepts_and_Techniques/sortingNetworks/sortingNetworks_common.dp.hpp
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SORTINGNETWORKS_COMMON_CUH #define SORTINGNETWORKS_COMMON_CUH #include <sycl/sycl.hpp> #include <dpct/dpct.hpp> #include "sortingNetworks_common.h" // Enables maximum occupancy #define SHARED_SIZE_LIMIT 1024U // Map to single instructions on G8x / G9x / G100 #define UMUL(a, b) __umul24((a), (b)) #define UMAD(a, b, c) (UMUL((a), (b)) + (c)) inline void Comparator(uint &keyA, uint &valA, uint &keyB, uint &valB, uint dir) { uint t; if ((keyA > keyB) == dir) { t = keyA; keyA = keyB; keyB = t; t = valA; valA = valB; valB = t; } } #endif
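// ----------------------------------------------------------------------------
// Host-side illustration of the Comparator() primitive above: one
// compare-and-swap step of the sorting network. With dir == 1 (ascending) the
// smaller key, together with its payload value, ends up in slot A. A minimal
// standalone snippet, assuming the header is included as shown.
// ----------------------------------------------------------------------------
#include <cstdio>
#include "sortingNetworks_common.dp.hpp"

int main() {
  uint keyA = 7, valA = 100;   // payload values travel with their keys
  uint keyB = 3, valB = 200;

  Comparator(keyA, valA, keyB, valB, 1 /* ascending */);

  // Now keyA == 3 / valA == 200 and keyB == 7 / valB == 100.
  printf("A=(%u,%u) B=(%u,%u)\n", keyA, valA, keyB, valB);
  return 0;
}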
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/Samples/2_Concepts_and_Techniques/sortingNetworks/sortingNetworks_common.h
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ //////////////////////////////////////////////////////////////////////////////// // Shortcut definition //////////////////////////////////////////////////////////////////////////////// typedef unsigned int uint; /////////////////////////////////////////////////////////////////////////////// // Sort result validation routines //////////////////////////////////////////////////////////////////////////////// // Sorted keys array validation (check for integrity and proper order) extern "C" uint validateSortedKeys(uint *resKey, uint *srcKey, uint batchSize, uint arrayLength, uint numValues, uint dir); extern "C" int validateValues(uint *resKey, uint *resVal, uint *srcKey, uint batchSize, uint arrayLength); //////////////////////////////////////////////////////////////////////////////// // CUDA sorting networks //////////////////////////////////////////////////////////////////////////////// extern "C" uint oddEvenMergeSort(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint batchSize, uint arrayLength, uint dir);
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/Samples/2_Concepts_and_Techniques/sortingNetworks/sortingNetworks_validate.cpp
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "sortingNetworks_common.h" //////////////////////////////////////////////////////////////////////////////// // Validate sorted keys array (check for integrity and proper order) //////////////////////////////////////////////////////////////////////////////// extern "C" uint validateSortedKeys(uint *resKey, uint *srcKey, uint batchSize, uint arrayLength, uint numValues, uint dir) { uint *srcHist; uint *resHist; if (arrayLength < 2) { printf("validateSortedKeys(): arrayLength too short, exiting...\n"); return 1; } printf("...inspecting keys array: "); srcHist = (uint *)malloc(numValues * sizeof(uint)); resHist = (uint *)malloc(numValues * sizeof(uint)); int flag = 1; for (uint j = 0; j < batchSize; j++, srcKey += arrayLength, resKey += arrayLength) { // Build histograms for keys arrays memset(srcHist, 0, numValues * sizeof(uint)); memset(resHist, 0, numValues * sizeof(uint)); for (uint i = 0; i < arrayLength; i++) { if (srcKey[i] < numValues && resKey[i] < numValues) { srcHist[srcKey[i]]++; resHist[resKey[i]]++; } else { flag = 0; break; } } if (!flag) { printf("***Set %u source/result key arrays are not limited properly***\n", j); goto brk; } // Compare the histograms for (uint i = 0; i < numValues; i++) if (srcHist[i] != resHist[i]) { flag = 0; break; } if (!flag) { printf("***Set %u source/result keys histograms do not match***\n", j); goto brk; } if (dir) { // Ascending order for (uint i = 0; i < arrayLength - 1; i++) if (resKey[i + 1] < resKey[i]) { flag = 0; break; } } else { // Descending order for (uint i = 0; i < arrayLength - 1; i++) if (resKey[i + 1] > resKey[i]) { flag = 0; break; } } if (!flag) { printf("***Set %u result key array is not ordered properly***\n", j); goto brk; } } brk: free(resHist); free(srcHist); if (flag) printf("OK\n"); return flag; } extern "C" int validateValues(uint *resKey, uint *resVal, uint *srcKey, uint batchSize, uint arrayLength) { int correctFlag = 1, stableFlag = 1; printf("...inspecting keys 
and values array: "); for (uint i = 0; i < batchSize; i++, resKey += arrayLength, resVal += arrayLength) { for (uint j = 0; j < arrayLength; j++) { if (resKey[j] != srcKey[resVal[j]]) correctFlag = 0; if ((j < arrayLength - 1) && (resKey[j] == resKey[j + 1]) && (resVal[j] > resVal[j + 1])) stableFlag = 0; } } printf(correctFlag ? "OK\n" : "***corrupted!!!***\n"); printf(stableFlag ? "...stability property: stable!\n" : "...stability property: NOT stable\n"); return correctFlag; }
cpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/Samples/2_Concepts_and_Techniques/sortingNetworks/oddEvenMergeSort.dp.cpp
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ //Based on http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/networks/oemen.htm #include <sycl/sycl.hpp> #include <dpct/dpct.hpp> #include <assert.h> #include <helper_cuda.h> #include "sortingNetworks_common.h" #include "sortingNetworks_common.dp.hpp" //////////////////////////////////////////////////////////////////////////////// // Monolithic Bacther's sort kernel for short arrays fitting into shared memory //////////////////////////////////////////////////////////////////////////////// void oddEvenMergeSortShared(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint dir, const sycl::nd_item<3> &item_ct1, uint *s_key, uint *s_val) { // Handle to thread block group auto cta = item_ct1.get_group(); // Shared memory storage for one or more small vectors // Offset to the beginning of subbatch and load data d_SrcKey += item_ct1.get_group(2) * SHARED_SIZE_LIMIT + item_ct1.get_local_id(2); d_SrcVal += item_ct1.get_group(2) * SHARED_SIZE_LIMIT + item_ct1.get_local_id(2); d_DstKey += item_ct1.get_group(2) * SHARED_SIZE_LIMIT + item_ct1.get_local_id(2); d_DstVal += item_ct1.get_group(2) * SHARED_SIZE_LIMIT + item_ct1.get_local_id(2); s_key[item_ct1.get_local_id(2) + 0] = d_SrcKey[0]; s_val[item_ct1.get_local_id(2) + 0] = d_SrcVal[0]; s_key[item_ct1.get_local_id(2) + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)]; s_val[item_ct1.get_local_id(2) + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)]; for (uint size = 2; size <= arrayLength; size <<= 1) { uint stride = size / 2; uint offset = item_ct1.get_local_id(2) & (stride - 1); { /* DPCT1065:1: Consider replacing sycl::nd_item::barrier() with sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better performance if there is no access to global memory. 
*/ item_ct1.barrier(); uint pos = 2 * item_ct1.get_local_id(2) - (item_ct1.get_local_id(2) & (stride - 1)); Comparator(s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], dir); stride >>= 1; } for (; stride > 0; stride >>= 1) { /* DPCT1065:2: Consider replacing sycl::nd_item::barrier() with sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better performance if there is no access to global memory. */ item_ct1.barrier(); uint pos = 2 * item_ct1.get_local_id(2) - (item_ct1.get_local_id(2) & (stride - 1)); if (offset >= stride) Comparator(s_key[pos - stride], s_val[pos - stride], s_key[pos + 0], s_val[pos + 0], dir); } } /* DPCT1065:0: Consider replacing sycl::nd_item::barrier() with sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better performance if there is no access to global memory. */ item_ct1.barrier(); d_DstKey[0] = s_key[item_ct1.get_local_id(2) + 0]; d_DstVal[0] = s_val[item_ct1.get_local_id(2) + 0]; d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[item_ct1.get_local_id(2) + (SHARED_SIZE_LIMIT / 2)]; d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[item_ct1.get_local_id(2) + (SHARED_SIZE_LIMIT / 2)]; } //////////////////////////////////////////////////////////////////////////////// // Odd-even merge sort iteration kernel // for large arrays (not fitting into shared memory) //////////////////////////////////////////////////////////////////////////////// void oddEvenMergeGlobal(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint size, uint stride, uint dir, const sycl::nd_item<3> &item_ct1) { uint global_comparatorI = item_ct1.get_group(2) * item_ct1.get_local_range(2) + item_ct1.get_local_id(2); // Odd-even merge uint pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1)); if (stride < size / 2) { uint offset = global_comparatorI & ((size / 2) - 1); if (offset >= stride) { uint keyA = d_SrcKey[pos - stride]; uint valA = d_SrcVal[pos - stride]; uint keyB = d_SrcKey[pos + 0]; uint valB = d_SrcVal[pos + 0]; Comparator(keyA, valA, keyB, valB, dir); d_DstKey[pos - stride] = keyA; d_DstVal[pos - stride] = valA; d_DstKey[pos + 0] = keyB; d_DstVal[pos + 0] = valB; } } else { uint keyA = d_SrcKey[pos + 0]; uint valA = d_SrcVal[pos + 0]; uint keyB = d_SrcKey[pos + stride]; uint valB = d_SrcVal[pos + stride]; Comparator(keyA, valA, keyB, valB, dir); d_DstKey[pos + 0] = keyA; d_DstVal[pos + 0] = valA; d_DstKey[pos + stride] = keyB; d_DstVal[pos + stride] = valB; } } //////////////////////////////////////////////////////////////////////////////// // Interface function //////////////////////////////////////////////////////////////////////////////// // Helper function extern "C" uint factorRadix2(uint *log2L, uint L) { if (!L) { *log2L = 0; return 0; } else { for (*log2L = 0; (L & 1) == 0; L >>= 1, *log2L++) ; return L; } } extern "C" uint oddEvenMergeSort(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint batchSize, uint arrayLength, uint dir) { // Nothing to sort if (arrayLength < 2) return 0; // Only power-of-two array lengths are supported by this implementation uint log2L; uint factorizationRemainder = factorRadix2(&log2L, arrayLength); assert(factorizationRemainder == 1); dir = (dir != 0); uint blockCount = (batchSize * arrayLength) / SHARED_SIZE_LIMIT; uint threadCount = SHARED_SIZE_LIMIT / 2; if (arrayLength <= SHARED_SIZE_LIMIT) { assert(SHARED_SIZE_LIMIT % arrayLength == 0); /* DPCT1049:3: The work-group size passed to the SYCL kernel may exceed the limit. 
To get the device limit, query info::device::max_work_group_size. Adjust the work-group size if needed. */ dpct::get_default_queue().submit([&](sycl::handler &cgh) { /* DPCT1101:19: 'SHARED_SIZE_LIMIT' expression was replaced with a value. Modify the code to use the original expression, provided in comments, if it is correct. */ sycl::local_accessor<uint, 1> s_key_acc_ct1( sycl::range<1>(1024 /*SHARED_SIZE_LIMIT*/), cgh); /* DPCT1101:20: 'SHARED_SIZE_LIMIT' expression was replaced with a value. Modify the code to use the original expression, provided in comments, if it is correct. */ sycl::local_accessor<uint, 1> s_val_acc_ct1( sycl::range<1>(1024 /*SHARED_SIZE_LIMIT*/), cgh); cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, blockCount) * sycl::range<3>(1, 1, threadCount), sycl::range<3>(1, 1, threadCount)), [=](sycl::nd_item<3> item_ct1) { oddEvenMergeSortShared(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, dir, item_ct1, s_key_acc_ct1.get_pointer(), s_val_acc_ct1.get_pointer()); }); }); } else { /* DPCT1049:4: The work-group size passed to the SYCL kernel may exceed the limit. To get the device limit, query info::device::max_work_group_size. Adjust the work-group size if needed. */ dpct::get_default_queue().submit([&](sycl::handler &cgh) { /* DPCT1101:21: 'SHARED_SIZE_LIMIT' expression was replaced with a value. Modify the code to use the original expression, provided in comments, if it is correct. */ sycl::local_accessor<uint, 1> s_key_acc_ct1( sycl::range<1>(1024 /*SHARED_SIZE_LIMIT*/), cgh); /* DPCT1101:22: 'SHARED_SIZE_LIMIT' expression was replaced with a value. Modify the code to use the original expression, provided in comments, if it is correct. */ sycl::local_accessor<uint, 1> s_val_acc_ct1( sycl::range<1>(1024 /*SHARED_SIZE_LIMIT*/), cgh); cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, blockCount) * sycl::range<3>(1, 1, threadCount), sycl::range<3>(1, 1, threadCount)), [=](sycl::nd_item<3> item_ct1) { oddEvenMergeSortShared(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, SHARED_SIZE_LIMIT, dir, item_ct1, s_key_acc_ct1.get_pointer(), s_val_acc_ct1.get_pointer()); }); }); for (uint size = 2 * SHARED_SIZE_LIMIT; size <= arrayLength; size <<= 1) for (unsigned stride = size / 2; stride > 0; stride >>= 1) { // Unlike with bitonic sort, combining bitonic merge steps with // stride = [SHARED_SIZE_LIMIT / 2 .. 1] seems to be impossible as there // are dependencies between data elements crossing the SHARED_SIZE_LIMIT // borders dpct::get_default_queue().parallel_for( sycl::nd_range<3>( sycl::range<3>(1, 1, (batchSize * arrayLength) / 512) * sycl::range<3>(1, 1, 256), sycl::range<3>(1, 1, 256)), [=](sycl::nd_item<3> item_ct1) { oddEvenMergeGlobal(d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, stride, dir, item_ct1); }); } } return threadCount; }
cpp
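The two kernels above implement Batcher's odd-even merge network: oddEvenMergeSortShared runs the full network for sub-arrays that fit in local memory, while oddEvenMergeGlobal applies one compare-exchange per work-item for the remaining (size, stride) steps. A host-side mirror of the same comparator indexing is useful as a correctness oracle; the sketch below is hypothetical (comparatorHost and oddEvenMergeSortHost are not part of the sample) and assumes the array length is a power of two and that dir == 1 means ascending.

#include <algorithm>
#include <cstddef>
#include <vector>

static void comparatorHost(unsigned int &keyA, unsigned int &valA,
                           unsigned int &keyB, unsigned int &valB,
                           unsigned int dir) {
  if ((keyA > keyB) == dir) {
    std::swap(keyA, keyB);
    std::swap(valA, valB);
  }
}

// One "virtual work-item" per comparator, using the same pos/offset
// arithmetic as oddEvenMergeGlobal above.
static void oddEvenMergeSortHost(std::vector<unsigned int> &key,
                                 std::vector<unsigned int> &val,
                                 unsigned int dir) {
  const std::size_t n = key.size();
  for (std::size_t size = 2; size <= n; size <<= 1) {
    for (std::size_t stride = size / 2; stride > 0; stride >>= 1) {
      for (std::size_t c = 0; c < n / 2; ++c) {
        std::size_t pos = 2 * c - (c & (stride - 1));
        if (stride < size / 2) {
          if ((c & (size / 2 - 1)) >= stride)
            comparatorHost(key[pos - stride], val[pos - stride],
                           key[pos], val[pos], dir);
        } else {
          comparatorHost(key[pos], val[pos],
                         key[pos + stride], val[pos + stride], dir);
        }
      }
    }
  }
}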
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/Samples/2_Concepts_and_Techniques/sortingNetworks/main.cpp.dp.cpp
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * This sample implements bitonic sort and odd-even merge sort, algorithms * belonging to the class of sorting networks. * While generally subefficient on large sequences * compared to algorithms with better asymptotic algorithmic complexity * (i.e. merge sort or radix sort), may be the algorithms of choice for sorting * batches of short- or mid-sized arrays. * Refer to the excellent tutorial by H. W. 
Lang: * http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/networks/indexen.htm * * Victor Podlozhnyuk, 07/09/2009 */ // CUDA Runtime #include <sycl/sycl.hpp> #include <dpct/dpct.hpp> // Utilities and system includes #include <helper_cuda.h> #include <helper_timer.h> #include "sortingNetworks_common.h" //////////////////////////////////////////////////////////////////////////////// // Test driver //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) try { dpct::err0 error; printf("%s Starting...\n\n", argv[0]); printf("Starting up CUDA context...\n"); int dev = findCudaDevice(argc, (const char **)argv); uint *h_InputKey, *h_InputVal, *h_OutputKeyGPU, *h_OutputValGPU; uint *d_InputKey, *d_InputVal, *d_OutputKey, *d_OutputVal; StopWatchInterface *hTimer = NULL; const uint N = 1048576; const uint DIR = 0; const uint numValues = 65536; const uint numIterations = 1; printf("Allocating and initializing host arrays...\n\n"); sdkCreateTimer(&hTimer); h_InputKey = (uint *)malloc(N * sizeof(uint)); h_InputVal = (uint *)malloc(N * sizeof(uint)); h_OutputKeyGPU = (uint *)malloc(N * sizeof(uint)); h_OutputValGPU = (uint *)malloc(N * sizeof(uint)); srand(2001); for (uint i = 0; i < N; i++) { h_InputKey[i] = rand() % numValues; h_InputVal[i] = i; } printf("Allocating and initializing CUDA arrays...\n\n"); error = DPCT_CHECK_ERROR( d_InputKey = sycl::malloc_device<uint>(N, dpct::get_default_queue())); checkCudaErrors(error); error = DPCT_CHECK_ERROR( d_InputVal = sycl::malloc_device<uint>(N, dpct::get_default_queue())); checkCudaErrors(error); error = DPCT_CHECK_ERROR( d_OutputKey = sycl::malloc_device<uint>(N, dpct::get_default_queue())); checkCudaErrors(error); error = DPCT_CHECK_ERROR( d_OutputVal = sycl::malloc_device<uint>(N, dpct::get_default_queue())); checkCudaErrors(error); error = DPCT_CHECK_ERROR(dpct::get_default_queue() .memcpy(d_InputKey, h_InputKey, N * sizeof(uint)) .wait()); checkCudaErrors(error); error = DPCT_CHECK_ERROR(dpct::get_default_queue() .memcpy(d_InputVal, h_InputVal, N * sizeof(uint)) .wait()); checkCudaErrors(error); int flag = 1; printf("Running GPU bitonic sort (%u identical iterations)...\n\n", numIterations); for (uint arrayLength = 64; arrayLength <= N; arrayLength *= 2) { printf("Testing array length %u (%u arrays per batch)...\n", arrayLength, N / arrayLength); error = DPCT_CHECK_ERROR(dpct::get_current_device().queues_wait_and_throw()); checkCudaErrors(error); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); uint threadCount = 0; for (uint i = 0; i < numIterations; i++) threadCount = oddEvenMergeSort(d_OutputKey, d_OutputVal, d_InputKey, d_InputVal, N / arrayLength, arrayLength, DIR); error = DPCT_CHECK_ERROR(dpct::get_current_device().queues_wait_and_throw()); checkCudaErrors(error); sdkStopTimer(&hTimer); printf("Average time: %f ms\n\n", sdkGetTimerValue(&hTimer) / numIterations); if (arrayLength == N) { double dTimeSecs = 1.0e-3 * sdkGetTimerValue(&hTimer) / numIterations; printf( "sortingNetworks-bitonic, Throughput = %.4f MElements/s, Time = %.5f " "s, Size = %u elements, NumDevsUsed = %u, Workgroup = %u\n", (1.0e-6 * (double)arrayLength / dTimeSecs), dTimeSecs, arrayLength, 1, threadCount); } printf("\nValidating the results...\n"); printf("...reading back GPU results\n"); error = DPCT_CHECK_ERROR( dpct::get_default_queue() .memcpy(h_OutputKeyGPU, d_OutputKey, N * sizeof(uint)) .wait()); checkCudaErrors(error); error = DPCT_CHECK_ERROR( dpct::get_default_queue() .memcpy(h_OutputValGPU, 
d_OutputVal, N * sizeof(uint))
            .wait());
    checkCudaErrors(error);

    int keysFlag = validateSortedKeys(h_OutputKeyGPU, h_InputKey,
                                      N / arrayLength, arrayLength, numValues,
                                      DIR);
    int valuesFlag = validateValues(h_OutputKeyGPU, h_OutputValGPU, h_InputKey,
                                    N / arrayLength, arrayLength);
    flag = flag && keysFlag && valuesFlag;

    printf("\n");
  }

  printf("Shutting down...\n");
  sdkDeleteTimer(&hTimer);
  sycl::free(d_OutputVal, dpct::get_default_queue());
  sycl::free(d_OutputKey, dpct::get_default_queue());
  sycl::free(d_InputVal, dpct::get_default_queue());
  sycl::free(d_InputKey, dpct::get_default_queue());
  free(h_OutputValGPU);
  free(h_OutputKeyGPU);
  free(h_InputVal);
  free(h_InputKey);
  exit(flag ? EXIT_SUCCESS : EXIT_FAILURE);
}
catch (sycl::exception const &exc) {
  std::cerr << exc.what() << "Exception caught at file:" << __FILE__
            << ", line:" << __LINE__ << std::endl;
  std::exit(1);
}
cpp
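The migrated test driver above wraps every allocation, copy, and synchronisation in DPCT_CHECK_ERROR so the original checkCudaErrors flow survives the migration. Underneath the dpct helpers it is ordinary SYCL 2020 USM; the minimal sketch below (the function name and N are illustrative assumptions) shows the same allocate/copy round trip without the wrapper macros.

#include <sycl/sycl.hpp>
#include <cstddef>
#include <vector>

void copyRoundTrip(std::size_t N) {
  sycl::queue q;                                       // default device
  std::vector<unsigned int> h_key(N, 0u);
  unsigned int *d_key = sycl::malloc_device<unsigned int>(N, q);
  q.memcpy(d_key, h_key.data(), N * sizeof(unsigned int)).wait();  // H2D
  // ... kernels would be launched on q here ...
  q.memcpy(h_key.data(), d_key, N * sizeof(unsigned int)).wait();  // D2H
  sycl::free(d_key, q);
}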
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/ccl_utils.hpp
//==---- ccl_utils.hpp----------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_CCL_UTILS_HPP__ #define __DPCT_CCL_UTILS_HPP__ #include <sycl/sycl.hpp> #include <oneapi/ccl.hpp> #include <unordered_map> #include <memory> #include "device.hpp" namespace dpct { namespace ccl { namespace detail { /// Get stored kvs with specified kvs address. inline std::shared_ptr<oneapi::ccl::kvs> & get_kvs(const oneapi::ccl::kvs::address_type &addr) { struct hash { std::size_t operator()(const oneapi::ccl::kvs::address_type &in) const { return std::hash<std::string_view>()(std::string_view(in.data(), in.size())); } }; static std::unordered_map<oneapi::ccl::kvs::address_type, std::shared_ptr<oneapi::ccl::kvs>, hash> kvs_map; return kvs_map[addr]; } /// Help class to init ccl environment. class ccl_init_helper { public: ccl_init_helper() { oneapi::ccl::init(); } }; } // namespace detail /// Get concatenated library version as an integer. static inline int get_version() { oneapi::ccl::init(); auto ver = oneapi::ccl::get_library_version(); return ver.major * 10000 + ver.minor * 100 + ver.update; } /// Create main kvs and return its address. static inline oneapi::ccl::kvs::address_type create_kvs_address() { oneapi::ccl::init(); auto ptr = oneapi::ccl::create_main_kvs(); auto addr = ptr->get_address(); detail::get_kvs(addr) = ptr; return addr; } /// Get stored kvs with /p addr if exist. Otherwise, create kvs with /p addr. static inline std::shared_ptr<oneapi::ccl::kvs> create_kvs(const oneapi::ccl::kvs::address_type &addr) { oneapi::ccl::init(); auto &ptr = detail::get_kvs(addr); if (!ptr) ptr = oneapi::ccl::create_kvs(addr); return ptr; } /// dpct communicator extension class communicator_wrapper : public dpct::ccl::detail::ccl_init_helper { public: communicator_wrapper( int size, int rank, oneapi::ccl::kvs::address_type id, const oneapi::ccl::comm_attr &attr = oneapi::ccl::default_comm_attr) : _device_comm(oneapi::ccl::create_device( static_cast<sycl::device &>(dpct::get_current_device()))), _context_comm(oneapi::ccl::create_context(dpct::get_default_context())), _comm(oneapi::ccl::create_communicator( size, rank, _device_comm, _context_comm, dpct::ccl::create_kvs(id), attr)) { _queue_init = false; _ccl_stream_ptr = nullptr; } ~communicator_wrapper() { delete _ccl_stream_ptr; }; /// Return the rank in a oneapi::ccl::communicator /// \returns The rank corresponding to communicator object int rank() const { return _comm.rank(); } /// Retrieves the number of rank in oneapi::ccl::communicator /// \returns The number of the ranks int size() const { return _comm.size(); } /// Return underlying native device, which was used in oneapi::ccl::communicator sycl::device get_device() const { return _comm.get_device().get_native(); } /// \brief allreduce is a collective communication operation that performs the global reduction operation /// on values from all ranks of communicator and distributes the result back to all ranks. 
/// \param send_buf the buffer with @c count elements of @c dtype that stores local data to be reduced /// \param recv_buf [out] the buffer to store reduced result, must have the same dimension as @c send_buf /// \param count the number of elements of type @c dtype in @c send_buf and @c recv_buf /// \param dtype the datatype of elements in @c send_buf and @c recv_buf /// \param rtype the type of the reduction operation to be applied /// \param queue_ptr a sycl::queue ptr associated with the operation /// \return @ref void void allreduce(const void *sendbuff, void *recvbuff, size_t count, oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype, sycl::queue *queue_ptr) { call_func_wrapper( [=](const oneapi::ccl::stream &stream) { return oneapi::ccl::allreduce(sendbuff, recvbuff, count, dtype, rtype, _comm, stream); }, queue_ptr); } /// \brief reduce is a collective communication operation that performs the /// global reduction operation on values from all ranks of the communicator /// and returns the result to the root rank. /// \param send_buf the buffer with @c count elements of @c dtype that stores /// local data to be reduced /// \param recv_buf [out] the buffer to store reduced result, /// must have the same dimension as @c send_buf /// \param count the number of elements of type @c dtype in @c send_buf and @c recv_buf /// \param dtype the datatype of elements in @c send_buf and @c recv_buf /// \param root the rank that gets the result of reduction /// \param rtype the type of the reduction operation to be applied /// \param queue_ptr a sycl::queue ptr associated with the operation /// \return @ref void void reduce(const void *sendbuff, void *recvbuff, size_t count, oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype, int root, sycl::queue *queue_ptr) { call_func_wrapper( [=](const oneapi::ccl::stream &stream) { return oneapi::ccl::reduce(sendbuff, recvbuff, count, dtype, rtype, root, _comm, stream); }, queue_ptr); } /// \brief broadcast is a collective communication operation that broadcasts data /// from one rank of communicator (denoted as root) to all other ranks. /// Only support in-place operation /// \param send_buf the buffer with @c count elements of @c dtype that stores /// local data to be reduced /// \param recv_buf [out] the buffer to store reduced result /// \param count the number of elements of type @c dtype in @c buf /// \param dtype thedatatype of elements in @c buf /// \param root the rank that broadcasts @c buf /// \param queue_ptr a sycl::queue ptr associated with the operation /// \return @ref void void broadcast(void *sendbuff, void *recvbuff, size_t count, oneapi::ccl::datatype dtype, int root, sycl::queue *queue_ptr) { if (sendbuff != recvbuff) { throw std::runtime_error( "oneCCL broadcast only support in-place operation. " "send_buf and recv_buf must be same."); return; } call_func_wrapper( [=](const oneapi::ccl::stream &stream) { return oneapi::ccl::broadcast(recvbuff, count, dtype, root, _comm, stream); }, queue_ptr); } /// \brief reduce_scatter is a collective communication operation that performs the global reduction operation /// on values from all ranks of the communicator and scatters the result in blocks back to all ranks. 
/// \param send_buf the buffer with @c count elements of @c dtype that stores local data to be reduced /// \param recv_buf [out] the buffer to store reduced result, must have the same dimension as @c send_buf /// \param recv_count the number of elements of type @c dtype in receive block /// \param dtype the datatype of elements in @c send_buf and @c recv_buf /// \param rtype the type of the reduction operation to be applied /// \param queue_ptr a sycl::queue ptr associated with the operation /// \return @ref void void reduce_scatter(const void *sendbuff, void *recvbuff, size_t recv_count, oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype, sycl::queue *queue_ptr) { call_func_wrapper( [=](const oneapi::ccl::stream &stream) { return oneapi::ccl::reduce_scatter(sendbuff, recvbuff, recv_count, dtype, rtype, _comm, stream); }, queue_ptr); } private: oneapi::ccl::device _device_comm; oneapi::ccl::context _context_comm; oneapi::ccl::communicator _comm; sycl::queue _queue; bool _queue_init; oneapi::ccl::stream *_ccl_stream_ptr; template <class Fn> void call_func_wrapper(Fn func, sycl::queue *qptr) { if (_queue_init && *qptr != _queue) { call_func_async(func, qptr); } else { if(!_queue_init) { _queue = *qptr; _queue_init = true; _ccl_stream_ptr = new oneapi::ccl::stream(oneapi::ccl::create_stream(_queue)); } std::invoke(func, *_ccl_stream_ptr); } } class call_func_async { sycl::queue *_q_ptr; struct call_async_impl { oneapi::ccl::stream _ccl_stream_impl; oneapi::ccl::event _ccl_event_impl; template <class Fn> explicit call_async_impl(Fn func, sycl::queue *qptr) : _ccl_stream_impl(oneapi::ccl::create_stream(*qptr)), _ccl_event_impl(std::invoke(func, _ccl_stream_impl)) {} }; call_async_impl *_imp; public: template <class Fn> explicit call_func_async(Fn func, sycl::queue *qptr) : _q_ptr(qptr), _imp(new call_async_impl(func, qptr)) {} ~call_func_async() { _q_ptr->submit([&](sycl::handler &cgh) { cgh.host_task([=] { _imp->_ccl_event_impl.wait(); delete _imp; }); }); } }; }; typedef dpct::ccl::communicator_wrapper *comm_ptr; } // namespace ccl } // namespace dpct #endif // __DPCT_CCL_UTILS_HPP__
hpp
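The communicator_wrapper above hides the oneCCL device, context, and stream setup behind an NCCL-like interface. A hypothetical usage sketch follows; it is not part of the sample, the include path is assumed, and distributing the KVS address from rank 0 to the other ranks (for example via MPI) is out of band and not shown.

#include <sycl/sycl.hpp>
#include <dpct/ccl_utils.hpp>

void allreduceExample(int world_size, int my_rank,
                      oneapi::ccl::kvs::address_type kvs_addr,
                      const float *send_buf, float *recv_buf, size_t count,
                      sycl::queue &q) {
  // Rank 0 would have created kvs_addr with dpct::ccl::create_kvs_address().
  dpct::ccl::communicator_wrapper comm(world_size, my_rank, kvs_addr);
  // Element-wise sum of `count` floats across all ranks into recv_buf.
  comm.allreduce(send_buf, recv_buf, count, oneapi::ccl::datatype::float32,
                 oneapi::ccl::reduction::sum, &q);
}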
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/util.hpp
//==---- util.hpp ---------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_UTIL_HPP__ #define __DPCT_UTIL_HPP__ #include <sycl/sycl.hpp> #include <complex> #include <type_traits> #include <cassert> #include <cstdint> // TODO: Remove these function definitions once they exist in the DPC++ compiler #if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER) template <typename T> __SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate)) T __spirv_GroupNonUniformShuffle(__spv::Scope::Flag, T, unsigned) noexcept; template <typename T> __SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate)) T __spirv_GroupNonUniformShuffleDown(__spv::Scope::Flag, T, unsigned) noexcept; template <typename T> __SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate)) T __spirv_GroupNonUniformShuffleUp(__spv::Scope::Flag, T, unsigned) noexcept; #endif namespace dpct { namespace detail { template <typename tag, typename T> class generic_error_type { public: generic_error_type() = default; generic_error_type(T value) : value{value} {} operator T() const { return value; } private: T value; }; } // namespace detail using err0 = detail::generic_error_type<struct err0_tag, int>; using err1 = detail::generic_error_type<struct err1_tag, int>; template <int... Ints> struct integer_sequence {}; template <int Size, int... Ints> struct make_index_sequence : public make_index_sequence<Size - 1, Size - 1, Ints...> {}; template <int... Ints> struct make_index_sequence<0, Ints...> : public integer_sequence<Ints...> {}; template <typename T> struct DataType { using T2 = T; }; template <typename T> struct DataType<sycl::vec<T, 2>> { using T2 = std::complex<T>; }; inline void matrix_mem_copy(void *to_ptr, const void *from_ptr, int to_ld, int from_ld, int rows, int cols, int elem_size, memcpy_direction direction = automatic, sycl::queue &queue = dpct::get_default_queue(), bool async = false) { if (to_ptr == from_ptr && to_ld == from_ld) { return; } if (to_ld == from_ld) { size_t copy_size = elem_size * ((cols - 1) * (size_t)to_ld + rows); if (async) detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr, copy_size, direction); else detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr, copy_size, direction).wait(); } else { if (async) detail::dpct_memcpy(queue, to_ptr, from_ptr, elem_size * to_ld, elem_size * from_ld, elem_size * rows, cols, direction); else sycl::event::wait(detail::dpct_memcpy( queue, to_ptr, from_ptr, elem_size * to_ld, elem_size * from_ld, elem_size * rows, cols, direction)); } } /// Copy matrix data. The default leading dimension is column. /// \param [out] to_ptr A pointer points to the destination location. /// \param [in] from_ptr A pointer points to the source location. /// \param [in] to_ld The leading dimension the destination matrix. /// \param [in] from_ld The leading dimension the source matrix. /// \param [in] rows The number of rows of the source matrix. /// \param [in] cols The number of columns of the source matrix. /// \param [in] direction The direction of the data copy. /// \param [in] queue The queue where the routine should be executed. 
/// \param [in] async If this argument is true, the return of the function /// does NOT guarantee the copy is completed. template <typename T> inline void matrix_mem_copy(T *to_ptr, const T *from_ptr, int to_ld, int from_ld, int rows, int cols, memcpy_direction direction = automatic, sycl::queue &queue = dpct::get_default_queue(), bool async = false) { using Ty = typename DataType<T>::T2; matrix_mem_copy((void *)to_ptr, (void *)from_ptr, to_ld, from_ld, rows, cols, sizeof(Ty), direction, queue, async); } /// Cast the high or low 32 bits of a double to an integer. /// \param [in] d The double value. /// \param [in] use_high32 Cast the high 32 bits of the double if true; /// otherwise cast the low 32 bits. inline int cast_double_to_int(double d, bool use_high32 = true) { sycl::vec<double, 1> v0{d}; auto v1 = v0.as<sycl::int2>(); if (use_high32) return v1[1]; return v1[0]; } /// Combine two integers, the first as the high 32 bits and the second /// as the low 32 bits, into a double. /// \param [in] high32 The integer as the high 32 bits /// \param [in] low32 The integer as the low 32 bits inline double cast_ints_to_double(int high32, int low32) { sycl::int2 v0{low32, high32}; auto v1 = v0.as<sycl::vec<double, 1>>(); return v1; } /// Reverse the bit order of an unsigned integer /// \param [in] a Input unsigned integer value /// \returns Value of a with the bit order reversed template <typename T> inline T reverse_bits(T a) { static_assert(std::is_unsigned<T>::value && std::is_integral<T>::value, "unsigned integer required"); if (!a) return 0; T mask = 0; size_t count = 4 * sizeof(T); mask = ~mask >> count; while (count) { a = ((a & mask) << count) | ((a & ~mask) >> count); count = count >> 1; mask = mask ^ (mask << count); } return a; } /// \param [in] a The first value contains 4 bytes /// \param [in] b The second value contains 4 bytes /// \param [in] s The selector value, only lower 16bit used /// \returns the permutation result of 4 bytes selected in the way /// specified by \p s from \p a and \p b inline unsigned int byte_level_permute(unsigned int a, unsigned int b, unsigned int s) { unsigned int ret; ret = ((((std::uint64_t)b << 32 | a) >> (s & 0x7) * 8) & 0xff) | (((((std::uint64_t)b << 32 | a) >> ((s >> 4) & 0x7) * 8) & 0xff) << 8) | (((((std::uint64_t)b << 32 | a) >> ((s >> 8) & 0x7) * 8) & 0xff) << 16) | (((((std::uint64_t)b << 32 | a) >> ((s >> 12) & 0x7) * 8) & 0xff) << 24); return ret; } /// Find position of first least significant set bit in an integer. /// ffs(0) returns 0. /// /// \param [in] a Input integer value /// \returns The position template <typename T> inline int ffs(T a) { static_assert(std::is_integral<T>::value, "integer required"); return (sycl::ctz(a) + 1) % (sizeof(T) * 8 + 1); } /// select_from_sub_group allows work-items to obtain a copy of a value held by /// any other work-item in the sub_group. The input sub_group will be divided /// into several logical sub_groups with id range [0, \p logical_sub_group_size /// - 1]. Each work-item in logical sub_group gets value from another work-item /// whose id is \p remote_local_id. If \p remote_local_id is outside the /// logical sub_group id range, \p remote_local_id will modulo with \p /// logical_sub_group_size. The \p logical_sub_group_size must be a power of 2 /// and not exceed input sub_group size. 
/// \tparam T Input value type /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] remote_local_id Input source work item id /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T select_from_sub_group(sycl::sub_group g, T x, int remote_local_id, int logical_sub_group_size = 32) { unsigned int start_index = g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size; return sycl::select_from_group( g, x, start_index + remote_local_id % logical_sub_group_size); } /// shift_sub_group_left move values held by the work-items in a sub_group /// directly to another work-item in the sub_group, by shifting values a fixed /// number of work-items to the left. The input sub_group will be divided into /// several logical sub_groups with id range [0, \p logical_sub_group_size - 1]. /// Each work-item in logical sub_group gets value from another work-item whose /// id is caller's id adds \p delta. If calculated id is outside the logical /// sub_group id range, the work-item will get value from itself. The \p /// logical_sub_group_size must be a power of 2 and not exceed input sub_group /// size. /// \tparam T Input value type /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] delta Input delta /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T shift_sub_group_left(sycl::sub_group g, T x, unsigned int delta, int logical_sub_group_size = 32) { unsigned int id = g.get_local_linear_id(); unsigned int end_index = (id / logical_sub_group_size + 1) * logical_sub_group_size; T result = sycl::shift_group_left(g, x, delta); if ((id + delta) >= end_index) { result = x; } return result; } /// shift_sub_group_right move values held by the work-items in a sub_group /// directly to another work-item in the sub_group, by shifting values a fixed /// number of work-items to the right. The input sub_group will be divided into /// several logical_sub_groups with id range [0, \p logical_sub_group_size - 1]. /// Each work-item in logical_sub_group gets value from another work-item whose /// id is caller's id subtracts \p delta. If calculated id is outside the /// logical sub_group id range, the work-item will get value from itself. The \p /// logical_sub_group_size must be a power of 2 and not exceed input sub_group /// size. /// \tparam T Input value type /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] delta Input delta /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T shift_sub_group_right(sycl::sub_group g, T x, unsigned int delta, int logical_sub_group_size = 32) { unsigned int id = g.get_local_linear_id(); unsigned int start_index = id / logical_sub_group_size * logical_sub_group_size; T result = sycl::shift_group_right(g, x, delta); if ((id - start_index) < delta) { result = x; } return result; } /// permute_sub_group_by_xor permutes values by exchanging values held by pairs /// of work-items identified by computing the bitwise exclusive OR of the /// work-item id and some fixed mask. The input sub_group will be divided into /// several logical sub_groups with id range [0, \p logical_sub_group_size - 1]. /// Each work-item in logical sub_group gets value from another work-item whose /// id is bitwise exclusive OR of the caller's id and \p mask. 
If calculated id /// is outside the logical sub_group id range, the work-item will get value from /// itself. The \p logical_sub_group_size must be a power of 2 and not exceed /// input sub_group size. /// \tparam T Input value type /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] mask Input mask /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T permute_sub_group_by_xor(sycl::sub_group g, T x, unsigned int mask, int logical_sub_group_size = 32) { unsigned int id = g.get_local_linear_id(); unsigned int start_index = id / logical_sub_group_size * logical_sub_group_size; unsigned int target_offset = (id % logical_sub_group_size) ^ mask; return sycl::select_from_group(g, x, target_offset < logical_sub_group_size ? start_index + target_offset : id); } namespace experimental { /// Masked version of select_from_sub_group, which execute masked sub-group /// operation. The parameter member_mask indicating the work-items participating /// the call. Whether the n-th bit is set to 1 representing whether the /// work-item with id n is participating the call. All work-items named in /// member_mask must be executed with the same member_mask, or the result is /// undefined. /// \tparam T Input value type /// \param [in] member_mask Input mask /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] remote_local_id Input source work item id /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T select_from_sub_group(unsigned int member_mask, sycl::sub_group g, T x, int remote_local_id, int logical_sub_group_size = 32) { unsigned int start_index = g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size; unsigned logical_remote_id = start_index + remote_local_id % logical_sub_group_size; #if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER) #if defined(__SPIR__) return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x, logical_remote_id); #else throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group " "only supports SPIR-V backends."); #endif // __SPIR__ #else (void)g; (void)x; (void)remote_local_id; (void)logical_sub_group_size; (void)member_mask; throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not " "supported on host device and none intel compiler."); #endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER } /// Masked version of shift_sub_group_left, which execute masked sub-group /// operation. The parameter member_mask indicating the work-items participating /// the call. Whether the n-th bit is set to 1 representing whether the /// work-item with id n is participating the call. All work-items named in /// member_mask must be executed with the same member_mask, or the result is /// undefined. 
/// \tparam T Input value type /// \param [in] member_mask Input mask /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] delta Input delta /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T shift_sub_group_left(unsigned int member_mask, sycl::sub_group g, T x, unsigned int delta, int logical_sub_group_size = 32) { unsigned int id = g.get_local_linear_id(); unsigned int end_index = (id / logical_sub_group_size + 1) * logical_sub_group_size; #if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER) #if defined(__SPIR__) T result = __spirv_GroupNonUniformShuffleDown(__spv::Scope::Subgroup, x, delta); if ((id + delta) >= end_index) { result = x; } return result; #else throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_left " "only supports SPIR-V backends."); #endif // __SPIR__ #else (void)g; (void)x; (void)delta; (void)logical_sub_group_size; (void)member_mask; throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not " "supported on host device and none intel compiler."); #endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER } /// Masked version of shift_sub_group_right, which execute masked sub-group /// operation. The parameter member_mask indicating the work-items participating /// the call. Whether the n-th bit is set to 1 representing whether the /// work-item with id n is participating the call. All work-items named in /// member_mask must be executed with the same member_mask, or the result is /// undefined. /// \tparam T Input value type /// \param [in] member_mask Input mask /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] delta Input delta /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T shift_sub_group_right(unsigned int member_mask, sycl::sub_group g, T x, unsigned int delta, int logical_sub_group_size = 32) { unsigned int id = g.get_local_linear_id(); unsigned int start_index = id / logical_sub_group_size * logical_sub_group_size; #if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER) #if defined(__SPIR__) T result = __spirv_GroupNonUniformShuffleUp(__spv::Scope::Subgroup, x, delta); if ((id - start_index) < delta) { result = x; } return result; #else throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_right " "only supports SPIR-V backends."); #endif // __SPIR__ #else (void)g; (void)x; (void)delta; (void)logical_sub_group_size; (void)member_mask; throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not " "supported on host device and none intel compiler."); #endif // __SYCL_DEVICE_ONLY && __INTEL_LLVM_COMPILER } /// Masked version of permute_sub_group_by_xor, which execute masked sub-group /// operation. The parameter member_mask indicating the work-items participating /// the call. Whether the n-th bit is set to 1 representing whether the /// work-item with id n is participating the call. All work-items named in /// member_mask must be executed with the same member_mask, or the result is /// undefined. 
/// \tparam T Input value type /// \param [in] member_mask Input mask /// \param [in] g Input sub_group /// \param [in] x Input value /// \param [in] mask Input mask /// \param [in] logical_sub_group_size Input logical sub_group size /// \returns The result template <typename T> T permute_sub_group_by_xor(unsigned int member_mask, sycl::sub_group g, T x, unsigned int mask, int logical_sub_group_size = 32) { unsigned int id = g.get_local_linear_id(); unsigned int start_index = id / logical_sub_group_size * logical_sub_group_size; unsigned int target_offset = (id % logical_sub_group_size) ^ mask; unsigned logical_remote_id = (target_offset < logical_sub_group_size) ? start_index + target_offset : id; #if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER) #if defined(__SPIR__) return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x, logical_remote_id); #else throw sycl::exception(sycl::errc::runtime, "Masked version of permute_sub_group_by_xor " "only supports SPIR-V backends."); #endif // __SPIR__ #else (void)g; (void)x; (void)mask; (void)logical_sub_group_size; (void)member_mask; throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not " "supported on host device and none intel compiler."); #endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER } } // namespace experimental /// Computes the multiplication of two complex numbers. /// \tparam T Complex element type /// \param [in] x The first input complex number /// \param [in] y The second input complex number /// \returns The result template <typename T> sycl::vec<T, 2> cmul(sycl::vec<T, 2> x, sycl::vec<T, 2> y) { std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]); t1 = t1 * t2; return sycl::vec<T, 2>(t1.real(), t1.imag()); } /// Computes the division of two complex numbers. /// \tparam T Complex element type /// \param [in] x The first input complex number /// \param [in] y The second input complex number /// \returns The result template <typename T> sycl::vec<T, 2> cdiv(sycl::vec<T, 2> x, sycl::vec<T, 2> y) { std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]); t1 = t1 / t2; return sycl::vec<T, 2>(t1.real(), t1.imag()); } /// Computes the magnitude of a complex number. /// \tparam T Complex element type /// \param [in] x The input complex number /// \returns The result template <typename T> T cabs(sycl::vec<T, 2> x) { std::complex<T> t(x[0], x[1]); return std::abs(t); } /// Computes the complex conjugate of a complex number. /// \tparam T Complex element type /// \param [in] x The input complex number /// \returns The result template <typename T> sycl::vec<T, 2> conj(sycl::vec<T, 2> x) { std::complex<T> t(x[0], x[1]); t = std::conj(t); return sycl::vec<T, 2>(t.real(), t.imag()); } inline int get_sycl_language_version() { #ifdef SYCL_LANGUAGE_VERSION return SYCL_LANGUAGE_VERSION; #else return 202000; #endif } namespace experimental { /// Synchronize work items from all work groups within a SYCL kernel. /// \param [in] item: Represents a work group. /// \param [in] counter: An atomic object defined on a device memory which can /// be accessed by work items in all work groups. The initial value of the /// counter should be zero. /// Note: Please make sure that all the work items of all work groups within /// a SYCL kernel can be scheduled actively at the same time on a device. 
template <int dimensions = 3> inline void nd_range_barrier(const sycl::nd_item<dimensions> &item, sycl::atomic_ref< unsigned int, sycl::memory_order::seq_cst, sycl::memory_scope::device, sycl::access::address_space::global_space> &counter) { static_assert(dimensions == 3, "dimensions must be 3."); unsigned int num_groups = item.get_group_range(2) * item.get_group_range(1) * item.get_group_range(0); item.barrier(); if (item.get_local_linear_id() == 0) { unsigned int inc = 1; unsigned int old_arrive = 0; bool is_group0 = (item.get_group(2) + item.get_group(1) + item.get_group(0) == 0); if (is_group0) { inc = 0x80000000 - (num_groups - 1); } old_arrive = counter.fetch_add(inc); // Synchronize all the work groups while (((old_arrive ^ counter.load()) & 0x80000000) == 0) ; } item.barrier(); } /// Synchronize work items from all work groups within a SYCL kernel. /// \param [in] item: Represents a work group. /// \param [in] counter: An atomic object defined on a device memory which can /// be accessed by work items in all work groups. The initial value of the /// counter should be zero. /// Note: Please make sure that all the work items of all work groups within /// a SYCL kernel can be scheduled actively at the same time on a device. template <> inline void nd_range_barrier(const sycl::nd_item<1> &item, sycl::atomic_ref< unsigned int, sycl::memory_order::seq_cst, sycl::memory_scope::device, sycl::access::address_space::global_space> &counter) { unsigned int num_groups = item.get_group_range(0); item.barrier(); if (item.get_local_linear_id() == 0) { unsigned int inc = 1; unsigned int old_arrive = 0; bool is_group0 = (item.get_group(0) == 0); if (is_group0) { inc = 0x80000000 - (num_groups - 1); } old_arrive = counter.fetch_add(inc); // Synchronize all the work groups while (((old_arrive ^ counter.load()) & 0x80000000) == 0) ; } item.barrier(); } /// The logical-group is a logical collection of some work-items within a /// work-group. /// Note: Please make sure that the logical-group size is a power of 2 in the /// range [1, current_sub_group_size]. class logical_group { sycl::nd_item<3> _item; sycl::group<3> _g; uint32_t _logical_group_size; uint32_t _group_linear_range_in_parent; public: /// Dividing \p parent_group into several logical-groups. /// \param [in] item Current work-item. /// \param [in] parent_group The group to be divided. /// \param [in] size The logical-group size. logical_group(sycl::nd_item<3> item, sycl::group<3> parent_group, uint32_t size) : _item(item), _g(parent_group), _logical_group_size(size) { _group_linear_range_in_parent = (_g.get_local_linear_range() - 1) / _logical_group_size + 1; } /// Returns the index of the work-item within the logical-group. uint32_t get_local_linear_id() const { return _item.get_local_linear_id() % _logical_group_size; } /// Returns the index of the logical-group in the parent group. uint32_t get_group_linear_id() const { return _item.get_local_linear_id() / _logical_group_size; } /// Returns the number of work-items in the logical-group. uint32_t get_local_linear_range() const { if (_g.get_local_linear_range() % _logical_group_size == 0) { return _logical_group_size; } uint32_t last_item_group_id = _g.get_local_linear_range() / _logical_group_size; uint32_t first_of_last_group = last_item_group_id * _logical_group_size; if (_item.get_local_linear_id() >= first_of_last_group) { return _g.get_local_linear_range() - first_of_last_group; } else { return _logical_group_size; } } /// Returns the number of logical-group in the parent group. 
uint32_t get_group_linear_range() const { return _group_linear_range_in_parent; } }; // The original source of the function calculate_max_active_wg_per_xecore was // under the license below: // // Copyright Intel Corporation // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. // /// This function is used for occupancy calculation, it computes the max active /// work-group number per Xe-Core. Ref to /// https://github.com/oneapi-src/oneAPI-samples/tree/master/Tools/GPU-Occupancy-Calculator /// \param [out] num_wg Active work-group number. /// \param [in] wg_size Work-group size. /// \param [in] slm_size Share local memory size. /// \param [in] sg_size Sub-group size. /// \param [in] used_barrier Whether barrier is used. /// \param [in] used_large_grf Whether large General Register File is used. /// \return If no error, returns 0. /// If \p wg_size exceeds the max work-group size, the max work-group size will /// be used instead of \p wg_size and returns -1. 
inline int calculate_max_active_wg_per_xecore(int *num_wg, int wg_size, int slm_size = 0, int sg_size = 32, bool used_barrier = false, bool used_large_grf = false) { int ret = 0; const int slm_size_per_xe_core = 64 * 1024; const int max_barrier_registers = 32; dpct::device_ext &dev = dpct::get_current_device(); size_t max_wg_size = dev.get_info<sycl::info::device::max_work_group_size>(); if (wg_size > max_wg_size) { wg_size = max_wg_size; ret = -1; } int num_threads_ss = 56; int max_num_wg = 56; if (dev.has(sycl::aspect::ext_intel_gpu_eu_count_per_subslice) && dev.has(sycl::aspect::ext_intel_gpu_hw_threads_per_eu)) { auto eu_count = dev.get_info<sycl::info::device::ext_intel_gpu_eu_count_per_subslice>(); auto threads_count = dev.get_info<sycl::ext::intel::info::device::gpu_hw_threads_per_eu>(); num_threads_ss = eu_count * threads_count; max_num_wg = eu_count * threads_count; } if (used_barrier) { max_num_wg = max_barrier_registers; } // Calculate num_wg_slm int num_wg_slm = 0; if (slm_size == 0) { num_wg_slm = max_num_wg; } else { num_wg_slm = std::floor((float)slm_size_per_xe_core / slm_size); } // Calculate num_wg_threads if (used_large_grf) num_threads_ss = num_threads_ss / 2; int num_threads = std::ceil((float)wg_size / sg_size); int num_wg_threads = std::floor((float)num_threads_ss / num_threads); // Calculate num_wg *num_wg = std::min(num_wg_slm, num_wg_threads); *num_wg = std::min(*num_wg, max_num_wg); return ret; } } // namespace experimental /// If x <= 2, then return a pointer to the deafult queue; /// otherwise, return x reinterpreted as a dpct::queue_ptr. inline queue_ptr int_as_queue_ptr(uintptr_t x) { return x <= 2 ? &get_default_queue() : reinterpret_cast<queue_ptr>(x); } template <int n_nondefault_params, int n_default_params, typename T> class args_selector; /// args_selector is a helper class for extracting arguments from an /// array of pointers to arguments or buffer of arguments to pass to a /// kernel function. /// /// \param R(Ts...) The type of the kernel /// \param n_nondefault_params The number of nondefault parameters of the kernel /// (excluding parameters that like sycl::nd_item, etc.) /// \param n_default_params The number of default parameters of the kernel /// /// Example usage: /// With the following kernel: /// void foo(sycl::float2 *x, int n, sycl::nd_item<3> item_ct1, float f=.1) {} /// and with the declaration: /// args_selector<2, 1, decltype(foo)> selector(kernelParams, extra); /// we have: /// selector.get<0>() returns a reference to sycl::float*, /// selector.get<1>() returns a reference to int, /// selector.get<2>() returns a reference to float template <int n_nondefault_params, int n_default_params, typename R, typename... Ts> class args_selector<n_nondefault_params, n_default_params, R(Ts...)> { private: void **kernel_params; char *args_buffer; template <int i> static constexpr int account_for_default_params() { constexpr int n_total_params = sizeof...(Ts); if constexpr (i >= n_nondefault_params) { return n_total_params - n_default_params + (i - n_nondefault_params); } else { return i; } } public: /// Get the type of the ith argument of R(Ts...) 
/// \param [in] i Index of parameter to get /// \returns Type of ith parameter template <int i> using arg_type = std::tuple_element_t<account_for_default_params<i>(), std::tuple<Ts...>>; private: template <int i> static constexpr int get_offset() { if constexpr (i == 0) { // we can assume args_buffer is properly aligned to the // first argument return 0; } else { constexpr int prev_off = get_offset<i-1>(); constexpr int prev_past_end = prev_off + sizeof(arg_type<i-1>); using T = arg_type<i>; // is the past-the-end of the i-1st element properly aligned // with the ith element's alignment? if constexpr (prev_past_end % alignof(T) == 0) { return prev_past_end; } // otherwise bump prev_past_end to match alignment else { return prev_past_end + (alignof(T) - (prev_past_end % alignof(T))); } } } static char *get_args_buffer(void **extra) { if (!extra) return nullptr; for (; (std::size_t) *extra != 0; ++extra) { if ((std::size_t) *extra == 1) { return static_cast<char*>(*(extra+1)); } } return nullptr; } public: /// If kernel_params is nonnull, then args_selector will /// extract arguments from kernel_params. Otherwise, it /// will extract them from extra. /// \param [in] kernel_params Array of pointers to arguments /// a or null pointer. /// \param [in] extra Array containing pointer to argument buffer. args_selector(void **kernel_params, void **extra) : kernel_params(kernel_params), args_buffer(get_args_buffer(extra)) {} /// Get a reference to the ith argument extracted from kernel_params /// or extra. /// \param [in] i Index of argument to get /// \returns Reference to the ith argument template <int i> arg_type<i> &get() { if (kernel_params) { return *static_cast<arg_type<i>*>(kernel_params[i]); } else { return *reinterpret_cast<arg_type<i>*>(args_buffer + get_offset<i>()); } } }; #ifdef _WIN32 #define DPCT_EXPORT __declspec(dllexport) #else #define DPCT_EXPORT #endif } // namespace dpct #endif // __DPCT_UTIL_HPP__
hpp
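Most of the helpers in util.hpp exist to express CUDA warp shuffles in SYCL sub-group terms. The sketch below is illustrative only (q and out are assumed: out is a device USM allocation of at least 64 unsigned ints, and the device's sub-group size is assumed to be at least 16); it broadcasts lane 0 of each logical 16-lane group, roughly the analogue of __shfl_sync(mask, v, 0, 16).

#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>

void broadcastLane0(sycl::queue &q, unsigned int *out) {
  q.parallel_for(
       sycl::nd_range<1>(sycl::range<1>(64), sycl::range<1>(32)),
       [=](sycl::nd_item<1> it) {
         auto sg = it.get_sub_group();
         unsigned int v = sg.get_local_linear_id();  // per-lane value
         // Lanes 0-15 read lane 0, lanes 16-31 read lane 16 (for a
         // 32-wide sub-group).
         unsigned int b = dpct::select_from_sub_group(sg, v, 0, 16);
         out[it.get_global_linear_id()] = b;
       })
      .wait();
}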
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/image.hpp
//==---- image.hpp --------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_IMAGE_HPP__ #define __DPCT_IMAGE_HPP__ #include <sycl/sycl.hpp> #include "memory.hpp" #include "util.hpp" namespace dpct { enum class image_channel_data_type { signed_int, unsigned_int, fp, }; class image_channel; class image_wrapper_base; namespace detail { /// Image object type traits, with accessor type and sampled data type defined. /// The data type of an image accessor must be one of sycl::int4, sycl::uint4, /// sycl::float4 and sycl::half4. The data type of accessors with 8bits/16bits /// channel width will be 32 bits. sycl::half is an exception. template <class T> struct image_trait { using acc_data_t = sycl::vec<T, 4>; template <int dimensions> using accessor_t = sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read, sycl::access::target::image>; template <int dimensions> using array_accessor_t = sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read, sycl::access::target::image_array>; using data_t = T; using elem_t = T; static constexpr image_channel_data_type data_type = std::is_integral<T>::value ? (std::is_signed<T>::value ? image_channel_data_type::signed_int : image_channel_data_type::unsigned_int) : image_channel_data_type::fp; static constexpr int channel_num = 1; }; template <> struct image_trait<std::uint8_t> : public image_trait<std::uint32_t> { using data_t = std::uint8_t; using elem_t = data_t; }; template <> struct image_trait<std::uint16_t> : public image_trait<std::uint32_t> { using data_t = std::uint16_t; using elem_t = data_t; }; template <> struct image_trait<std::int8_t> : public image_trait<std::int32_t> { using data_t = std::int8_t; using elem_t = data_t; }; template <> struct image_trait<std::int16_t> : public image_trait<std::int32_t> { using data_t = std::int16_t; using elem_t = data_t; }; template <> struct image_trait<char> : public image_trait<typename std::conditional< std::is_signed<char>::value, signed char, unsigned char>::type> {}; template <class T> struct image_trait<sycl::vec<T, 1>> : public image_trait<T> {}; template <class T> struct image_trait<sycl::vec<T, 2>> : public image_trait<T> { using data_t = sycl::vec<T, 2>; static constexpr int channel_num = 2; }; template <class T> struct image_trait<sycl::vec<T, 3>> : public image_trait<sycl::vec<T, 4>> { static constexpr int channel_num = 3; }; template <class T> struct image_trait<sycl::vec<T, 4>> : public image_trait<T> { using data_t = sycl::vec<T, 4>; static constexpr int channel_num = 4; }; /// Functor to fetch data from read result of an image accessor. 
template <class T> struct fetch_data { using return_t = typename image_trait<T>::data_t; using acc_data_t = typename image_trait<T>::acc_data_t; return_t operator()(acc_data_t &&original_data) { return (return_t)original_data.r(); } }; template <class T> struct fetch_data<sycl::vec<T, 1>> : public fetch_data<T> {}; template <class T> struct fetch_data<sycl::vec<T, 2>> { using return_t = typename image_trait<sycl::vec<T, 2>>::data_t; using acc_data_t = typename image_trait<sycl::vec<T, 2>>::acc_data_t; return_t operator()(acc_data_t &&origin_data) { return return_t(origin_data.r(), origin_data.g()); } }; template <class T> struct fetch_data<sycl::vec<T, 3>> : public fetch_data<sycl::vec<T, 4>> {}; template <class T> struct fetch_data<sycl::vec<T, 4>> { using return_t = typename image_trait<sycl::vec<T, 4>>::data_t; using acc_data_t = typename image_trait<sycl::vec<T, 4>>::acc_data_t; return_t operator()(acc_data_t &&origin_data) { return return_t(origin_data.r(), origin_data.g(), origin_data.b(), origin_data.a()); } }; /// Create image according with given type \p T and \p dims. template <class T> static image_wrapper_base *create_image_wrapper(int dims); /// Create image with given data type \p T, channel order and dims template <class T> static image_wrapper_base *create_image_wrapper(unsigned channel_num, int dims); /// Create image with channel info and specified dimensions. static image_wrapper_base *create_image_wrapper(image_channel channel, int dims); } // namespace detail /// Image channel info, include channel number, order, data width and type class image_channel { image_channel_data_type _type = image_channel_data_type::signed_int; /// Number of channels. unsigned _channel_num = 0; /// Total size of all channels in bytes. unsigned _total_size = 0; /// Size of each channel in bytes. unsigned _channel_size = 0; public: /// Create image channel info according to template argument \p T. template <class T> static image_channel create() { image_channel channel; channel.set_channel_size(detail::image_trait<T>::channel_num, sizeof(typename detail::image_trait<T>::elem_t) * 8); channel.set_channel_data_type(detail::image_trait<T>::data_type); return channel; } image_channel() = default; image_channel_data_type get_channel_data_type() { return _type; } void set_channel_data_type(image_channel_data_type type) { _type = type; } unsigned get_total_size() { return _total_size; } unsigned get_channel_num() { return _channel_num; } void set_channel_num(unsigned channel_num) { _channel_num = channel_num; _total_size = _channel_size * _channel_num; } /// image_channel constructor. /// \param r Channel r width in bits. /// \param g Channel g width in bits. Should be same with \p r, or zero. /// \param b Channel b width in bits. Should be same with \p g, or zero. /// \param a Channel a width in bits. Should be same with \p b, or zero. /// \param data_type Image channel data type: signed_nt, unsigned_int or fp. 
image_channel(int r, int g, int b, int a, image_channel_data_type data_type) { _type = data_type; if (a) { assert(r == a && "SYCL doesn't support different channel size"); assert(r == b && "SYCL doesn't support different channel size"); assert(r == g && "SYCL doesn't support different channel size"); set_channel_size(4, a); } else if (b) { assert(r == b && "SYCL doesn't support different channel size"); assert(r == g && "SYCL doesn't support different channel size"); set_channel_size(3, b); } else if (g) { assert(r == g && "SYCL doesn't support different channel size"); set_channel_size(2, g); } else { set_channel_size(1, r); } } sycl::image_channel_type get_channel_type() const { if (_channel_size == 4) { if (_type == image_channel_data_type::signed_int) return sycl::image_channel_type::signed_int32; else if (_type == image_channel_data_type::unsigned_int) return sycl::image_channel_type::unsigned_int32; else if (_type == image_channel_data_type::fp) return sycl::image_channel_type::fp32; } else if (_channel_size == 2) { if (_type == image_channel_data_type::signed_int) return sycl::image_channel_type::signed_int16; else if (_type == image_channel_data_type::unsigned_int) return sycl::image_channel_type::unsigned_int16; else if (_type == image_channel_data_type::fp) return sycl::image_channel_type::fp16; } else { if (_type == image_channel_data_type::signed_int) return sycl::image_channel_type::signed_int8; else if (_type == image_channel_data_type::unsigned_int) return sycl::image_channel_type::unsigned_int8; } assert(false && "unexpected channel data kind and channel size"); return sycl::image_channel_type::signed_int32; } void set_channel_type(sycl::image_channel_type type) { switch (type) { case sycl::image_channel_type::unsigned_int8: _type = image_channel_data_type::unsigned_int; _channel_size = 1; break; case sycl::image_channel_type::unsigned_int16: _type = image_channel_data_type::unsigned_int; _channel_size = 2; break; case sycl::image_channel_type::unsigned_int32: _type = image_channel_data_type::unsigned_int; _channel_size = 4; break; case sycl::image_channel_type::signed_int8: _type = image_channel_data_type::signed_int; _channel_size = 1; break; case sycl::image_channel_type::signed_int16: _type = image_channel_data_type::signed_int; _channel_size = 2; break; case sycl::image_channel_type::signed_int32: _type = image_channel_data_type::signed_int; _channel_size = 4; break; case sycl::image_channel_type::fp16: _type = image_channel_data_type::fp; _channel_size = 2; break; case sycl::image_channel_type::fp32: _type = image_channel_data_type::fp; _channel_size = 4; break; default: break; } _total_size = _channel_size * _channel_num; } sycl::image_channel_order get_channel_order() const { switch (_channel_num) { case 1: return sycl::image_channel_order::r; case 2: return sycl::image_channel_order::rg; case 3: return sycl::image_channel_order::rgb; case 4: return sycl::image_channel_order::rgba; default: return sycl::image_channel_order::r; } } /// Get the size for each channel in bits. unsigned get_channel_size() const { return _channel_size * 8; } /// Set channel size. /// \param in_channel_num Channels number to set. /// \param channel_size Size for each channel in bits. void set_channel_size(unsigned in_channel_num, unsigned channel_size) { if (in_channel_num < _channel_num) return; _channel_num = in_channel_num; _channel_size = channel_size / 8; _total_size = _channel_size * _channel_num; } }; /// 2D or 3D matrix data for image. 
class image_matrix { image_channel _channel; int _range[3] = {1, 1, 1}; int _dims = 0; void *_host_data = nullptr; /// Set range of each dimension. template <int dimensions> void set_range(sycl::range<dimensions> range) { for (int i = 0; i < dimensions; ++i) _range[i] = range[i]; _dims = dimensions; } template <int... DimIdx> sycl::range<sizeof...(DimIdx)> get_range(integer_sequence<DimIdx...>) { return sycl::range<sizeof...(DimIdx)>(_range[DimIdx]...); } public: /// Constructor with channel info and dimension size info. template <int dimensions> image_matrix(image_channel channel, sycl::range<dimensions> range) : _channel(channel) { set_range(range); _host_data = std::malloc(range.size() * _channel.get_total_size()); } image_matrix(sycl::image_channel_type channel_type, unsigned channel_num, size_t x, size_t y) { _channel.set_channel_type(channel_type); _channel.set_channel_num(channel_num); _dims = 1; _range[0] = x; if (y) { _dims = 2; _range[1] = y; } _host_data = std::malloc(_range[0] * _range[1] * _channel.get_total_size()); } /// Construct a new image class with the matrix data. template <int dimensions> sycl::image<dimensions> *create_image() { return create_image<dimensions>(_channel); } /// Construct a new image class with the matrix data. template <int dimensions> sycl::image<dimensions> *create_image(image_channel channel) { return new sycl::image<dimensions>( _host_data, channel.get_channel_order(), channel.get_channel_type(), get_range(make_index_sequence<dimensions>()), sycl::property::image::use_host_ptr()); } /// Get channel info. inline image_channel get_channel() { return _channel; } /// Get range of the image. sycl::range<3> get_range() { return sycl::range<3>(_range[0], _range[1], _range[2]); } /// Get matrix dims. inline int get_dims() { return _dims; } /// Convert to pitched data. pitched_data to_pitched_data() { return pitched_data(_host_data, _range[0], _range[0], _range[1]); } ~image_matrix() { if (_host_data) std::free(_host_data); _host_data = nullptr; } }; using image_matrix_p = image_matrix *; enum class image_data_type { matrix, linear, pitch, unsupport }; /// Image data info. 
class image_data { public: image_data() { _type = image_data_type::unsupport; } image_data(image_matrix_p matrix_data) { set_data(matrix_data); } image_data(void *data_ptr, size_t x_size, image_channel channel) { set_data(data_ptr, x_size, channel); } image_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size, image_channel channel) { set_data(data_ptr, x_size, y_size, pitch_size, channel); } void set_data(image_matrix_p matrix_data) { _type = image_data_type::matrix; _data = matrix_data; _channel = matrix_data->get_channel(); } void set_data(void *data_ptr, size_t x_size, image_channel channel) { _type = image_data_type::linear; _data = data_ptr; _x = x_size; _channel = channel; } void set_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size, image_channel channel) { _type = image_data_type::pitch; _data = data_ptr; _x = x_size; _y = y_size; _pitch = pitch_size; _channel = channel; } image_data_type get_data_type() const { return _type; } void set_data_type(image_data_type type) { _type = type; } void *get_data_ptr() const { return _data; } void set_data_ptr(void *data) { _data = data; } size_t get_x() const { return _x; } void set_x(size_t x) { _x = x; } size_t get_y() const { return _y; } void set_y(size_t y) { _y = y; } size_t get_pitch() const { return _pitch; } void set_pitch(size_t pitch) { _pitch = pitch; } image_channel get_channel() const { return _channel; } void set_channel(image_channel channel) { _channel = channel; } image_channel_data_type get_channel_data_type() { return _channel.get_channel_data_type(); } void set_channel_data_type(image_channel_data_type type) { _channel.set_channel_data_type(type); } unsigned get_channel_size() { return _channel.get_channel_size(); } void set_channel_size(unsigned channel_num, unsigned channel_size) { return _channel.set_channel_size(channel_num, channel_size); } unsigned get_channel_num() { return _channel.get_channel_num(); } void set_channel_num(unsigned num) { return _channel.set_channel_num(num); } sycl::image_channel_type get_channel_type() { return _channel.get_channel_type(); } void set_channel_type(sycl::image_channel_type type) { return _channel.set_channel_type(type); } private: image_data_type _type; void *_data = nullptr; size_t _x, _y, _pitch; image_channel _channel; }; /// Image sampling info, include addressing mode, filtering mode and /// normalization info. class sampling_info { sycl::addressing_mode _addressing_mode = sycl::addressing_mode::clamp_to_edge; sycl::filtering_mode _filtering_mode = sycl::filtering_mode::nearest; sycl::coordinate_normalization_mode _coordinate_normalization_mode = sycl::coordinate_normalization_mode::unnormalized; public: sycl::addressing_mode get_addressing_mode() { return _addressing_mode; } void set(sycl::addressing_mode addressing_mode) { _addressing_mode = addressing_mode; } sycl::filtering_mode get_filtering_mode() { return _filtering_mode; } void set(sycl::filtering_mode filtering_mode) { _filtering_mode = filtering_mode; } sycl::coordinate_normalization_mode get_coordinate_normalization_mode() { return _coordinate_normalization_mode; } void set(sycl::coordinate_normalization_mode coordinate_normalization_mode) { _coordinate_normalization_mode = coordinate_normalization_mode; } bool is_coordinate_normalized() { return _coordinate_normalization_mode == sycl::coordinate_normalization_mode::normalized; } void set_coordinate_normalization_mode(int is_normalized) { _coordinate_normalization_mode = is_normalized ? 
sycl::coordinate_normalization_mode::normalized : sycl::coordinate_normalization_mode::unnormalized; } void set(sycl::addressing_mode addressing_mode, sycl::filtering_mode filtering_mode, sycl::coordinate_normalization_mode coordinate_normalization_mode) { set(addressing_mode); set(filtering_mode); set(coordinate_normalization_mode); } void set(sycl::addressing_mode addressing_mode, sycl::filtering_mode filtering_mode, int is_normalized) { set(addressing_mode); set(filtering_mode); set_coordinate_normalization_mode(is_normalized); } sycl::sampler get_sampler() { return sycl::sampler(_coordinate_normalization_mode, _addressing_mode, _filtering_mode); } }; /// Image base class. class image_wrapper_base { sampling_info _sampling_info; image_data _data; public: virtual ~image_wrapper_base() = 0; void attach(image_data data) { set_data(data); } /// Attach matrix data to this class. void attach(image_matrix *matrix) { detach(); image_wrapper_base::set_data(image_data(matrix)); } /// Attach matrix data to this class. void attach(image_matrix *matrix, image_channel channel) { attach(matrix); image_wrapper_base::set_channel(channel); } /// Attach linear data to this class. void attach(const void *ptr, size_t count) { attach(ptr, count, get_channel()); } /// Attach linear data to this class. void attach(const void *ptr, size_t count, image_channel channel) { detach(); image_wrapper_base::set_data(image_data(const_cast<void *>(ptr), count, channel)); } /// Attach 2D data to this class. void attach(const void *data, size_t x, size_t y, size_t pitch) { attach(data, x, y, pitch, get_channel()); } /// Attach 2D data to this class. void attach(const void *data, size_t x, size_t y, size_t pitch, image_channel channel) { detach(); image_wrapper_base::set_data( image_data(const_cast<void *>(data), x, y, pitch, channel)); } /// Detach data. 
virtual void detach() {} sampling_info get_sampling_info() { return _sampling_info; } void set_sampling_info(sampling_info info) { _sampling_info = info; } const image_data &get_data() { return _data; } void set_data(image_data data) { _data = data; } image_channel get_channel() { return _data.get_channel(); } void set_channel(image_channel channel) { _data.set_channel(channel); } image_channel_data_type get_channel_data_type() { return _data.get_channel_data_type(); } void set_channel_data_type(image_channel_data_type type) { _data.set_channel_data_type(type); } unsigned get_channel_size() { return _data.get_channel_size(); } void set_channel_size(unsigned channel_num, unsigned channel_size) { return _data.set_channel_size(channel_num, channel_size); } sycl::addressing_mode get_addressing_mode() { return _sampling_info.get_addressing_mode(); } void set(sycl::addressing_mode addressing_mode) { _sampling_info.set(addressing_mode); } sycl::filtering_mode get_filtering_mode() { return _sampling_info.get_filtering_mode(); } void set(sycl::filtering_mode filtering_mode) { _sampling_info.set(filtering_mode); } sycl::coordinate_normalization_mode get_coordinate_normalization_mode() { return _sampling_info.get_coordinate_normalization_mode(); } void set(sycl::coordinate_normalization_mode coordinate_normalization_mode) { _sampling_info.set(coordinate_normalization_mode); } bool is_coordinate_normalized() { return _sampling_info.is_coordinate_normalized(); } void set_coordinate_normalization_mode(int is_normalized) { _sampling_info.set_coordinate_normalization_mode(is_normalized); } void set(sycl::addressing_mode addressing_mode, sycl::filtering_mode filtering_mode, sycl::coordinate_normalization_mode coordinate_normalization_mode) { set(addressing_mode); set(filtering_mode); set(coordinate_normalization_mode); } void set(sycl::addressing_mode addressing_mode, sycl::filtering_mode filtering_mode, int is_normalized) { set(addressing_mode); set(filtering_mode); set_coordinate_normalization_mode(is_normalized); } unsigned get_channel_num() { return _data.get_channel_num(); } void set_channel_num(unsigned num) { return _data.set_channel_num(num); } sycl::image_channel_type get_channel_type() { return _data.get_channel_type(); } void set_channel_type(sycl::image_channel_type type) { return _data.set_channel_type(type); } sycl::sampler get_sampler() { return _sampling_info.get_sampler(); } }; inline image_wrapper_base::~image_wrapper_base() {} using image_wrapper_base_p = image_wrapper_base *; template <class T, int dimensions, bool IsImageArray> class image_accessor_ext; /// Image class, wrapper of sycl::image. 
template <class T, int dimensions, bool IsImageArray = false> class image_wrapper : public image_wrapper_base { sycl::image<dimensions> *_image = nullptr; #ifndef DPCT_USM_LEVEL_NONE std::vector<char> _host_buffer; #endif void create_image(sycl::queue q) { auto &data = get_data(); if (data.get_data_type() == image_data_type::matrix) { _image = static_cast<image_matrix_p>(data.get_data_ptr()) ->create_image<dimensions>(data.get_channel()); return; } auto ptr = data.get_data_ptr(); auto channel = data.get_channel(); if (detail::get_pointer_attribute(q, ptr) == detail::pointer_access_attribute::device_only) { #ifdef DPCT_USM_LEVEL_NONE ptr = get_buffer(ptr) .template get_access<sycl::access_mode::read_write>() .get_pointer(); #else auto sz = data.get_x(); if (data.get_data_type() == image_data_type::pitch) sz *= channel.get_total_size() * data.get_y(); _host_buffer.resize(sz); q.memcpy(_host_buffer.data(), ptr, sz).wait(); ptr = _host_buffer.data(); #endif } if constexpr (dimensions == 1) { assert(data.get_data_type() == image_data_type::linear); _image = new sycl::image<1>( ptr, channel.get_channel_order(), channel.get_channel_type(), sycl::range<1>(data.get_x() / channel.get_total_size())); } else if constexpr (dimensions == 2) { assert(data.get_data_type() == image_data_type::pitch); _image = new sycl::image<2>(ptr, channel.get_channel_order(), channel.get_channel_type(), sycl::range<2>(data.get_x(), data.get_y()), sycl::range<1>(data.get_pitch())); } else { throw std::runtime_error("3D image only support matrix data"); } return; } public: using acc_data_t = typename detail::image_trait<T>::acc_data_t; using accessor_t = typename image_accessor_ext<T, IsImageArray ? (dimensions - 1) : dimensions, IsImageArray>::accessor_t; image_wrapper() { set_channel(image_channel::create<T>()); } ~image_wrapper() { detach(); } /// Get image accessor. accessor_t get_access(sycl::handler &cgh, sycl::queue &q = get_default_queue()) { if (!_image) create_image(q); return accessor_t(*_image, cgh); } /// Detach data. void detach() override { if (_image) delete _image; _image = nullptr; } }; /// Wrap sampler and image accessor together. template <class T, int dimensions, bool IsImageArray = false> class image_accessor_ext { public: using accessor_t = typename detail::image_trait<T>::template accessor_t<dimensions>; using data_t = typename detail::image_trait<T>::data_t; sycl::sampler _sampler; accessor_t _img_acc; public: image_accessor_ext(sycl::sampler sampler, accessor_t acc) : _sampler(sampler), _img_acc(acc) {} /// Read data from accessor. template <bool Available = dimensions == 3> typename std::enable_if<Available, data_t>::type read(float x, float y, float z) { return detail::fetch_data<T>()( _img_acc.read(sycl::float4(x, y, z, 0), _sampler)); } /// Read data from accessor. template <class Coord0, class Coord1, class Coord2, bool Available = dimensions == 3 && std::is_integral<Coord0>::value &&std::is_integral<Coord1>::value &&std::is_integral<Coord2>::value> typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y, Coord2 z) { return detail::fetch_data<T>()( _img_acc.read(sycl::int4(x, y, z, 0), _sampler)); } /// Read data from accessor. template <bool Available = dimensions == 2> typename std::enable_if<Available, data_t>::type read(float x, float y) { return detail::fetch_data<T>()( _img_acc.read(sycl::float2(x, y), _sampler)); } /// Read data from accessor. 
template <class Coord0, class Coord1, bool Available = dimensions == 2 && std::is_integral<Coord0>::value &&std::is_integral<Coord1>::value> typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y) { return detail::fetch_data<T>()( _img_acc.read(sycl::int2(x, y), _sampler)); } /// Read data from accessor. template <bool Available = dimensions == 1> typename std::enable_if<Available, data_t>::type read(float x) { return detail::fetch_data<T>()(_img_acc.read(x, _sampler)); } /// Read data from accessor. template <class CoordT, bool Available = dimensions == 1 && std::is_integral<CoordT>::value> typename std::enable_if<Available, data_t>::type read(CoordT x) { return detail::fetch_data<T>()(_img_acc.read(x, _sampler)); } }; template <class T, int dimensions> class image_accessor_ext<T, dimensions, true> { public: using accessor_t = typename detail::image_trait<T>::template array_accessor_t<dimensions>; using data_t = typename detail::image_trait<T>::data_t; sycl::sampler _sampler; accessor_t _img_acc; public: image_accessor_ext(sycl::sampler sampler, accessor_t acc) : _sampler(sampler), _img_acc(acc) {} /// Read data from accessor. template <bool Available = dimensions == 2> typename std::enable_if<Available, data_t>::type read(int index, float x, float y) { return detail::fetch_data<T>()( _img_acc[index].read(sycl::float2(x, y), _sampler)); } /// Read data from accessor. template <bool Available = dimensions == 2> typename std::enable_if<Available, data_t>::type read(int index, int x, int y) { return detail::fetch_data<T>()( _img_acc[index].read(sycl::int2(x, y), _sampler)); } /// Read data from accessor. template <bool Available = dimensions == 1> typename std::enable_if<Available, data_t>::type read(int index, float x) { return detail::fetch_data<T>()( _img_acc[index].read(x, _sampler)); } /// Read data from accessor. template <bool Available = dimensions == 1> typename std::enable_if<Available, data_t>::type read(int index, int x) { return detail::fetch_data<T>()( _img_acc[index].read(x, _sampler)); } }; /// Create image wrapper according to image data and sampling info. /// \return Pointer to image wrapper base class. /// \param data Image data used to create image wrapper. /// \param info Image sampling info used to create image wrapper. /// \returns Pointer to base class of created image wrapper object. static inline image_wrapper_base *create_image_wrapper(image_data data, sampling_info info) { image_channel channel; int dims = 1; if (data.get_data_type() == image_data_type::matrix) { auto matrix = (image_matrix_p)data.get_data_ptr(); channel = matrix->get_channel(); dims = matrix->get_dims(); } else { if (data.get_data_type() == image_data_type::pitch) { dims = 2; } channel = data.get_channel(); } if (auto ret = detail::create_image_wrapper(channel, dims)) { ret->set_sampling_info(info); ret->set_data(data); return ret; } return nullptr; } namespace detail { /// Create image according with given type \p T and \p dims. 
template <class T> static image_wrapper_base *create_image_wrapper(int dims) { switch (dims) { case 1: return new image_wrapper<T, 1>(); case 2: return new image_wrapper<T, 2>(); case 3: return new image_wrapper<T, 3>(); default: return nullptr; } } /// Create image with given data type \p T, channel order and dims template <class T> static image_wrapper_base *create_image_wrapper(unsigned channel_num, int dims) { switch (channel_num) { case 1: return create_image_wrapper<T>(dims); case 2: return create_image_wrapper<sycl::vec<T, 2>>(dims); case 3: return create_image_wrapper<sycl::vec<T, 3>>(dims); case 4: return create_image_wrapper<sycl::vec<T, 4>>(dims); default: return nullptr; } } /// Create image with channel info and specified dimensions. static image_wrapper_base *create_image_wrapper(image_channel channel, int dims) { switch (channel.get_channel_type()) { case sycl::image_channel_type::fp16: return create_image_wrapper<sycl::half>(channel.get_channel_num(), dims); case sycl::image_channel_type::fp32: return create_image_wrapper<float>(channel.get_channel_num(), dims); case sycl::image_channel_type::signed_int8: return create_image_wrapper<std::int8_t>(channel.get_channel_num(), dims); case sycl::image_channel_type::signed_int16: return create_image_wrapper<std::int16_t>(channel.get_channel_num(), dims); case sycl::image_channel_type::signed_int32: return create_image_wrapper<std::int32_t>(channel.get_channel_num(), dims); case sycl::image_channel_type::unsigned_int8: return create_image_wrapper<std::uint8_t>(channel.get_channel_num(), dims); case sycl::image_channel_type::unsigned_int16: return create_image_wrapper<std::uint16_t>(channel.get_channel_num(), dims); case sycl::image_channel_type::unsigned_int32: return create_image_wrapper<std::uint32_t>(channel.get_channel_num(), dims); default: return nullptr; } } } // namespace detail } // namespace dpct #endif // !__DPCT_IMAGE_HPP__
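// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header, kept inside
// "#if 0" so it is never compiled). It shows the pattern SYCLomatic-migrated
// code typically uses to emulate a 2D texture read with image_wrapper and
// image_accessor_ext. The element type, sizes, pitch handling, and the kernel
// body are assumptions for illustration only.
#if 0
#include <dpct/dpct.hpp>

void sample_texture_read(sycl::queue &q) {
  constexpr size_t width = 64, height = 32;
  // Device data playing the role of the original pitched CUDA allocation.
  sycl::float4 *dev_data = sycl::malloc_device<sycl::float4>(width * height, q);

  // The wrapper plays the role of a texture reference bound to 2D memory.
  dpct::image_wrapper<sycl::float4, 2> tex;
  tex.set(sycl::addressing_mode::clamp_to_edge, sycl::filtering_mode::nearest,
          sycl::coordinate_normalization_mode::unnormalized);
  // x is the width in elements; a contiguous pitch (in bytes) is assumed here.
  tex.attach(dev_data, width, height, width * sizeof(sycl::float4));

  q.submit([&](sycl::handler &cgh) {
    auto acc = tex.get_access(cgh, q);
    auto smpl = tex.get_sampler();
    cgh.parallel_for(sycl::range<2>(height, width), [=](sycl::id<2> id) {
      // Bundle sampler and accessor, then read a texel at (x, y).
      dpct::image_accessor_ext<sycl::float4, 2> img(smpl, acc);
      sycl::float4 texel = img.read((float)id[1], (float)id[0]);
      (void)texel; // real code would consume the value
    });
  });
  q.wait();
  sycl::free(dev_data, q);
}
#endif
// ---------------------------------------------------------------------------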
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/kernel.hpp
//==---- kernel.hpp -------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_KERNEL_HPP__ #define __DPCT_KERNEL_HPP__ #include <sycl/sycl.hpp> #ifdef _WIN32 #include <unordered_set> #include <windows.h> #else #include <dlfcn.h> #endif #if defined(__has_include) && __has_include(<filesystem>) #include <filesystem> #elif defined(__has_include) && __has_include(<experimental/filesystem>) #include <experimental/filesystem> #else #error "SYCLomatic runtime requires C++ filesystem support" #endif #include <random> #include <image.hpp> namespace dpct { typedef void (*kernel_functor)(sycl::queue &, const sycl::nd_range<3> &, unsigned int, void **, void **); struct kernel_function_info { int max_work_group_size = 0; }; static inline void get_kernel_function_info(kernel_function_info *kernel_info, const void *function) { kernel_info->max_work_group_size = dpct::dev_mgr::instance() .current_device() .get_info<sycl::info::device::max_work_group_size>(); } static inline kernel_function_info get_kernel_function_info(const void *function) { kernel_function_info kernel_info; kernel_info.max_work_group_size = dpct::dev_mgr::instance() .current_device() .get_info<sycl::info::device::max_work_group_size>(); return kernel_info; } namespace detail { #if defined(__has_include) && __has_include(<filesystem>) namespace fs = std::filesystem; #else namespace fs = std::experimental::filesystem; #endif /// Write data to temporary file and return absolute path to temporary file. /// Temporary file is created in a temporary directory both of which have random /// names with only the user having access permissions. Only one temporary file /// will be created in the temporary directory. 
static inline fs::path write_data_to_file(char const *const data, size_t size) { std::error_code ec; if (sizeof(size_t) >= sizeof(std::streamsize) && size > (std::numeric_limits<std::streamsize>::max)()) throw std::runtime_error("data file too large"); // random number generator std::random_device dev; std::mt19937 prng(dev()); std::uniform_int_distribution<uint64_t> rand(0); // find temporary directory auto tmp_dir = fs::temp_directory_path(ec); if (ec) throw std::runtime_error("could not find temporary directory"); // create private directory std::stringstream directory; fs::path directory_path; constexpr int max_attempts = 5; int i; for (i = 0; i < max_attempts; i++) { directory << std::hex << rand(prng); directory_path = tmp_dir / directory.str(); if (fs::create_directory(directory_path)) { break; } } if (i == max_attempts) throw std::runtime_error("could not create directory"); // only allow owner permissions to private directory fs::permissions(directory_path, fs::perms::owner_all, ec); if (ec) throw std::runtime_error("could not set directory permissions"); // random filename in private directory std::stringstream filename; filename << std::hex << rand(prng); #ifdef _WIN32 auto filepath = directory_path / (filename.str() + ".dll"); #else auto filepath = directory_path / filename.str(); #endif // write data to temporary file auto outfile = std::ofstream(filepath, std::ios::out | std::ios::binary); if (outfile) { // only allow program to write file fs::permissions(filepath, fs::perms::owner_write, ec); if (ec) throw std::runtime_error("could not set permissions"); outfile.write(data, size); if (!outfile.good()) throw std::runtime_error("could not write data"); outfile.close(); // only allow program to read/execute file fs::permissions(filepath, fs::perms::owner_read | fs::perms::owner_exec, ec); if (ec) throw std::runtime_error("could not set permissions"); } else throw std::runtime_error("could not write data"); // check temporary file contents auto infile = std::ifstream(filepath, std::ios::in | std::ios::binary); if (infile) { bool mismatch = false; size_t cnt = 0; while (1) { char c; infile.get(c); if (infile.eof()) break; if (c != data[cnt++]) mismatch = true; } if (cnt != size || mismatch) throw std::runtime_error("file contents not written correctly"); } else throw std::runtime_error("could not validate file"); if (!filepath.is_absolute()) throw std::runtime_error("temporary filepath is not absolute"); return filepath; } static inline uint16_t extract16(unsigned char const *const ptr) { uint16_t ret = 0; ret |= static_cast<uint16_t>(ptr[0]) << 0; ret |= static_cast<uint16_t>(ptr[1]) << 8; return (ret); } static inline uint32_t extract32(unsigned char const *const ptr) { uint32_t ret = 0; ret |= static_cast<uint32_t>(ptr[0]) << 0; ret |= static_cast<uint32_t>(ptr[1]) << 8; ret |= static_cast<uint32_t>(ptr[2]) << 16; ret |= static_cast<uint32_t>(ptr[3]) << 24; return (ret); } static inline uint64_t extract64(unsigned char const *const ptr) { uint64_t ret = 0; ret |= static_cast<uint64_t>(ptr[0]) << 0; ret |= static_cast<uint64_t>(ptr[1]) << 8; ret |= static_cast<uint64_t>(ptr[2]) << 16; ret |= static_cast<uint64_t>(ptr[3]) << 24; ret |= static_cast<uint64_t>(ptr[4]) << 32; ret |= static_cast<uint64_t>(ptr[5]) << 40; ret |= static_cast<uint64_t>(ptr[6]) << 48; ret |= static_cast<uint64_t>(ptr[7]) << 56; return (ret); } static inline uint64_t get_lib_size(char const *const blob) { #ifdef _WIN32 /////////////////////////////////////////////////////////////////////// // Analyze DOS 
stub unsigned char const *const ublob = reinterpret_cast<unsigned char const *const>(blob); if (ublob[0] != 0x4d || ublob[1] != 0x5a) { throw std::runtime_error("Blob is not a Windows DLL."); } uint32_t pe_header_offset = extract32(ublob + 0x3c); /////////////////////////////////////////////////////////////////////// // Ananlyze PE-header unsigned char const *const pe_header = ublob + pe_header_offset; // signature uint32_t pe_signature = extract32(pe_header + 0); if (pe_signature != 0x00004550) { throw std::runtime_error("PE-header signature is not 0x00004550"); } // machine uint16_t machine = extract16(pe_header + 4); if (machine != 0x8664) { throw std::runtime_error("Only DLLs for x64 supported"); } // number of sections uint16_t number_of_sections = extract16(pe_header + 6); // sizeof optional header uint16_t sizeof_optional_header = extract16(pe_header + 20); // magic uint16_t magic = extract16(pe_header + 24); if (magic != 0x10b && magic != 0x20b) { throw std::runtime_error("MAGIC is not 0x010b or 0x020b"); } /////////////////////////////////////////////////////////////////////// // Analyze tail of optional header constexpr int coff_header_size = 24; unsigned char const *const tail_of_optional_header = pe_header + coff_header_size + sizeof_optional_header; if (extract64(tail_of_optional_header - 8) != 0) { throw std::runtime_error("Optional header not zero-padded"); } /////////////////////////////////////////////////////////////////////// // Analyze last section header constexpr int section_header_size = 40; unsigned char const *const last_section_header = tail_of_optional_header + section_header_size * (number_of_sections - 1); uint32_t sizeof_raw_data = extract32(last_section_header + 16); uint32_t pointer_to_raw_data = extract32(last_section_header + 20); return sizeof_raw_data + pointer_to_raw_data; #else if (blob[0] != 0x7F || blob[1] != 'E' || blob[2] != 'L' || blob[3] != 'F') throw std::runtime_error("Blob is not in ELF format"); if (blob[4] != 0x02) throw std::runtime_error("Only 64-bit headers are supported"); if (blob[5] != 0x01) throw std::runtime_error("Only little-endian headers are supported"); unsigned char const *const ublob = reinterpret_cast<unsigned char const *const>(blob); uint64_t e_shoff = extract64(ublob + 0x28); uint16_t e_shentsize = extract16(ublob + 0x3A); uint16_t e_shnum = extract16(ublob + 0x3C); return e_shoff + (e_shentsize * e_shnum); #endif } #ifdef _WIN32 class path_lib_record { public: void operator=(const path_lib_record &) = delete; ~path_lib_record() { for (auto entry : lib_to_path) { FreeLibrary(static_cast<HMODULE>(entry.first)); fs::permissions(entry.second, fs::perms::owner_all); fs::remove_all(entry.second.remove_filename()); } } static void record_lib_path(fs::path path, void *library) { lib_to_path[library] = path; } static void remove_lib(void *library) { auto path = lib_to_path[library]; std::error_code ec; FreeLibrary(static_cast<HMODULE>(library)); fs::permissions(path, fs::perms::owner_all); if (fs::remove_all(path.remove_filename(), ec) != 2 || ec) // one directory and one temporary file should have been deleted throw std::runtime_error("Directory delete failed"); lib_to_path.erase(library); } private: static inline std::unordered_map<void *, fs::path> lib_to_path; }; #endif } // namespace detail class kernel_library { public: kernel_library() : ptr{nullptr} {} kernel_library(void *ptr) : ptr{ptr} {} operator void *() const { return ptr; } private: void *ptr; #ifdef _WIN32 static inline detail::path_lib_record 
single_instance_to_trigger_destructor; #endif }; namespace detail { static inline kernel_library load_dl_from_data(char const *const data, size_t size) { fs::path filename = write_data_to_file(data, size); #ifdef _WIN32 void *so = LoadLibraryW(filename.wstring().c_str()); #else void *so = dlopen(filename.c_str(), RTLD_LAZY); #endif if (so == nullptr) throw std::runtime_error("Failed to load kernel library"); #ifdef _WIN32 detail::path_lib_record::record_lib_path(filename, so); #else std::error_code ec; // Windows DLL cannot be deleted while in use if (fs::remove_all(filename.remove_filename(), ec) != 2 || ec) // one directory and one temporary file should have been deleted throw std::runtime_error("Directory delete failed"); #endif return so; } } // namespace detail /// Load kernel library and return a handle to use the library. /// \param [in] name The name of the library. static inline kernel_library load_kernel_library(const std::string &name) { std::ifstream ifs; ifs.open(name, std::ios::in | std::ios::binary); std::stringstream buffer; buffer << ifs.rdbuf(); const std::string buffer_string = buffer.str(); return detail::load_dl_from_data(buffer_string.c_str(), buffer_string.size()); } /// Load kernel library whose image is alreay in memory and return a handle to /// use the library. /// \param [in] image A pointer to the image in memory. static inline kernel_library load_kernel_library_mem(char const *const image) { const size_t size = detail::get_lib_size(image); return detail::load_dl_from_data(image, size); } /// Unload kernel library. /// \param [in,out] library Handle to the library to be closed. static inline void unload_kernel_library(const kernel_library &library) { #ifdef _WIN32 detail::path_lib_record::remove_lib(library); #else dlclose(library); #endif } class kernel_function { public: kernel_function() : ptr{nullptr} {} kernel_function(dpct::kernel_functor ptr) : ptr{ptr} {} operator void *() const { return ((void *)ptr); } void operator()(sycl::queue &q, const sycl::nd_range<3> &range, unsigned int a, void **args, void **extra) { ptr(q, range, a, args, extra); } private: dpct::kernel_functor ptr; }; /// Find kernel function in a kernel library and return its address. /// \param [in] library Handle to the kernel library. /// \param [in] name Name of the kernel function. static inline dpct::kernel_function get_kernel_function(kernel_library &library, const std::string &name) { #ifdef _WIN32 dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>( GetProcAddress(static_cast<HMODULE>(static_cast<void *>(library)), (name + std::string("_wrapper")).c_str())); #else dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>( dlsym(library, (name + std::string("_wrapper")).c_str())); #endif if (fn == nullptr) throw std::runtime_error("Failed to get function"); return fn; } /// Invoke a kernel function. /// \param [in] function kernel function. /// \param [in] queue SYCL queue used to execute kernel /// \param [in] groupRange SYCL group range /// \param [in] localRange SYCL local range /// \param [in] localMemSize The size of local memory required by the kernel /// function. /// \param [in] kernelParams Array of pointers to kernel arguments. /// \param [in] extra Extra arguments. 
static inline void invoke_kernel_function(dpct::kernel_function &function, sycl::queue &queue, sycl::range<3> groupRange, sycl::range<3> localRange, unsigned int localMemSize, void **kernelParams, void **extra) { function(queue, sycl::nd_range<3>(groupRange * localRange, localRange), localMemSize, kernelParams, extra); } /// Find image wrapper in a kernel library and return its address. /// \param [in] library Handle to the kernel library. /// \param [in] name Name of the target image wrapper. static inline dpct::image_wrapper_base_p get_image_wrapper(dpct::kernel_library &library, const std::string &name) { #ifdef _WIN32 dpct::image_wrapper_base_p fn = reinterpret_cast<dpct::image_wrapper_base_p>(GetProcAddress( static_cast<HMODULE>(static_cast<void *>(library)), name.c_str())); #else dpct::image_wrapper_base_p fn = reinterpret_cast<dpct::image_wrapper_base_p>( dlsym(library, name.c_str())); #endif if (fn == nullptr) throw std::runtime_error("Failed to get image"); return fn; } } // namespace dpct #endif // __DPCT_KERNEL_HPP__
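// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header, kept inside
// "#if 0" so it is never compiled). It strings together the loader utilities
// declared above. The library file name "my_kernels.bin", the kernel name
// "vector_add", and the launch geometry are assumptions for illustration only.
#if 0
void run_prebuilt_kernel(sycl::queue &q) {
  // Load a dynamic library containing the compiled "<name>_wrapper" entry.
  dpct::kernel_library lib = dpct::load_kernel_library("my_kernels.bin");
  dpct::kernel_function fn = dpct::get_kernel_function(lib, "vector_add");

  int n = 1024;
  float *data = sycl::malloc_device<float>(n, q);
  void *args[] = {&data, &n}; // pointers to kernel arguments, in order

  // group range * local range gives the global range of the nd_range launch.
  dpct::invoke_kernel_function(fn, q,
                               sycl::range<3>(1, 1, n / 256), // group range
                               sycl::range<3>(1, 1, 256),     // local range
                               /*localMemSize=*/0, args, /*extra=*/nullptr);
  q.wait();

  sycl::free(data, q);
  dpct::unload_kernel_library(lib);
}
#endif
// ---------------------------------------------------------------------------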
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/dpct.hpp
//==---- dpct.hpp ---------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_HPP__ #define __DPCT_HPP__ #include <sycl/sycl.hpp> #include <iostream> #include <limits.h> #include <math.h> template <class... Args> class dpct_kernel_name; template <int Arg> class dpct_kernel_scalar; #include "atomic.hpp" #include "device.hpp" #include "image.hpp" #include "kernel.hpp" #include "math.hpp" #include "memory.hpp" #include "util.hpp" #if defined(_MSC_VER) #define __dpct_align__(n) __declspec(align(n)) #define __dpct_inline__ __forceinline #else #define __dpct_align__(n) __attribute__((aligned(n))) #define __dpct_inline__ __inline__ __attribute__((always_inline)) #endif #if defined(_MSC_VER) #define __dpct_noinline__ __declspec(noinline) #else #define __dpct_noinline__ __attribute__((noinline)) #endif #define DPCT_COMPATIBILITY_TEMP (600) namespace dpct{ enum error_code { success = 0, default_error = 999 }; } #define DPCT_CHECK_ERROR(expr) \ [&]() { \ try { \ expr; \ return dpct::success; \ } catch (std::exception const &e) { \ std::cerr << e.what() << std::endl; \ return dpct::default_error; \ } \ }() #define DPCT_PI_F (3.14159274101257f) #define DPCT_PI (3.141592653589793115998) #endif // __DPCT_HPP__
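// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header, kept inside
// "#if 0" so it is never compiled). DPCT_CHECK_ERROR wraps an expression in a
// try/catch and maps any thrown exception to dpct::default_error, mirroring
// the status-code checks at the original call sites. The buffer size below is
// an assumption for illustration only.
#if 0
void checked_allocation(sycl::queue &q) {
  float *buf = nullptr;
  // Returns dpct::success unless the expression throws, in which case the
  // exception message is printed and dpct::default_error is returned.
  dpct::error_code status =
      DPCT_CHECK_ERROR(buf = sycl::malloc_device<float>(1024, q));
  if (status != dpct::success) {
    // handle the failure the same way the original error-code check did
    return;
  }
  sycl::free(buf, q);
}
#endif
// ---------------------------------------------------------------------------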
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/dnnl_utils.hpp
//==---- dnnl_utils.hpp ---------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_DNNL_UTILS_HPP__ #define __DPCT_DNNL_UTILS_HPP__ #include <oneapi/dpl/algorithm> #include <oneapi/dpl/execution> #include <oneapi/dpl/numeric> #include <oneapi/mkl.hpp> #include <oneapi/mkl/rng/device.hpp> #include <sycl/sycl.hpp> #include <oneapi/dnnl/dnnl.hpp> #include <oneapi/dnnl/dnnl_sycl.hpp> #include <unordered_map> #include <algorithm> #include <list> #include "memory.hpp" #include "device.hpp" #include "lib_common_utils.hpp" namespace dpct { namespace dnnl { /// Get concatenated library version as an integer. static inline size_t get_version() { const ::dnnl::version_t *ver = ::dnnl::version(); return ver->major * 1000 + ver->minor * 100 + ver->patch; } class engine_ext; typedef oneapi::mkl::rng::philox4x32x10 rng_engine_t; /// An enum class representing memory layout. Used by /// memory_desc_ext to create a memory with pre-defined layout. enum class memory_format_tag { nchw, nhwc, nchw_blocked }; /// An enum class representing RNN data memory layout. Used by /// memory_desc_ext to create a memory with pre-defined layout. enum class rnn_memory_format_tag { tnc, ntc }; /// A class holding the description of an N-dimensions memory. class memory_desc_ext { ::dnnl::memory::desc _desc; public: /// Convert dpct::library_data_t to dnnl::memory::data_type. static ::dnnl::memory::data_type to_dnnl_data_type(dpct::library_data_t dt); /// Convert dnnl::memory::data_type to dpct::library_data_t. static dpct::library_data_t to_dpct_library_data_t(::dnnl::memory::data_type dt, unsigned block_size); /// Convert dpct::dnnl::memory_format_tag to dnnl::memory::format_tag. static ::dnnl::memory::format_tag to_dnnl_format_tag(dpct::library_data_t dt, memory_format_tag tag); memory_desc_ext() = default; memory_desc_ext(::dnnl::memory::desc &desc) : _desc(desc) {} memory_desc_ext(::dnnl::memory::desc &&desc) : _desc(std::move(desc)) {} /// Setting a 4D memory with given parameters. /// \param [in] tag Format tag. /// \param [in] dt Data type. /// \param [in] n Number of images. /// \param [in] c Number of channels. /// \param [in] h Height of images. /// \param [in] w Width of images. void set(memory_format_tag tag, dpct::library_data_t dt, int n, int c, int h, int w); /// Setting a 3D RNN data memory with given parameters. /// \param [in] tag RNN data format tag. /// \param [in] dt Data type. /// \param [in] t Number of sequence length. /// \param [in] n Number of batch. /// \param [in] c Height of input channel. void set(rnn_memory_format_tag tag, dpct::library_data_t dt, int t, int n, int c); /// Setting a 4D memory with given parameters. /// \param [in] dt Data type. /// \param [in] n Number of images. /// \param [in] c Number of channels. /// \param [in] h Height of images. /// \param [in] w Width of images. /// \param [in] n_stride Stride between two continuous images. /// \param [in] c_stride Stride between two continuous channels. /// \param [in] h_stride Stride between two continuous rows. /// \param [in] w_stride Stride between two continuous columns. void set(dpct::library_data_t dt, int n, int c, int h, int w, int n_stride, int c_stride, int h_stride, int w_stride); /// Setting a ND memory with given parameters. /// \param [in] dt Data type. 
/// \param [in] ndims Dimension of the memory. /// \param [in] dims Array of dimension ndims that contain the size of each /// memory dimension. \param [in] strides Array of dimension ndims that /// contain the stride of each memory dimension. void set(dpct::library_data_t dt, int ndims, const int dims[], const int strides[]); /// Setting a ND memory with given parameters. /// \param [in] tag Format tag. /// \param [in] dt Data type. /// \param [in] ndims Dimension of the memory. /// \param [in] dims Array of dimension ndims that contain the size of each /// memory dimension. void set(memory_format_tag tag, dpct::library_data_t dt, int ndims, const int dims[]); /// Getting a ::dnnl::memory::desc from a memory_desc_ext. /// \returns The ::dnnl::memory::desc. const ::dnnl::memory::desc &get_desc() const { return _desc; } /// Setting holding desc with given dnnl memory descriptor. void set_desc(::dnnl::memory::desc desc) { _desc = desc; } /// Getting a size of a memory_desc_ext in bytes. /// \returns The size. size_t get_size() const { return _desc.get_size(); } /// Getting parameters from a 4D memory. /// \param [out] dt Data type. /// \param [out] n Number of images. /// \param [out] c Number of channels. /// \param [out] h Height of images. /// \param [out] w Width of images. /// \param [out] n_stride Stride between two continuous images. /// \param [out] c_stride Stride between two continuous channels. /// \param [out] h_stride Stride between two continuous rows. /// \param [out] w_stride Stride between two continuous columns. void get(dpct::library_data_t *dt, int *n, int *c, int *h, int *w, int *n_stride, int *c_stride, int *h_stride, int *w_stride) const; /// Getting parameters from a 4D memory. /// \param [out] dt Data type. /// \param [out] tag Format tag. /// \param [out] n Number of images. /// \param [out] c Number of channels. /// \param [out] h Height of images. /// \param [out] w Width of images. void get(dpct::library_data_t *dt, memory_format_tag *tag, int *n, int *c, int *h, int *w) const; /// Getting parameters from a 3D RNN data memory. /// \param [out] dt Data type. /// \param [out] tag RNN data format tag. /// \param [out] t Number of sequence length. /// \param [out] n Number of batch. /// \param [out] c Height of input channel. void get(dpct::library_data_t *dt, rnn_memory_format_tag *tag, int *t, int *n, int *c) const; /// Getting parameters from a ND memory. /// \param [in] requested_ndims Requested number of dimensions to get from a /// given memory descriptor. /// \param [out] dt Data type. /// \param [out] ndims Dimension of the memory. /// \param [out] dims Array of dimension requested_ndims that contain the /// size of each memory dimension. /// \param [out] strides Array of dimension requested_ndims that contain the /// stride of each memory dimension. void get(int requested_ndims, dpct::library_data_t *dt, int *ndims, int dims[], int strides[]) const; /// Getting parameters from a ND memory. /// \param [in] requested_ndims Requested number of dimensions to get from a /// given memory descriptor. /// \param [out] dt Data type. /// \param [out] tag Format tag. /// \param [out] ndims Dimension of the memory. /// \param [out] dims Array of dimension requested_ndims that contain the /// size of each memory dimension. void get(int requested_ndims, dpct::library_data_t *dt, memory_format_tag *tag, int *ndims, int dims[]) const; /// Getting dims from a ND memory. /// \return The dims. 
std::vector<int64_t> get_dims() const { return _desc.get_dims(); } /// Getting strides from a ND memory. /// \return The strides. std::vector<int64_t> get_strides() const { return _desc.get_strides(); } /// Getting element num from a ND memory. /// \return The element number. size_t get_element_num() const { auto dims = _desc.get_dims(); if (dims.empty()) { return 0; } size_t result = 1; for (auto &dim : dims) { result *= dim; } return result; } operator bool() const { return bool(_desc); } memory_desc_ext &operator=(std::nullptr_t) { _desc.reset(nullptr); return *this; } }; /// A class holding description for an activation operation. class activation_desc { ::dnnl::algorithm _alg; float _alpha; float _beta; public: /// Setting an activation descriptor with given parameters. /// \param [in] alg Activation algorithm. /// \param [in] alpha Value of alpha parameter. void set(::dnnl::algorithm alg, float alpha) { _alg = alg; if(alg == ::dnnl::algorithm::eltwise_clip) { _alpha = 0; _beta = alpha; } else { _alpha = alpha; } } /// Getting parameters form an activation descriptor. /// \param [out] alg Activation algorithm. /// \param [out] alpha Value of alpha parameter. void get(::dnnl::algorithm *alg, float *alpha) const { *alg = _alg; if(_alg == ::dnnl::algorithm::eltwise_clip) { *alpha = _beta; } else { *alpha = _alpha; } } /// Setting the alpha parameter of an activation descriptor. /// \param [in] alpha Value of alpha parameter. void set_alpha(float alpha) { _alpha = alpha; } /// Setting the beta parameter of an activation descriptor. /// \param [in] beta Value of beta parameter. void set_beta(float beta) { _beta = beta; } /// Setting the algorithm parameter of an activation descriptor. /// \param [in] alg Activation algorithm. void set_algorithm(::dnnl::algorithm alg) { _alg = alg; } /// Getting the alpha parameter from an activation descriptor. /// \param [out] alpha Value of alpha parameter. float get_alpha() const { return _alpha; } /// Getting the beta parameter from an activation descriptor. /// \param [out] beta Value of beta parameter. float get_beta() const { return _beta; } /// Getting the algorithm parameter from an activation descriptor. /// \param [out] alg Activation algorithm. ::dnnl::algorithm get_algorithm() const { return _alg; } }; /// A class holding description for a local response normalization operation. class lrn_desc { unsigned int _local_size; float _alpha; float _beta; float _k; public: /// Setting a local response normalization descriptor with given parameters. /// \param [in] local_size Value of local_size parameter. /// \param [in] alpha Value of alpha parameter. /// \param [in] beta Value of beta parameter. /// \param [in] k Value of k parameter. void set(unsigned int local_size, float alpha, float beta, float k) { _local_size = local_size; _alpha = alpha; _beta = beta; _k = k; } /// Getting parameters form a local response normalization descriptor. /// \param [out] local_size Value of local_size parameter. /// \param [out] alpha Value of alpha parameter. /// \param [out] beta Value of beta parameter. /// \param [out] k Value of k parameter. void get(unsigned int *local_size, float *alpha, float *beta, float *k) const { *local_size = _local_size; *alpha = _alpha; *beta = _beta; *k = _k; } /// Setting the local size parameter of a local response normalization /// descriptor. /// \param [in] local_size Value of local_size parameter. 
void set_local_size(unsigned int local_size) { _local_size = local_size; } /// Setting the alpha parameter of a local response normalization descriptor. /// \param [in] alpha Value of alpha parameter. void set_alpha(float alpha) { _alpha = alpha; } /// Setting the beta parameter of a local response normalization descriptor. /// \param [in] beta Value of beta parameter. void set_beta(float beta) { _beta = beta; } /// Setting the k parameter of a local response normalization descriptor. /// \param [in] k Value of k parameter. void set_k(float k) { _k = k; } /// Getting the local size parameter from a local response normalization /// descriptor. /// \param [out] local_size Value of local_size parameter. unsigned int get_local_size() const { return _local_size; } /// Getting the alpha parameter from a local response normalization /// descriptor. /// \param [out] alpha Value of alpha parameter. float get_alpha() const { return _alpha; } /// Getting the beta parameter from a local response normalization descriptor. /// \param [out] beta Value of beta parameter. float get_beta() const { return _beta; } /// Getting the k parameter from a local response normalization descriptor. /// \param [out] k Value of k parameter. float get_k() const { return _k; } }; /// An enum class representing softmax algorithm. enum class softmax_algorithm { normal, log }; /// An enum class representing softmax mode. enum class softmax_mode { instance, channel }; /// A class holding description for a pooling operation. class pooling_desc { ::dnnl::algorithm _alg; std::vector<int64_t> _stride; std::vector<int64_t> _kernel; std::vector<int64_t> _padding; public: /// Setting a 2D pooling descriptor with given parameters. /// \param [in] alg Pooling algorithm. /// \param [in] kernel_h Value of height of kernel. /// \param [in] kernel_w Value of width of kernel. /// \param [in] padding_h Value of height of padding. /// \param [in] padding_w Value of width of padding. /// \param [in] stride_h Value of height of stride. /// \param [in] stride_w Value of width of stride. void set(::dnnl::algorithm alg, int kernel_h, int kernel_w, int padding_h, int padding_w, int stride_h, int stride_w) { _alg = alg; _stride = {stride_h, stride_w}; _kernel = {kernel_h, kernel_w}; _padding = {padding_h, padding_w}; } /// Setting a ND pooling descriptor with given parameters. /// \param [in] alg Pooling algorithm. /// \param [in] ndims Dimension of the pooling operation. /// \param [in] kernel Array of dimension ndims containing the kernel size of /// each dimension. /// \param [in] padding Array of dimension ndims containing the padding size of /// each dimension. /// \param [in] stride Array of dimension ndims containing the stride size of /// each dimension. void set(::dnnl::algorithm alg, int ndims, int kernel[], int padding[], int stride[]) { _alg = alg; _stride = std::vector<int64_t>(stride, stride + ndims); _kernel = std::vector<int64_t>(kernel, kernel + ndims); _padding = std::vector<int64_t>(padding, padding + ndims); } /// Getting parameters from a 2D pooling descriptor. /// \param [out] alg Pooling algorithm. /// \param [out] kernel_h Value of height of kernel. /// \param [out] kernel_w Value of width of kernel. /// \param [out] padding_h Value of height of padding. /// \param [out] padding_w Value of width of padding. /// \param [out] stride_h Value of height of stride. /// \param [out] stride_w Value of width of stride. 
void get(::dnnl::algorithm *alg, int *kernel_h, int *kernel_w, int *padding_h, int *padding_w, int *stride_h, int *stride_w) const { *alg = _alg; *kernel_h = _kernel[0]; *kernel_w = _kernel[1]; *padding_h = _padding[0]; *padding_w = _padding[1]; *stride_h = _stride[0]; *stride_w = _stride[1]; } /// Getting parameters from a ND pooling descriptor. /// \param [in] requested_ndims Requested number of dimensions to get from a /// given pooling descriptor. /// \param [out] alg Pooling algorithm. /// \param [out] ndims Dimension of the pooling operation. /// \param [out] kernel Array of dimension ndims containing the kernel size of /// each dimension. /// \param [out] padding Array of dimension ndims containing the padding size /// of each dimension. /// \param [out] stride Array of dimension ndims containing the stride size of /// each dimension. void get(int requested_ndims, ::dnnl::algorithm *alg, int *ndims, int kernel[], int padding[], int stride[]) const { *alg = _alg; *ndims = _stride.size(); for (int i = 0; i < requested_ndims; i++) { kernel[i] = _kernel[i]; padding[i] = _padding[i]; stride[i] = _stride[i]; } } /// Setting the algorithm parameter of a pooling descriptor. /// \param [in] alg Pooling algorithm. void set_algorithm(::dnnl::algorithm alg) { _alg = alg; } /// Setting the stride parameter of a pooling descriptor. /// \param [in] stride Array of dimension ndims containing the stride size of /// each dimension. void set_stride(const std::vector<int64_t> &stride) { _stride = stride; } /// Setting the kernel parameter of a pooling descriptor. /// \param [in] kernel Array of dimension ndims containing the kernel size of /// each dimension. void set_kernel(const std::vector<int64_t> &kernel) { _kernel = kernel; } /// Setting the padding parameter of a pooling descriptor. /// \param [in] padding Array of dimension ndims containing the padding size /// of each dimension. void set_padding(const std::vector<int64_t> &padding) { _padding = padding; } /// Getting the algorithm parameter from a pooling descriptor. /// \param [out] alg Pooling algorithm. ::dnnl::algorithm get_algorithm() const { return _alg; } /// Getting the stride parameter from a pooling descriptor. /// \returns Array of dimension ndims containing the stride size of each /// dimension. const std::vector<int64_t> &get_stride() const { return _stride; } /// Getting the kernel parameter from a pooling descriptor. /// \returns Array of dimension ndims containing the kernel size of each /// dimension. const std::vector<int64_t> &get_kernel() const { return _kernel; } /// Getting the padding parameter from a pooling descriptor. /// \returns Array of dimension ndims containing the padding size of each /// dimension. const std::vector<int64_t> &get_padding() const { return _padding; } /// Getting the output dimensions of a memory after 2D pooling has been /// applied. /// \param [in] desc Input memory descriptor. /// \param [out] out_n Number of images. /// \param [out] out_c Number of channels. /// \param [out] out_h Height of images. /// \param [out] out_w Width of images. void get_forward_output_dim(const memory_desc_ext &desc, int *out_n, int *out_c, int *out_h, int *out_w) const { auto dims = desc.get_dims(); *out_n = dims[0]; *out_c = dims[1]; *out_h = 1 + (dims[2] + 2 * _padding[0] - _kernel[0]) / _stride[0]; *out_w = 1 + (dims[3] + 2 * _padding[1] - _kernel[1]) / _stride[1]; } /// Getting the output dimensions of a memory after ND pooling has been /// applied. /// \param [in] desc Input memory descriptor. 
/// \param [out] ndims Dimension of the memory. /// \param [out] out_dims Array of dimension requested_ndims that contain /// the size of each memory dimension. void get_forward_output_dim(const memory_desc_ext &desc, int ndims, int out_dims[]) const { assert(ndims >= 4 && "ndims is at least 4."); auto dims = desc.get_dims(); out_dims[0] = dims[0]; out_dims[1] = dims[1]; for (int i = 2; i < ndims; i++) { out_dims[i] = 1 + (dims[i] + 2 * _padding[i - 2] - _kernel[i - 2]) / _stride[i - 2]; } } }; /// An enum class representing reduction operations. enum class reduction_op { max, min, sum, mul, mean, amax, mul_no_zeros, norm1, norm2 }; /// An enum class representing batch normalization mode. enum class batch_normalization_mode { per_activation, spatial }; /// An enum class representing batch normalization operations. enum class batch_normalization_ops { none, activation, add_activation }; /// An enum class representing binary operations. enum class binary_op { add, sub, mul, div, min, max, sqrt, neg }; /// An struct representing convolution algorithm infomation. struct convolution_algorithm_info { ::dnnl::algorithm algo = ::dnnl::algorithm::convolution_auto; int status = 0; }; /// A class holding description for a convolution operation. class convolution_desc { std::vector<int64_t> _strides; std::vector<int64_t> _dilates; std::vector<int64_t> _paddings; int _group_count = 1; ::dnnl::fpmath_mode _math_mode = ::dnnl::fpmath_mode::strict; public: /// Setting a group count to be used in the convolution. /// \param [in] group_count Value of group count. void set_group_count(int group_count) { _group_count = group_count; } /// Getting a group count specified in the given convolution descriptor. /// \returns Value of group count. int get_group_count() { return _group_count; } /// Setting floating point math mode to be used in the convolution. /// \param [in] math_mode Value of math_mode. void set_math_mode(::dnnl::fpmath_mode math_mode) { _math_mode = math_mode; } /// Getting floating point math mode specified in the given convolution descriptor. /// \returns Value of math mode. ::dnnl::fpmath_mode get_math_mode() { return _math_mode; } /// Setting a 2D convolution descriptor with given parameters. /// \param [in] padding_h Value of height of padding. /// \param [in] padding_w Value of width of padding. /// \param [in] stride_h Value of height of stride. /// \param [in] stride_w Value of width of stride. /// \param [in] dilate_h Value of height of dilate. /// \param [in] dilate_w Value of width of dilate. void set(int padding_h, int padding_w, int stride_h, int stride_w, int dilate_h, int dilate_w) { _strides = {stride_h, stride_w}; _dilates = {dilate_h - 1, dilate_w - 1}; _paddings = {padding_h, padding_w}; } /// Setting a ND convolution descriptor with given parameters. /// \param [in] ndims Dimension of the convolution operation. /// \param [in] paddings Array of dimension ndims containing the padding size of /// each dimension. /// \param [in] strides Array of dimension ndims containing the stride size of /// each dimension. /// \param [in] dilates Array of dimension ndims containing the kernel size of /// each dimension. void set(int ndims, int paddings[], int strides[], int dilates[]) { _strides = std::vector<int64_t>(strides, strides + ndims); _paddings = std::vector<int64_t>(paddings, paddings + ndims); _dilates = std::vector<int64_t>(dilates, dilates + ndims); for (auto &dilate : _dilates) { dilate--; } } /// Getting parameters from a 2D convolution descriptor. 
/// \param [out] padding_h Value of height of padding. /// \param [out] padding_w Value of width of padding. /// \param [out] stride_h Value of height of stride. /// \param [out] stride_w Value of width of stride. /// \param [out] dilate_h Value of height of dilate. /// \param [out] dilate_w Value of width of dilate. void get(int *padding_h, int *padding_w, int *stride_h, int *stride_w, int *dilate_h, int *dilate_w) const { *dilate_h = _dilates[0]; *dilate_w = _dilates[1]; *padding_h = _paddings[0]; *padding_w = _paddings[1]; *stride_h = _strides[0]; *stride_w = _strides[1]; } /// Getting parameters from a ND convolution descriptor. /// \param [in] requested_ndims Requested number of dimensions to get from a /// given convolution descriptor. /// \param [out] ndims Dimension of the pooling operation. /// \param [out] paddings Array of dimension ndims containing the padding size /// of each dimension. /// \param [out] strides Array of dimension ndims containing the stride size of /// each dimension. /// \param [out] dilates Array of dimension ndims containing the dilate size of /// each dimension. void get(int requested_ndims, int *ndims, int paddings[], int strides[], int dilates[]) const { *ndims = _strides.size(); for (int i = 0; i < requested_ndims; i++) { dilates[i] = _dilates[i]; paddings[i] = _paddings[i]; strides[i] = _strides[i]; } } /// Getting the stride parameter from a convolution descriptor. /// \returns Array of dimension ndims containing the stride size of each /// dimension. const std::vector<int64_t> &get_stride() const { return _strides; } /// Getting the kernel parameter from a convolution descriptor. /// \returns Array of dimension ndims containing the dilate size of each /// dimension. const std::vector<int64_t> &get_dilate() const { return _dilates; } /// Getting the padding parameter from a convolution descriptor. /// \returns Array of dimension ndims containing the padding size of each /// dimension. const std::vector<int64_t> &get_padding() const { return _paddings; } /// Getting the output dimensions of a memory after 2D convolution has been /// applied. /// \param [in] desc Input memory descriptor. /// \param [in] weight_desc Input weight memory descriptor. /// \param [out] out_n Number of images. /// \param [out] out_c Number of channels. /// \param [out] out_h Height of images. /// \param [out] out_w Width of images. void get_forward_output_dim(const memory_desc_ext &desc, const memory_desc_ext &weight_desc, int *out_n, int *out_c, int *out_h, int *out_w) const { auto dims = desc.get_dims(); auto weight_dims = weight_desc.get_dims(); *out_n = dims[0]; *out_c = weight_dims[0]; *out_h = 1 + (dims[2] + 2 * _paddings[0] - (1 + (_dilates[0] * (weight_dims[2] - 1)))) / _strides[0]; *out_w = 1 + (dims[3] + 2 * _paddings[1] - (1 + (_dilates[1] * (weight_dims[3] - 1)))) / _strides[1]; } /// Getting the output dimensions of a memory after ND convolution has been /// applied. /// \param [in] desc Input memory descriptor. /// \param [in] weight_desc Input weight memory descriptor. /// \param [out] ndims Dimension of the memory. /// \param [out] out_dims Array of dimension requested_ndims that contain /// the size of each memory dimension. 
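  /// Illustrative usage sketch (not part of the API): querying the output
  /// shape of a 4D convolution, assuming src_desc and weight_desc are
  /// memory_desc_ext objects initialized elsewhere by the caller.
  /// \code
  /// int out_dims[4];
  /// conv_desc.get_forward_output_dim(src_desc, weight_desc, 4, out_dims);
  /// // out_dims now holds the output dimensions (batch, channels, and the
  /// // spatial sizes) computed from the source and weight descriptors.
  /// \endcode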
void get_forward_output_dim(const memory_desc_ext &desc, const memory_desc_ext &weight_desc, int ndims, int out_dims[]) const { assert(ndims >= 4 && "ndims is at least 4."); auto dims = desc.get_dims(); auto weight_dims = weight_desc.get_dims(); out_dims[0] = dims[0]; out_dims[1] = weight_dims[1]; for (int i = 2; i < ndims; i++) { out_dims[i] = 1 + (dims[i] + 2 * _paddings[i - 2] - (1 + (_dilates[i - 2] * (weight_dims[i] - 1)))) / _strides[i - 2]; } } convolution_desc &operator=(std::nullptr_t) { return *this = convolution_desc(); } operator bool() const { return !(_strides.size() == 0 && _dilates.size() == 0 && _paddings.size() == 0); } }; /// An enum class representing rnn mode. enum class rnn_mode { vanilla_relu, vanilla_tanh, lstm, gru }; /// An enum class representing rnn bias mode. enum class rnn_bias_mode { none, single }; /// An enum class representing rnn direction. enum class rnn_direction {unidirectional, bidirectional}; /// A class holding description for a RNN operation. class rnn_desc { rnn_mode _mode; rnn_bias_mode _bias_mode; rnn_direction _direction; dpct::library_data_t _dt; int _input_size; int _hidden_size; int _projection_size; int _layer_size; public: void set(rnn_mode mode, rnn_bias_mode bias_mode, rnn_direction direction, dpct::library_data_t dt, int input_size, int hidden_size, int projection_size, int layer_size) { _mode = mode; _bias_mode = bias_mode; _direction = direction; _input_size = input_size; _hidden_size = hidden_size; _projection_size = projection_size; _layer_size = layer_size; _dt = dt; } void get(rnn_mode *mode, rnn_bias_mode *bias_mode, rnn_direction *direction, dpct::library_data_t *dt, int *input_size, int *hidden_size, int *projection_size, int *layer_size) const { *mode = _mode; *bias_mode = _bias_mode; *direction = _direction; *input_size = _input_size; *hidden_size = _hidden_size; *projection_size = _projection_size; *layer_size = _layer_size; *dt = _dt; } }; /// A class holding description for a Dropout operation. class dropout_desc { struct dropout_desc_imp { float _p = 0.5f; unsigned long long _seed = 1; void *_state = nullptr; std::vector<std::uint8_t> _host_state; rng_engine_t _rng_engine; dropout_desc_imp() : _rng_engine(dpct::get_default_queue(), 1) {} }; std::shared_ptr<dropout_desc_imp> _imp; void generate(sycl::queue *q, std::int64_t required_state_size, std::int64_t num, void *buffer) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) " "Interfaces Project does not support this API."); #else sycl::event e_gen = oneapi::mkl::rng::generate( oneapi::mkl::rng::bernoulli<std::int32_t>(1.f - _imp->_p), _imp->_rng_engine, num, (std::int32_t *)buffer); sycl::event e_save = q->submit([&](sycl::handler &cgh) { cgh.depends_on(e_gen); cgh.host_task([=] { oneapi::mkl::rng::save_state(_imp->_rng_engine, _imp->_host_state.data()); }); }); q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size, e_save); #endif } public: operator bool() const { return bool(_imp); } dropout_desc &operator=(std::nullptr_t) { _imp.reset(); return *this; } /// Initializing a dropout descriptor. void init(){ _imp = std::make_shared<dropout_desc_imp>(); } /// Setting a dropout descriptor with given parameters. /// \param [in] engine Engine of the dropout operation. /// \param [in] p Probability of value set to zero. /// \param [in] state Memory that store random generator state. /// \param [in] state_size Required size to store random generator state. 
  /// \param [in] seed Seed to initialize conditions of the generator state.
  void set(engine_ext &engine, float p, void *state, size_t state_size,
           unsigned long long seed);
  /// Getting parameters from a dropout descriptor.
  /// \param [out] p Probability of value set to zero.
  /// \param [out] states Memory that stores the random generator state.
  /// \param [out] seed Seed used to initialize conditions of the generator
  /// state.
  void get(float *p, void **states, unsigned long long *seed) const noexcept {
    *seed = _imp->_seed;
    *states = _imp->_state;
    *p = _imp->_p;
  }
  /// Getting the probability of value set to zero.
  /// \returns Probability.
  float get_probability() const noexcept { return _imp->_p; }
  /// Restoring a dropout descriptor from stored state.
  /// \param [in] engine Engine of the dropout operation.
  /// \param [in] p Probability of value set to zero.
  /// \param [in] state Memory that stores the random generator state.
  /// \param [in] state_size Required size to store random generator state.
  /// \param [in] seed Seed to initialize conditions of the generator state.
  void restore(engine_ext &engine, float p, void *state, size_t state_size,
               unsigned long long seed);
  friend class engine_ext;
};

namespace detail {
typedef std::string primitive_cache_key_type;
typedef std::list<primitive_cache_key_type> usage_list_type;
typedef struct {
  ::dnnl::primitive *primitive;
  usage_list_type::iterator usage_it;
  std::function<void(::dnnl::primitive *)> destructor;
  sycl::event e;
} primitive_cache_value_type;
typedef std::unordered_map<primitive_cache_key_type,
                           primitive_cache_value_type>
    cache_map_type;

// The primitive cache uses an LRU replacement policy, and the default cache
// capacity is 1024.
class primitive_cache {
  int _capacity = 1024;
  usage_list_type usage;
  cache_map_type cache_map;

  void touch(cache_map_type::iterator it, sycl::event e = {},
             bool update_event = false) {
    if (it->second.usage_it != usage.begin()) {
      const primitive_cache_key_type &key = it->first;
      usage.erase(it->second.usage_it);
      usage.push_front(key);
      it->second.usage_it = usage.begin();
    }
    if (update_event) {
      it->second.e = e;
    }
  }

  void async_destruct_primitive(const primitive_cache_value_type &value) {
    dpct::get_current_device().default_queue().submit([&](sycl::handler &cgh) {
      cgh.depends_on(value.e);
      cgh.host_task([=] { value.destructor(value.primitive); });
    });
  }

public:
  ::dnnl::primitive *get(const primitive_cache_key_type &key) {
    auto it = cache_map.find(key);
    if (it == cache_map.end()) {
      return nullptr;
    }
    touch(it);
    return it->second.primitive;
  }
  void put(const primitive_cache_key_type &key, ::dnnl::primitive *value,
           std::function<void(::dnnl::primitive *)> destructor,
           sycl::event e) {
    auto it = cache_map.find(key);
    if (it != cache_map.end()) {
      touch(it, e, true);
    } else {
      if (cache_map.size() == _capacity) {
        auto last_primitive = cache_map.find(usage.back());
        async_destruct_primitive(last_primitive->second);
        cache_map.erase(usage.back());
        usage.pop_back();
      }
      usage.push_front(key);
      cache_map[key] = {value, usage.begin(), destructor, e};
    }
  }
  ~primitive_cache() {
    for (auto &v : cache_map) {
      async_destruct_primitive(v.second);
    }
  }
};
} // namespace detail

/// A class holding the oneDNN engine.
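/// Typical setup sequence (illustrative sketch only; the device selection and
/// the way the user queue is constructed are assumptions, not requirements of
/// this class):
/// \code
/// engine_ext engine;
/// engine.create_engine(); // binds to the current dpct device and its
///                         // default queue
/// // Optionally run primitives on a user queue; it must share the engine's
/// // context, otherwise set_queue throws.
/// sycl::queue q(dpct::get_current_device().get_context(),
///               dpct::get_current_device());
/// engine.set_queue(&q);
/// \endcode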
class engine_ext { struct output_argument_info { float _alpha; float _beta; int _name; memory_desc_ext _desc; void *_data; output_argument_info(float alpha, float beta, int name, memory_desc_ext desc, void *data) : _alpha(alpha), _beta(beta), _name(name), _desc(desc), _data(data) {} output_argument_info(float alpha, float beta, memory_desc_ext desc, void *data) : _alpha(alpha), _beta(beta), _name(0), _desc(desc), _data(data) {} }; ::dnnl::engine _eng; ::dnnl::stream _s; sycl::queue *_q = nullptr; std::map<void *, ::dnnl::memory> workspace_map; std::int64_t _random_engine_state_size = -1; detail::primitive_cache _primitive_cache; ::dnnl::memory &get_workspace(void *key) { return workspace_map[key]; } void insert_workspace(void *key, ::dnnl::memory workspace) { workspace_map[key] = workspace; } const ::dnnl::stream &get_stream() const { return _s; } const ::dnnl::engine &get_engine() const { return _eng; } void *allocate(const memory_desc_ext &desc, int count = 1) const; ::dnnl::memory::desc compress_spatial_dimensions_to_channel(const ::dnnl::memory::desc &desc); ::dnnl::memory::desc get_bn_scale_bias_mean_var_desc(const ::dnnl::memory::desc &desc, batch_normalization_mode mode); sycl::event batch_normalization_backward_internal( batch_normalization_mode mode, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias, float beta_param, void *diff_scale, void *diff_bias, const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var); sycl::event batch_normalization_forward_internal( bool is_infer, batch_normalization_mode mode, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &scale_bias_desc, void *scale, void *bias, const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var, void *running_mean, void *running_var); ::dnnl::memory::desc transfer_memory_desc_to_channel_major_format(const ::dnnl::memory::desc &desc); ::dnnl::memory::desc bn_reorder_memory_to_channel_major_format(bool is_input, ::dnnl::memory::desc &desc, void *src, void **cache, std::vector<void *> &caches); ::dnnl::memory::desc transfer_memory_desc_to_format_tag_any(const ::dnnl::memory::desc &desc){ return ::dnnl::memory::desc(desc.get_dims(), desc.get_data_type(), ::dnnl::memory::format_tag::any); } void allocate_and_reorder_memory_to_optimal(::dnnl::memory::desc &from_desc, void *&from, ::dnnl::memory::desc &to_desc, void *&to, std::vector<void *> &caches) { if (from_desc != to_desc) { to = allocate(to_desc); caches.push_back(to); async_reorder(1.f, from_desc, from, 0.f, to_desc, to); } } template <typename primitive_type, typename... args_type> std::pair<detail::primitive_cache_key_type, primitive_type *> create_primitive(args_type &&...args); template <typename primitive_type> std::pair<detail::primitive_cache_key_type, primitive_type *> create_primitive_with_pd(const typename primitive_type::primitive_desc &pd); template <typename primitive_type, typename... 
args_type> typename primitive_type::primitive_desc create_primitive_desc(args_type &&...args); template <typename primitive_desc_type> std::string generate_cache_key(const primitive_desc_type &pd); void serialize_dims(std::stringstream &ss, const std::vector<int64_t> &dims) { ss.write((char *)dims.data(), dims.size() * sizeof(int64_t)); }; void serialize_mem_desc(std::stringstream &ss, const ::dnnl::memory::desc &desc) { if (desc.is_zero()) { return; } auto format_kind = desc.get_format_kind(); ss << desc.get_ndims() << (std::uint8_t)desc.get_data_type() << (std::uint8_t)format_kind; serialize_dims(ss, desc.get_dims()); serialize_dims(ss, desc.get_strides()); if (format_kind == ::dnnl::memory::format_kind::blocked) { ss << desc.get_inner_nblks(); serialize_dims(ss, desc.get_inner_blks()); serialize_dims(ss, desc.get_inner_idxs()); } }; sycl::event execute_rnn_forward_primitive( rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c, int dst_c, int layer_size, int direction_num, int hidden_size, int gate_num, int projection_size, std::vector<void *> &data, std::vector<int> &offset, int iter_num, size_t *weight_size = nullptr, size_t *workspace_size = nullptr, size_t *scratchpad_size = nullptr); sycl::event rnn_forward_internal( const rnn_desc &desc, ::dnnl::prop_kind kind, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c, size_t weight_size, void *weight, size_t workspace_size, void *workspace, size_t scratchpad_size, void *scratchpad, bool is_get_execution_args, size_t *weight_size_query, size_t *workspace_size_query, size_t *scratchpad_size_query); sycl::event execute_rnn_backward_primitive( rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c, int dst_c, int layer_size, int direction_num, int hidden_size, int gate_num, int projection_size, std::vector<void *> &data, std::vector<int> &offset, int iter_num); void async_free(sycl::queue *q, sycl::event e, std::unordered_map<int, ::dnnl::memory> *args, std::vector<void *> device_ptrs = {}) { q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { if (args) { delete args; } for (auto ptr : device_ptrs) { if (ptr) { sycl::free(ptr, *_q); } } }); }); }; bool scale_parameter_preprocess(const std::vector<output_argument_info> &args); template <typename primitive_type> sycl::event execute_primitive(const std::pair<detail::primitive_cache_key_type, primitive_type *> &primitive, std::unordered_map<int, ::dnnl::memory> *args, const std::vector<output_argument_info> &extra_args = {}, const std::vector<void *> &device_ptrs = {}); template <typename T> sycl::event fill_with_type(sycl::queue *q, void *src, const void *value, size_t size_with_byte) { return q->fill<T>(static_cast<T *>(src), *static_cast<const T *>(value), size_with_byte / sizeof(T)); } template <typename T> struct no_zero_op { T operator()(T e) { if (!e) { return 1; } return e; } }; template <typename T> void transform_no_zero_with_type(sycl::queue *q, void *src, void *dst, size_t num) { std::transform(oneapi::dpl::execution::make_device_policy(*q), static_cast<T *>(src), static_cast<T *>(src) + num, static_cast<T *>(dst), 
                   no_zero_op<T>());
  }

  void transform_no_zero(const memory_desc_ext &desc, void *src, void *dst);

  ::dnnl::memory::desc
  get_group_weight_desc(int group_count, const memory_desc_ext &weight_desc);

  void get_rnn_configuration(const ::dnnl::memory::desc &desc,
                             rnn_direction direction, rnn_mode mode,
                             dpct::library_data_t dt, int hidden_size,
                             ::dnnl::memory::data_type *dnnl_dt,
                             ::dnnl::memory::format_tag *tag,
                             int *projection_size, int *output_size,
                             int *seq_length, int *batch_size,
                             int *direction_num, int *gate_num);

public:
  engine_ext() {}
  operator bool() const { return bool(_eng) && bool(_s) && bool(_q); }
  engine_ext &operator=(std::nullptr_t) {
    _eng.reset(nullptr);
    _s.reset(nullptr);
    _q = nullptr;
    return *this;
  }
  /// Creating the oneDNN engine.
  void create_engine() {
    _eng = ::dnnl::sycl_interop::make_engine(
        dpct::get_current_device(), dpct::get_current_device().get_context());
    _s = ::dnnl::sycl_interop::make_stream(
        _eng, dpct::get_current_device().default_queue());
    _q = &dpct::get_current_device().default_queue();
  }
  /// Setting the user's SYCL queue for a oneDNN engine.
  /// \param [in] q Pointer to the SYCL queue.
  void set_queue(sycl::queue *q) {
    if (!q) {
      throw std::runtime_error("set_queue: pointer must not be nullptr.");
    }
    if (!_eng) {
      throw std::runtime_error("set_queue: current engine is invalid.");
    }
    if (q->get_context() != ::dnnl::sycl_interop::get_context(_eng)) {
      throw std::runtime_error(
          "set_queue: queue context does not match the current engine "
          "context.");
    }
    _q = q;
    _s = ::dnnl::sycl_interop::make_stream(_eng, *q);
  }
  /// Retrieving the user's SYCL queue set in the oneDNN engine.
  /// \returns Pointer to the SYCL queue.
  sycl::queue *get_queue() const { return _q; }
  /// Setting all elements of a memory to a given value.
  /// \param [in] src_desc Source memory descriptor.
  /// \param [in] src Pointer to source data.
  /// \param [in] valuePtr Pointer to a single value.
  void fill(const memory_desc_ext &src_desc, void *src, const void *valuePtr);
  /// Copying the scaled data from a memory to another memory with a different
  /// description.
  /// \param [in] alpha Value to scaling factors used to scale the computed
  /// value.
  /// \param [in] src_desc Source memory descriptor.
  /// \param [in] src Pointer to source data.
  /// \param [in] beta Value to scaling factors used to scale the prior value
  /// in the destination memory.
  /// \param [in] dst_desc Destination memory descriptor.
  /// \param [out] dst Pointer to destination data.
  void reorder(float alpha, const memory_desc_ext &src_desc, void *src,
               float beta, const memory_desc_ext &dst_desc, void *dst);
  /// Scaling all the elements of a memory by a given factor.
  /// \param [in] alpha Value to scaling factors.
  /// \param [in] src_desc Source memory descriptor.
  /// \param [out] src Pointer to source data.
  void scale(float alpha, const memory_desc_ext &src_desc, void *src);
  /// Adding the scaled values of a memory to another memory.
  /// \param [in] alpha Value to scaling factors used to scale the computed
  /// value.
  /// \param [in] src_desc Source memory descriptor.
  /// \param [in] src Pointer to source data.
  /// \param [in] beta Value to scaling factors used to scale the prior value
  /// in the destination memory.
  /// \param [in] dst_desc Destination memory descriptor.
  /// \param [out] dst Pointer to destination data.
  void sum(float alpha, const memory_desc_ext &src_desc, void *src, float beta,
           const memory_desc_ext &dst_desc, void *dst);
  /// Computing a specified activation function value.
  /// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. void activation_forward(activation_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst); /// Computing the gradient of a specified activation function. /// \param [in] desc Activation descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. void activation_backward(activation_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src); /// Computing a specified pooling function value. /// \param [in] desc Pooling descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [out] workspace Pointer to workspace generated from forward propagation. void pooling_forward(pooling_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace = nullptr); /// Computing the gradient of a specified pooling function. /// \param [in] desc Activation descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential /// source data. /// \param [in] workspace Pointer to workspace used for backward /// propagation. 
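  /// Typical call sequence (illustrative sketch; the descriptors and device
  /// pointers are assumed to be prepared by the caller, and the same
  /// workspace object is reused between the forward and backward calls):
  /// \code
  /// ::dnnl::memory workspace;
  /// engine.pooling_forward(pool_desc, 1.f, src_desc, src, 0.f, dst_desc, dst,
  ///                        &workspace);
  /// engine.pooling_backward(pool_desc, 1.f, dst_desc, dst, diff_dst_desc,
  ///                         diff_dst, src_desc, src, 0.f, diff_src_desc,
  ///                         diff_src, &workspace);
  /// \endcode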
void pooling_backward(pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src, ::dnnl::memory *workspace = nullptr); /// Computing a specified softmax function value. /// \param [in] alg Softmax algorithm. /// \param [in] mode Softmax mode. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. void softmax_forward(softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst); /// Computing the gradient of a specified softmax function. /// \param [in] alg Softmax algorithm. /// \param [in] mode Softmax mode. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. void softmax_backward(softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_src_desc, void *diff_src); /// Computing a specified local response normalization function value. /// \param [in] desc Local response normalization descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [out] workspace Pointer to workspace generated from forward /// propagation. void lrn_forward(lrn_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace = nullptr); /// Computing the gradient of a specified local response normalization /// function. /// \param [in] desc Local response normalization descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. 
  /// \param [out] diff_src Pointer to differential source data.
  /// \param [in] workspace Pointer to workspace used for backward propagation.
  void lrn_backward(lrn_desc &desc, float alpha,
                    const memory_desc_ext &dst_desc, void *dst,
                    const memory_desc_ext &diff_dst_desc, void *diff_dst,
                    const memory_desc_ext &src_desc, void *src, float beta,
                    const memory_desc_ext &diff_src_desc, void *diff_src,
                    ::dnnl::memory *workspace = nullptr);
  /// Setting all elements of a memory to a given value asynchronously.
  /// \param [in] src_desc Source memory descriptor.
  /// \param [in] src Pointer to source data.
  /// \param [in] valuePtr Pointer to a single value.
  /// \returns An event representing the fill operations.
  sycl::event async_fill(const memory_desc_ext &src_desc, void *src,
                         const void *valuePtr);
  /// Copying the scaled data from a memory to another memory with a different
  /// description asynchronously.
  /// \param [in] alpha Value to scaling factors used to scale the computed
  /// value.
  /// \param [in] src_desc Source memory descriptor.
  /// \param [in] src Pointer to source data.
  /// \param [in] beta Value to scaling factors used to scale the prior value
  /// in the destination memory.
  /// \param [in] dst_desc Destination memory descriptor.
  /// \param [out] dst Pointer to destination data.
  /// \returns An event representing the reorder operations.
  sycl::event async_reorder(float alpha, const memory_desc_ext &src_desc,
                            void *src, float beta,
                            const memory_desc_ext &dst_desc, void *dst);
  /// Scaling all the elements of a memory by a given factor asynchronously.
  /// \param [in] alpha Value to scaling factors.
  /// \param [in] src_desc Source memory descriptor.
  /// \param [out] src Pointer to source data.
  /// \returns An event representing the scale operations.
  sycl::event async_scale(float alpha, const memory_desc_ext &src_desc,
                          void *src);
  /// Adding the scaled values of a memory to another memory asynchronously.
  /// \param [in] alpha Value to scaling factors used to scale the computed
  /// value.
  /// \param [in] src_desc Source memory descriptor.
  /// \param [in] src Pointer to source data.
  /// \param [in] beta Value to scaling factors used to scale the prior value
  /// in the destination memory.
  /// \param [in] dst_desc Destination memory descriptor.
  /// \param [out] dst Pointer to destination data.
  /// \returns An event representing the sum operations.
  sycl::event async_sum(float alpha, const memory_desc_ext &src_desc,
                        void *src, float beta, const memory_desc_ext &dst_desc,
                        void *dst);
  /// Performing a specified binary operation asynchronously.
  /// \param [in] op Specified binary operation.
  /// \param [in] alpha_0 Value to scaling factors used to scale the src_0
  /// value.
  /// \param [in] src_desc_0 Source 0 memory descriptor.
  /// \param [in] src_0 Pointer to source 0 data.
  /// \param [in] alpha_1 Value to scaling factors used to scale the src_1
  /// value.
  /// \param [in] src_desc_1 Source 1 memory descriptor.
  /// \param [in] src_1 Pointer to source 1 data.
  /// \param [in] beta Value to scaling factors used to scale the prior value
  /// in the destination memory.
  /// \param [in] dst_desc Destination memory descriptor.
  /// \param [out] dst Pointer to destination data.
  /// \returns An event representing the binary operations.
  sycl::event async_binary(binary_op op, float alpha_0,
                           const memory_desc_ext &src_desc_0, void *src_0,
                           float alpha_1, const memory_desc_ext &src_desc_1,
                           void *src_1, float beta,
                           const memory_desc_ext &dst_desc, void *dst);
  /// Performing a specified reduction operation asynchronously.
/// \param [in] op Specified reduction operation. /// \param [in] alpha Value to scaling factors used to scale the data /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \returns An event representing the reduction operations. sycl::event async_reduction(reduction_op op, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst); /// Computing a specified activation function value asynchronously. /// \param [in] desc Activation descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \returns An event representing the activation forward operations. sycl::event async_activation_forward(activation_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst); /// Computing the gradient of a specified activation function asynchronously. /// \param [in] desc Activation descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \returns An event representing the activation backward operations. sycl::event async_activation_backward(activation_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src); /// Computing a specified pooling function value asynchronously. /// \param [in] desc Pooling descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [out] workspace Pointer to workspace generated from forward propagation. /// \returns An event representing the pooling forward operations. sycl::event async_pooling_forward(pooling_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace = nullptr); /// Computing the gradient of a specified pooling function asynchronously. /// \param [in] desc Activation descriptor. 
/// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential /// source data. /// \param [in] workspace Pointer to workspace used for backward /// propagation. /// \returns An event representing the pooling backward operations. sycl::event async_pooling_backward(pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src, ::dnnl::memory *workspace = nullptr); /// Computing a specified softmax function value asynchronously. /// \param [in] alg Softmax algorithm. /// \param [in] mode Softmax mode. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \returns An event representing the softmax forward operations. sycl::event async_softmax_forward(softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst); /// Computing the gradient of a specified softmax function asynchronously. /// \param [in] alg Softmax algorithm. /// \param [in] mode Softmax mode. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \returns An event representing the softmax backward operations. sycl::event async_softmax_backward(softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_src_desc, void *diff_src); /// Computing a specified local response normalization function value /// asynchronously. /// \param [in] desc Local response normalization descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. 
/// \param [out] workspace Pointer to workspace generated from forward /// propagation. /// \returns An event representing the lrn forward operations. sycl::event async_lrn_forward(lrn_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace = nullptr); /// Computing the gradient of a specified local response normalization /// function asynchronously. /// \param [in] desc Local response normalization descriptor. /// \param [in] alpha Value to scaling factors used to scale the computed value. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the differential destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \param [in] workspace Pointer to workspace used for backward propagation. /// \returns An event representing the lrn backward operations. sycl::event async_lrn_backward(lrn_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src, ::dnnl::memory *workspace = nullptr); /// Derives a memory descriptor for the batch normalization scale, bias, mean, /// variance from the source memory descriptor and batch normalization mode. /// \param [out] desc Derived memory descriptor. /// \param [in] src_desc Source memory descriptor. /// \param [in] mode Batch normalization mode. static void derive_batch_normalization_memory_desc(memory_desc_ext &desc, const memory_desc_ext &src_desc, batch_normalization_mode mode); /// Derives a memory descriptor for the batch normalization scale, bias, mean, /// variance from the source memory descriptor and batch normalization mode. /// \param [out] scale_bias_desc Derived scale and bias memory descriptor. /// \param [out] mean_var_desc Derived mean and var memory descriptor. /// \param [in] src_desc Source memory descriptor. /// \param [in] mode Batch normalization mode. static void derive_batch_normalization_memory_desc(memory_desc_ext &scale_bias_desc, memory_desc_ext &mean_var_desc, const memory_desc_ext &src_desc, batch_normalization_mode mode); /// Get the size of workspace that needed by batch normalization. The data stored /// in workspace must be preserved between forward and backward. /// \param [in] ops Batch normalization operation mode. This mode can set to /// perform only batch normalization, or batch normalization followed by /// activation, or batch normalization followed by element-wise addition and /// activation. /// \param [in] src_desc Source memory descriptor. /// \returns Size of workspace. size_t get_batch_normalization_workspace_size( batch_normalization_ops ops, const memory_desc_ext &src_desc); /// Computing a specified batch normalization inference stage function value /// asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. 
/// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory /// descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] bias Pointer to bias data. /// \param [in] mean Pointer to mean data. /// \param [in] var Pointer to variance data. /// \returns An event representing the batch normalization forward operations. sycl::event async_batch_normalization_forward_inference( batch_normalization_mode mode, float epsilon, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias, void *mean, void *var); /// Computing a specified batch normalization inference stage function value /// asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] ops Batch normalization operation mode. This mode can set to /// perform only batch normalization, or batch normalization followed by /// activation, or batch normalization followed by element-wise addition and /// activation. /// \param [in] adesc Activation operation descriptor. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [in] summand_desc Summand memory descriptor. /// \param [in] summand Pointer to summand data. /// \param [in] scale_bias_desc Scale, bias memory descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] bias Pointer to bias data. /// \param [in] mean_var_desc Mean, variance memory descriptor. /// \param [in] mean Pointer to mean data. /// \param [in] var Pointer to variance data. /// \returns An event representing the batch normalization forward operations. sycl::event async_batch_normalization_forward_inference( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &scale_bias_desc, void *scale, void *bias, const memory_desc_ext &mean_var_desc, void *mean, void *var); /// Computing a specified batch normalization training stage function value /// asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] factor Factor value used in running mean and variance /// computation. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. 
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory /// descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] bias Pointer to bias data. /// \param [out] running_mean Pointer to running mean data. /// \param [out] running_var Pointer to running variance data. /// \param [out] saved_mean Pointer to optional cache to save mean data. /// \param [out] saved_var Pointer to optional cache to save variance data. /// \returns An event representing the batch normalization forward operations. sycl::event async_batch_normalization_forward_training( batch_normalization_mode mode, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias, void *running_mean, void *running_var, void *saved_mean, void *saved_var); /// Computing a specified batch normalization training stage function value /// asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] ops Batch normalization operation mode. This mode can set to /// perform only batch normalization, or batch normalization followed by /// activation, or batch normalization followed by element-wise addition and /// activation. /// \param [in] adesc Activation operation descriptor. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] factor Factor value used in running mean and variance /// computation. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [in] summand_desc Summand memory descriptor. /// \param [in] summand Pointer to summand data. /// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory /// descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] bias Pointer to bias data. /// \param [out] running_mean Pointer to running mean data. /// \param [out] running_var Pointer to running variance data. /// \param [out] saved_mean Pointer to optional cache to save mean data. /// \param [out] saved_var Pointer to optional cache to save variance data. /// \param [in] workspace_size Size of workspace. /// \param [out] workspace Pointer to workspace generated from forward /// propagation. /// \returns An event representing the batch normalization forward operations. sycl::event async_batch_normalization_forward_training( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias, void *running_mean, void *running_var, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace); /// Computing a specified batch normalization training stage function value /// asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] ops Batch normalization operation mode. 
This mode can set to /// perform only batch normalization, or batch normalization followed by /// activation, or batch normalization followed by element-wise addition and /// activation. /// \param [in] adesc Activation operation descriptor. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] factor Factor value used in running mean and variance /// computation. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [in] summand_desc Summand memory descriptor. /// \param [in] summand Pointer to summand data. /// \param [in] scale_bias_desc Scale, bias memory descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] bias Pointer to bias data. /// \param [in] mean_var_desc Mean, variance memory descriptor. /// \param [out] running_mean Pointer to running mean data. /// \param [out] running_var Pointer to running variance data. /// \param [out] saved_mean Pointer to optional cache to save mean data. /// \param [out] saved_var Pointer to optional cache to save variance data. /// \param [in] workspace_size Size of workspace. /// \param [out] workspace Pointer to workspace generated from forward /// propagation. /// \returns An event representing the batch normalization forward operations. sycl::event async_batch_normalization_forward_training( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &scale_bias_desc, void *scale, void *bias, const memory_desc_ext &mean_var_desc, void *running_mean, void *running_var, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace); /// Computing the gradient of a specified batch normalization function asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] alpha_data Value to scaling factors used to scale the computed /// data value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] beta_data Value to scaling factors used to scale the prior value /// in the data memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \param [in] alpha_param Value to scaling factors used to scale the computed /// parameter value. /// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean, /// variance memory descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] beta_param Value to scaling factors used to scale the prior value /// in the parameter memory. /// \param [in] diff_scale Pointer to differential scale data. /// \param [in] diff_bias Pointer to differential bias data. /// \param [in] saved_mean Pointer to optional cache saved mean data in forward. 
/// \param [in] saved_var Pointer to optional cache saved variance data in forward. /// \returns An event representing the batch normalization backward operations. sycl::event async_batch_normalization_backward( batch_normalization_mode mode, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale, float beta_param, void *diff_scale, void *diff_bias, void *saved_mean, void *saved_var); /// Computing the gradient of a specified batch normalization function /// asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] ops Batch normalization operation mode. This mode can set to /// perform only batch normalization, or batch normalization followed by /// activation, or batch normalization followed by element-wise addition and /// activation. /// \param [in] adesc Activation operation descriptor. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] alpha_data Value to scaling factors used to scale the computed /// data value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] beta_data Value to scaling factors used to scale the prior value /// in the data memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \param [in] diff_summand_desc Differential summand memory descriptor. /// \param [out] diff_summand Pointer to differential summand data. /// \param [in] alpha_param Value to scaling factors used to scale the computed /// parameter value. /// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean, /// variance memory descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] bias Pointer to bias data. /// \param [in] beta_param Value to scaling factors used to scale the prior value /// in the parameter memory. /// \param [out] diff_scale Pointer to differential scale data. /// \param [out] diff_bias Pointer to differential bias data. /// \param [in] saved_mean Pointer to optional cache saved mean data in forward. /// \param [in] saved_var Pointer to optional cache saved variance data in forward. /// \param [in] workspace_size Size of workspace. /// \param [in] workspace Pointer to workspace used for backward propagation. /// \returns An event representing the batch normalization backward operations. 
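  /// Illustrative call sketch (assumptions: all descriptors, device pointers,
  /// and the workspace come from a matching
  /// async_batch_normalization_forward_training call; the scaling factors
  /// shown are the common identity choices):
  /// \code
  /// sycl::event e = engine.async_batch_normalization_backward(
  ///     batch_normalization_mode::spatial,
  ///     batch_normalization_ops::add_activation, adesc, 1e-5f,
  ///     /*alpha_data=*/1.f, src_desc, src, dst_desc, dst, diff_dst_desc,
  ///     diff_dst, /*beta_data=*/0.f, diff_src_desc, diff_src,
  ///     diff_summand_desc, diff_summand, /*alpha_param=*/1.f,
  ///     diff_scale_bias_mean_var_desc, scale, bias, /*beta_param=*/0.f,
  ///     diff_scale, diff_bias, saved_mean, saved_var, workspace_size,
  ///     workspace);
  /// e.wait();
  /// \endcode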
sycl::event async_batch_normalization_backward( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, const memory_desc_ext &diff_summand_desc, void *diff_summand, float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale, void *bias, float beta_param, void *diff_scale, void *diff_bias, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace); /// Computing the gradient of a specified batch normalization function /// asynchronously. /// \param [in] mode Batch normalization mode. /// \param [in] ops Batch normalization operation mode. This mode can set to /// perform only batch normalization, or batch normalization followed by /// activation, or batch normalization followed by element-wise addition and /// activation. /// \param [in] adesc Activation operation descriptor. /// \param [in] epsilon Epsilon value used in computation. /// \param [in] alpha_data Value to scaling factors used to scale the computed /// data value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] beta_data Value to scaling factors used to scale the prior value /// in the data memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \param [in] diff_summand_desc Differential summand memory descriptor. /// \param [out] diff_summand Pointer to differential summand data. /// \param [in] alpha_param Value to scaling factors used to scale the computed /// parameter value. /// \param [in] diff_scale_bias_desc Differential scale, bias memory descriptor. /// \param [in] scale Pointer to scale data. /// \param [in] bias Pointer to bias data. /// \param [in] beta_param Value to scaling factors used to scale the prior value /// in the parameter memory. /// \param [out] diff_scale Pointer to differential scale data. /// \param [out] diff_bias Pointer to differential bias data. /// \param [in] mean_var_desc Differential mean, variance memory descriptor. /// \param [in] saved_mean Pointer to optional cache saved mean data in forward. /// \param [in] saved_var Pointer to optional cache saved variance data in forward. /// \param [in] workspace_size Size of workspace. /// \param [in] workspace Pointer to workspace used for backward propagation. /// \returns An event representing the batch normalization backward operations. 
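  /// The workspace contract (illustrative sketch; the allocation strategy is
  /// an assumption, not a requirement): query the size once, allocate the
  /// buffer on the engine's queue, pass the same pointer to the matching
  /// forward training call and to this backward call, then release it.
  /// \code
  /// size_t workspace_size = engine.get_batch_normalization_workspace_size(
  ///     batch_normalization_ops::activation, src_desc);
  /// void *workspace =
  ///     sycl::malloc_device(workspace_size, *engine.get_queue());
  /// // ... forward training and backward calls that share
  /// // (workspace_size, workspace) go here ...
  /// sycl::free(workspace, *engine.get_queue());
  /// \endcode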
sycl::event async_batch_normalization_backward( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, const memory_desc_ext &diff_summand_desc, void *diff_summand, float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias, float beta_param, void *diff_scale, void *diff_bias, const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace); /// Computing a specified convolution function value asynchronously. /// \param [in] desc Convolution descriptor. /// \param [in] alg Convolution algorithm. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] weight_desc Weight memory descriptor. /// \param [in] weight Pointer to weight data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \returns An event representing the convolution forward operations. sycl::event async_convolution_forward(convolution_desc &desc, ::dnnl::algorithm alg, float alpha, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &weight_desc, void *weight, float beta, const memory_desc_ext &dst_desc, void *dst); /// Computing a specified convolution function value asynchronously. /// \param [in] desc Convolution descriptor. /// \param [in] alg Convolution algorithm. /// \param [in] adesc Activation operation descriptor. /// \param [in] alpha_0 Value to scaling factors used to scale the data /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] weight_desc Weight memory descriptor. /// \param [in] weight Pointer to weight data. /// \param [in] alpha_1 Value to scaling factors used to scale the summand /// value. /// \param [in] summand_desc Summand memory descriptor. /// \param [in] summand Pointer to summand data. /// \param [in] bias_desc Bias memory descriptor. /// \param [in] bias Pointer to bias data. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \returns An event representing the convolution forward operations. sycl::event async_convolution_forward( convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc, float alpha_0, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &weight_desc, void *weight, float alpha_1, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &bias_desc, void *bias, const memory_desc_ext &dst_desc, void *dst); /// Computing the data gradient of a specified convolution function asynchronously. /// \param [in] desc Convolution descriptor. /// \param [in] alg Convolution algorithm. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] weight_desc Weight memory descriptor. /// \param [in] weight Pointer to weight data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. 
/// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \returns An event representing the convolution backward data operations. sycl::event async_convolution_backward_data( convolution_desc &desc, ::dnnl::algorithm alg, float alpha, const memory_desc_ext &weight_desc, void *weight, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_src_desc, void *diff_src); /// Computing the weight gradient of a specified convolution function /// asynchronously. /// \param [in] desc Convolution descriptor. /// \param [in] alg Convolution algorithm. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] diff_weight_desc Differential weight memory descriptor. /// \param [out] diff_weight Pointer to differential weight data. /// \returns An event representing the convolution backward weight operations. sycl::event async_convolution_backward_weight( convolution_desc &desc, ::dnnl::algorithm alg, float alpha, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_weight_desc, void *diff_weight); /// Computing the bias gradient of a specified convolution function /// asynchronously. /// \param [in] alpha Value to scaling factors used to scale the computed /// value. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] beta Value to scaling factors used to scale the prior value /// in the destination memory. /// \param [in] diff_bias_desc Differential bias memory descriptor. /// \param [out] diff_bias Pointer to differential bias data. /// \returns An event representing the convolution backward bias operations. sycl::event async_convolution_backward_bias(float alpha, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_bias_desc, void *diff_bias); /// Getting the required weight space size for specified rnn operation. /// \param [in] desc RNN descriptor. /// \param [out] weight_space_size Size of required weight space. void rnn_get_weight_space_size(const rnn_desc &desc, size_t *weight_space_size); /// Getting the required scratchpad size and workspace size for specified rnn operation. /// \param [in] desc RNN descriptor. /// \param [in] kind Propagation kind. /// \param [in] src_desc Source memory descriptor. /// \param [out] scratchpad_size Size of required scratchpad. /// \param [out] workspace_size Size of required workspace. void rnn_get_scratchpad_workspace_size(const rnn_desc &desc, ::dnnl::prop_kind kind, const memory_desc_ext &src_desc, size_t *scratchpad_size, size_t *workspace_size); /// Computing a specified rnn function value asynchronously. /// \param [in] desc RNN descriptor. /// \param [in] kind Propagation kind. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] dst_desc Destination memory descriptor. 
/// \param [out] dst Pointer to destination data. /// \param [in] iter_desc Recurrent hidden state data memory descriptor. /// \param [in] src_iter Pointer to input recurrent hidden state data. /// \param [out] dst_iter Pointer to output recurrent hidden state data. /// \param [in] iter_c_desc Recurrent cell state data memory descriptor. /// \param [in] src_iter_c Pointer to input recurrent cell state data. /// \param [out] dst_iter_c Pointer to output recurrent cell state data. /// \param [in] weight_size Size of weight memory. /// \param [in] weight Pointer to weight data. /// \param [in] scratchpad_size Size of scratchpad memory. /// \param [in] scratchpad Pointer to scratchpad data. /// \param [in] workspace_size Size of workspace memory. /// \param [in] workspace Pointer to workspace data. /// \returns An event representing the status of rnn forward operations. sycl::event async_rnn_forward(const rnn_desc &desc, ::dnnl::prop_kind kind, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c, size_t weight_size, void *weight, size_t scratchpad_size, void *scratchpad, size_t workspace_size, void *workspace); /// Computing the data and weight gradient of a specified rnn function /// asynchronously. /// \param [in] desc RNN descriptor. /// \param [in] dst_desc Destination memory descriptor. /// \param [in] dst Pointer to destination data. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [out] diff_src Pointer to differential source data. /// \param [in] iter_desc Recurrent hidden state data memory descriptor. /// \param [in] src_iter Pointer to input recurrent hidden state data. /// \param [in] diff_dst_iter Pointer to differential output recurrent hidden state data. /// \param [out] diff_src_iter Pointer to differential input recurrent hidden state data. /// \param [in] iter_c_desc Recurrent cell state data memory descriptor. /// \param [in] src_iter_c Pointer to input recurrent cell state data. /// \param [in] diff_dst_iter_c Pointer to differential output recurrent cell state data. /// \param [out] diff_src_iter_c Pointer to differential input recurrent cell state data. /// \param [in] weight_size Size of weight memory. /// \param [in] weight Pointer to weight data. /// \param [out] diff_weight Pointer to differential weight data. /// \param [in] scratchpad_size Size of scratchpad memory. /// \param [in] scratchpad Pointer to scratchpad data. /// \param [in] workspace_size Size of workspace memory. /// \param [in] workspace Pointer to workspace data. /// \returns An event representing the status of rnn backward operations. sycl::event async_rnn_backward( const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst, void *diff_dst, const memory_desc_ext &src_desc, void *src, void *diff_src, const memory_desc_ext &iter_desc, void *src_iter, void *diff_dst_iter, void *diff_src_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c, void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size, void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad, size_t workspace_size, void *workspace); /// Getting the required state size for specified dropout operation. /// \returns Required size of state.
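/// A hedged end-to-end sketch of the dropout workflow built from the entry
/// points declared in this section. It assumes an initialized engine_ext
/// `handle`, a dropout_desc `drop_desc` created as described earlier in this
/// header, a sycl::queue `q` matching the engine, and src/dst device buffers;
/// all names and sizes are placeholders:
/// \code
/// memory_desc_ext src_desc, dst_desc;
/// src_desc.set(memory_format_tag::nchw, dpct::library_data_t::real_float,
///              32, 64, 28, 28);
/// dst_desc.set(memory_format_tag::nchw, dpct::library_data_t::real_float,
///              32, 64, 28, 28);
/// size_t state_size = handle.get_dropout_state_size();
/// void *state = sycl::malloc_device(state_size, q);
/// drop_desc.set(handle, /*p=*/0.5f, state, state_size, /*seed=*/1234ULL);
/// size_t ws_size = engine_ext::get_dropout_workspace_size(src_desc);
/// void *workspace = sycl::malloc_device(ws_size, q);
/// sycl::event e = handle.async_dropout_forward(drop_desc, src_desc, src,
///                                              dst_desc, dst, workspace,
///                                              ws_size);
/// \endcode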
size_t get_dropout_state_size(); /// Getting the required workspace size for dropout operation. /// \param [in] src_desc Source memory descriptor. /// \returns Required size of workspace. static size_t get_dropout_workspace_size(const memory_desc_ext &src_desc); /// Computing a specified dropout function value asynchronously. /// \param [in] desc Dropout descriptor. /// \param [in] src_desc Source memory descriptor. /// \param [in] src Pointer to source data. /// \param [in] dst_desc Destination memory descriptor. /// \param [out] dst Pointer to destination data. /// \param [in] workspace Pointer to workspace data. /// \param [in] workspace_size Size of workspace memory. /// \returns An event representing the dropout forward operations. sycl::event async_dropout_forward(dropout_desc &desc, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, void *workspace, size_t workspace_size); /// Computing the gradient of a specified dropout function asynchronously. /// \param [in] desc Dropout descriptor. /// \param [in] diff_dst_desc Differential destination memory descriptor. /// \param [in] diff_dst Pointer to differential destination data. /// \param [in] diff_src_desc Differential source memory descriptor. /// \param [out] diff_src Pointer to differential source data. /// \param [in] workspace Pointer to workspace data. /// \param [in] workspace_size Size of workspace memory. /// \returns An event representing the dropout backward operations. sycl::event async_dropout_backward(dropout_desc &desc, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &diff_src_desc, void *diff_src, void *workspace, size_t workspace_size); }; inline void dropout_desc::restore(engine_ext &engine, float p, void *state, size_t state_size, unsigned long long seed) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) " "Interfaces Project does not support this API."); #else if (state) { std::int64_t required_state_size = engine.get_dropout_state_size(); if (state_size < required_state_size) { throw std::runtime_error("restore: state_size less than required state size."); } sycl::queue *q = engine.get_queue(); _imp->_p = p; _imp->_seed = seed; _imp->_state = state; _imp->_host_state = std::vector<std::uint8_t>(required_state_size); q->memcpy(_imp->_host_state.data(), _imp->_state, required_state_size).wait(); _imp->_rng_engine = oneapi::mkl::rng::load_state<rng_engine_t>( *q, _imp->_host_state.data()); } #endif } inline void dropout_desc::set(engine_ext &engine, float p, void *state, size_t state_size, unsigned long long seed) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) " "Interfaces Project does not support this API."); #else _imp->_p = p; if (state) { std::int64_t required_state_size = engine.get_dropout_state_size(); if (state_size < required_state_size) { throw std::runtime_error("set: no sufficient memory to save states."); } sycl::queue *q = engine.get_queue(); _imp->_seed = seed; _imp->_state = state; _imp->_host_state = std::vector<std::uint8_t>(required_state_size); _imp->_rng_engine = rng_engine_t(*q, seed); oneapi::mkl::rng::save_state(_imp->_rng_engine, _imp->_host_state.data()); q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size).wait(); } #endif } inline ::dnnl::memory::data_type memory_desc_ext::to_dnnl_data_type(dpct::library_data_t dt) { using dnnl_dt = ::dnnl::memory::data_type; switch (dt) { case dpct::library_data_t::real_half: return 
dnnl_dt::f16; case dpct::library_data_t::real_bfloat16: return dnnl_dt::bf16; case dpct::library_data_t::real_float: return dnnl_dt::f32; case dpct::library_data_t::real_int32: return dnnl_dt::s32; case dpct::library_data_t::real_int8: return dnnl_dt::s8; case dpct::library_data_t::real_uint8: return dnnl_dt::u8; case dpct::library_data_t::real_int8_4: return dnnl_dt::s8; case dpct::library_data_t::real_int8_32: return dnnl_dt::s8; case dpct::library_data_t::real_uint8_4: return dnnl_dt::u8; default: throw std::runtime_error("to_dnnl_data_type: unsupported data type."); } } inline dpct::library_data_t memory_desc_ext::to_dpct_library_data_t(::dnnl::memory::data_type dt, unsigned block_size) { using dpct_dt = dpct::library_data_t; using dnnl_dt = ::dnnl::memory::data_type; switch (dt) { case dnnl_dt::f16: return dpct_dt::real_half; case dnnl_dt::bf16: return dpct_dt::real_bfloat16; case dnnl_dt::f32: return dpct_dt::real_float; case dnnl_dt::s32: return dpct_dt::real_int32; case dnnl_dt::s8: if (block_size == 4) { return dpct_dt::real_int8_4; } else if (block_size == 32) { return dpct_dt::real_int8_32; } else { return dpct_dt::real_int8; } case dnnl_dt::u8: if (block_size == 4) { return dpct_dt::real_uint8_4; } else { return dpct_dt::real_uint8; } default: throw std::runtime_error("to_dpct_library_data_t: unsupported data type " "dnnl::memory::data_type::undef."); } } inline ::dnnl::memory::format_tag memory_desc_ext::to_dnnl_format_tag(dpct::library_data_t dt, memory_format_tag tag) { using dpct_dt = dpct::library_data_t; using dpct_tag = memory_format_tag; using dnnl_tag = ::dnnl::memory::format_tag; switch (tag) { case dpct_tag::nchw: return dnnl_tag::nchw; case dpct_tag::nhwc: return dnnl_tag::nhwc; default: if (dt == dpct_dt::real_int8_32) { return dnnl_tag::nChw32c; } else { return dnnl_tag::nChw4c; } } } inline void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt, int n, int c, int h, int w) { _desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt), to_dnnl_format_tag(dt, tag)); } inline void memory_desc_ext::set(dpct::library_data_t dt, int n, int c, int h, int w, int n_stride, int c_stride, int h_stride, int w_stride) { _desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt), {n_stride, c_stride, h_stride, w_stride}); } inline void memory_desc_ext::set(dpct::library_data_t dt, int ndims, const int dims[], const int strides[]) { _desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt), {strides, strides + ndims}); } inline void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt, int ndims, const int dims[]) { _desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt), to_dnnl_format_tag(dt, tag)); } inline void memory_desc_ext::set(rnn_memory_format_tag tag, dpct::library_data_t dt, int t, int n, int c) { if (tag == rnn_memory_format_tag::tnc) { _desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt), ::dnnl::memory::format_tag::tnc); } else if(tag == rnn_memory_format_tag::ntc) { _desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt), ::dnnl::memory::format_tag::ntc); } else { throw std::runtime_error("set: unsupported memory format tag."); } } inline void memory_desc_ext::get(dpct::library_data_t *dt, int *n, int *c, int *h, int *w, int *n_stride, int *c_stride, int *h_stride, int *w_stride) const { unsigned block_size = 1; auto dims = _desc.get_dims(); auto inner_blks = _desc.get_inner_blks(); auto strides = _desc.get_strides(); if (!inner_blks.empty()) { block_size = inner_blks[0]; 
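// Blocked layouts (e.g. nChw4c / nChw32c) carry a channel block size; it is
// used below to rescale the reported strides and to select the blocked
// int8/uint8 data-type mapping.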
} *dt = to_dpct_library_data_t(_desc.get_data_type(), block_size); *n = dims[0]; *c = dims[1]; *h = dims[2]; *w = dims[3]; *n_stride = strides[0] / block_size; *c_stride = strides[1] / block_size; *h_stride = strides[2] / block_size; *w_stride = strides[3] / block_size; } inline void memory_desc_ext::get(dpct::library_data_t *dt, memory_format_tag *tag, int *n, int *c, int *h, int *w) const { unsigned block_size = 1; *tag = memory_format_tag::nchw; auto dims = _desc.get_dims(); auto strides = _desc.get_strides(); auto inner_blks = _desc.get_inner_blks(); if (!inner_blks.empty()) { block_size = inner_blks[0]; *tag = memory_format_tag::nchw_blocked; } if (strides[1] == 1 && dims[1] != 1) { *tag = memory_format_tag::nhwc; } *dt = to_dpct_library_data_t(_desc.get_data_type(), block_size); *n = dims[0]; *c = dims[1]; *h = dims[2]; *w = dims[3]; } inline void memory_desc_ext::get(dpct::library_data_t *dt, rnn_memory_format_tag *tag, int *t, int *n, int *c) const { auto dims = _desc.get_dims(); auto strides = _desc.get_strides(); if (strides[0] >= strides[1]) { *tag = rnn_memory_format_tag::tnc; } else { *tag = rnn_memory_format_tag::ntc; } *dt = to_dpct_library_data_t(_desc.get_data_type(), 1); *t = dims[0]; *n = dims[1]; *c = dims[2]; } inline void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt, int *ndims, int dims[], int strides[]) const { unsigned block_size = 1; auto inner_blks = _desc.get_inner_blks(); auto adims = _desc.get_dims(); auto astrides = _desc.get_strides(); if (!inner_blks.empty()) { block_size = inner_blks[0]; } *dt = to_dpct_library_data_t(_desc.get_data_type(), block_size); *ndims = _desc.get_ndims(); for (int index = 0; index < requested_ndims; index++) { dims[index] = adims[index]; strides[index] = astrides[index] / block_size; } } inline void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt, memory_format_tag *tag, int *ndims, int dims[]) const { unsigned block_size = 1; *tag = memory_format_tag::nchw; auto inner_blks = _desc.get_inner_blks(); auto adims = _desc.get_dims(); auto astrides = _desc.get_strides(); if (!inner_blks.empty()) { block_size = inner_blks[0]; *tag = memory_format_tag::nchw_blocked; } if (astrides[1] == 1 && adims[1] != 1) { *tag = memory_format_tag::nhwc; } *dt = to_dpct_library_data_t(_desc.get_data_type(), block_size); *ndims = _desc.get_ndims(); for (int index = 0; index < requested_ndims; index++) { dims[index] = adims[index]; } } inline void engine_ext::get_rnn_configuration(const ::dnnl::memory::desc &desc, rnn_direction direction, rnn_mode mode, dpct::library_data_t dt, int hidden_size, ::dnnl::memory::data_type *dnnl_dt, ::dnnl::memory::format_tag *tag, int *projection_size, int *output_size, int *seq_length, int *batch_size, int *direction_num, int *gate_num) { if (!desc.is_zero()) { auto dims = desc.get_dims(); auto strides = desc.get_strides(); if (strides[0] >= strides[1]) { *tag = ::dnnl::memory::format_tag::tnc; *seq_length = dims[0]; *batch_size = dims[1]; } else { *tag = ::dnnl::memory::format_tag::ntc; *seq_length = dims[1]; *batch_size = dims[0]; } } if (direction == rnn_direction::bidirectional) { *direction_num = 2; } else { *direction_num = 1; } if (mode == rnn_mode::lstm) { *gate_num = 4; } else if (mode == rnn_mode::gru) { *gate_num = 3; } else { *gate_num = 1; } if (*projection_size != hidden_size) { *output_size = *projection_size; } else { *projection_size = 0; *output_size = hidden_size; } *dnnl_dt = memory_desc_ext::to_dnnl_data_type(dt); } inline void *engine_ext::allocate(const 
memory_desc_ext &data_desc, int count) const { size_t mem_size = data_desc.get_size(); void *mem = sycl::malloc_device(mem_size * count, *_q); return mem; } inline void engine_ext::transform_no_zero(const memory_desc_ext &desc, void *src, void *dst) { ::dnnl::memory::data_type dt = desc.get_desc().get_data_type(); size_t element_num = desc.get_element_num(); switch (dt) { case ::dnnl::memory::data_type::f32: transform_no_zero_with_type<float>(_q, src, dst, element_num); break; case ::dnnl::memory::data_type::f16: transform_no_zero_with_type<sycl::half>(_q, src, dst, element_num); break; case ::dnnl::memory::data_type::s32: transform_no_zero_with_type<int32_t>(_q, src, dst, element_num); break; case ::dnnl::memory::data_type::s8: transform_no_zero_with_type<int8_t>(_q, src, dst, element_num); break; case ::dnnl::memory::data_type::u8: transform_no_zero_with_type<uint8_t>(_q, src, dst, element_num); break; default: throw std::runtime_error("transform_no_zero: unsupported data type."); } } inline ::dnnl::memory::desc engine_ext::get_group_weight_desc(int group_count, const memory_desc_ext &weight_desc) { if (group_count == 1) { return weight_desc.get_desc(); } auto help_weight_desc = weight_desc.get_desc(); int ndims = help_weight_desc.get_ndims(); if (!help_weight_desc.get_inner_blks().empty()) { throw std::runtime_error("get_group_weight_desc: group convolution with " "blocked weight memory unimplemented."); } std::vector<int64_t> new_size; auto old_size = weight_desc.get_dims(); new_size.push_back(group_count); new_size.push_back(old_size[0] / group_count); for (int index = 1; index < old_size.size(); index++) { new_size.push_back(old_size[index]); } std::vector<int64_t> strides = help_weight_desc.get_strides(); ::dnnl::memory::format_tag tag; bool is_nhwc = (strides[1] == 1 && old_size[1] != 1); if (ndims == 4) { if (is_nhwc) { tag = ::dnnl::memory::format_tag::gohwi; } else { tag = ::dnnl::memory::format_tag::goihw; } } else if (ndims == 5) { if (is_nhwc) { tag = ::dnnl::memory::format_tag::godhwi; } else { tag = ::dnnl::memory::format_tag::goidhw; } } help_weight_desc = ::dnnl::memory::desc(new_size, weight_desc.get_desc().get_data_type(), tag); return help_weight_desc; } inline ::dnnl::memory::desc engine_ext::compress_spatial_dimensions_to_channel( const ::dnnl::memory::desc &desc) { int ndims = desc.get_ndims(); auto dims = desc.get_dims(); auto inner_blks = desc.get_inner_blks(); assert(ndims >= 4 && "ndims is at least 4."); std::vector<int64_t> compressed_dims(ndims); compressed_dims[0] = dims[0]; compressed_dims[1] = dims[1]; for (int index = 2; index < ndims; index++) { compressed_dims[1] = compressed_dims[1] * dims[index]; compressed_dims[index] = 1; } if (!inner_blks.empty() && inner_blks[0] == 4) { return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(), ::dnnl::memory::format_tag::nChw4c); } else if (!inner_blks.empty() && inner_blks[0] == 32) { return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(), ::dnnl::memory::format_tag::nChw32c); } std::vector<int64_t> strides(ndims, 1); strides[0] = compressed_dims[1]; return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(), strides); } inline ::dnnl::memory::desc engine_ext::get_bn_scale_bias_mean_var_desc(const ::dnnl::memory::desc &desc, batch_normalization_mode mode) { int ndims = desc.get_ndims(); auto dims = desc.get_dims(); assert(ndims >= 4 && "ndims is at least 4."); int channel_num = 1; if (mode == batch_normalization_mode::spatial) { channel_num = dims[1]; } else { for (int index = 1; index < 
ndims; index++) { channel_num = channel_num * dims[index]; } } return ::dnnl::memory::desc({channel_num}, desc.get_data_type(), ::dnnl::memory::format_tag::a); } inline ::dnnl::memory::desc engine_ext::transfer_memory_desc_to_channel_major_format( const ::dnnl::memory::desc &desc) { if (!desc.get_inner_blks().empty()) { return desc; } int ndims = desc.get_ndims(); auto dims = desc.get_dims(); if (ndims == 4) { return ::dnnl::memory::desc(dims, desc.get_data_type(), ::dnnl::memory::format_tag::nchw); } return ::dnnl::memory::desc(dims, desc.get_data_type(), ::dnnl::memory::format_tag::ncdhw); } /// If alpha = 0 and beta = 1, the destination (dst = alpha * out + /// beta * prior_dst) is left unchanged. In that case this function returns /// true, meaning the operation can exit directly. inline bool engine_ext::scale_parameter_preprocess( const std::vector<output_argument_info> &args) { bool direct_exit = true; for (auto &arg : args) { if (arg._alpha == 0.f) { if (arg._beta != 1.f) { async_scale(arg._beta, arg._desc, arg._data); } } else { direct_exit = false; } } return direct_exit; } inline void engine_ext::derive_batch_normalization_memory_desc( memory_desc_ext &scale_bias_desc, memory_desc_ext &mean_var_desc, const memory_desc_ext &src_desc, batch_normalization_mode mode) { derive_batch_normalization_memory_desc(scale_bias_desc, src_desc, mode); derive_batch_normalization_memory_desc(mean_var_desc, src_desc, mode); } inline void engine_ext::derive_batch_normalization_memory_desc( memory_desc_ext &desc, const memory_desc_ext &src_desc, batch_normalization_mode mode) { int src_ndims = src_desc.get_desc().get_ndims(); auto inner_blks = src_desc.get_desc().get_inner_blks(); if (src_desc.get_desc().get_ndims() != 4 && src_desc.get_desc().get_ndims() != 5) { throw std::runtime_error("derive_batch_normalization_memory_desc: only 4d " "and 5d memory descriptors are supported."); } std::vector<int64_t> dims = src_desc.get_dims(); dims[0] = 1; if (mode == batch_normalization_mode::spatial) { dims[2] = 1; dims[3] = 1; if (src_ndims == 5) { dims[4] = 1; } } auto data_type = src_desc.get_desc().get_data_type(); if (data_type == ::dnnl::memory::data_type::f16) { data_type = ::dnnl::memory::data_type::f32; } if (!inner_blks.empty() && inner_blks[0] == 4) { desc.set_desc(::dnnl::memory::desc(dims, data_type, ::dnnl::memory::format_tag::nChw4c)); } else if (!inner_blks.empty() && inner_blks[0] == 32) { desc.set_desc(::dnnl::memory::desc(dims, data_type, ::dnnl::memory::format_tag::nChw32c)); } else { if (src_ndims == 4) { desc.set_desc(::dnnl::memory::desc(dims, data_type, ::dnnl::memory::format_tag::nchw)); } else { desc.set_desc(::dnnl::memory::desc(dims, data_type, ::dnnl::memory::format_tag::ncdhw)); } } } template <typename primitive_type> sycl::event engine_ext::execute_primitive( const std::pair<detail::primitive_cache_key_type, primitive_type *> &primitive, std::unordered_map<int, ::dnnl::memory> *args, const std::vector<output_argument_info> &output_args, const std::vector<void *> &device_ptrs) { std::vector<void *> caches; int output_arg_num = output_args.size(); for (int i = 0; i < output_arg_num; i++) { if (output_args[i]._beta != 0.f) { auto cache = allocate(output_args[i]._desc); caches.push_back(cache); args->insert( {output_args[i]._name, ::dnnl::memory(output_args[i]._desc.get_desc(), _eng, cache)}); } else { args->insert( {output_args[i]._name, ::dnnl::memory(output_args[i]._desc.get_desc(), _eng, output_args[i]._data)}); } } auto e = ::dnnl::sycl_interop::execute(
*(static_cast<primitive_type *>(primitive.second)), _s, *args); _primitive_cache.put( primitive.first, primitive.second, [](::dnnl::primitive *p) { delete static_cast<primitive_type *>(p); }, e); int cache_index = 0; for (int i = 0; i < output_arg_num; i++) { if (output_args[i]._beta != 0.f) { e = async_sum(output_args[i]._alpha, output_args[i]._desc, caches[cache_index++], output_args[i]._beta, output_args[i]._desc, output_args[i]._data); } else { if (output_args[i]._alpha != 1.f) { e = async_scale(output_args[i]._alpha, output_args[i]._desc, output_args[i]._data); } } } caches.insert(caches.end(), device_ptrs.begin(), device_ptrs.end()); async_free(_q, e, args, caches); return e; } inline ::dnnl::memory::desc engine_ext::bn_reorder_memory_to_channel_major_format( bool is_input, ::dnnl::memory::desc &desc, void *src, void **cache, std::vector<void *> &caches) { ::dnnl::memory::desc result; result = transfer_memory_desc_to_channel_major_format(desc); if ((result != desc) || !src) { *cache = allocate(desc); if (is_input && src) { async_reorder(1.f, desc, src, 0.f, result, *cache); } caches.push_back(*cache); } return result; } inline sycl::event engine_ext::batch_normalization_backward_internal( batch_normalization_mode mode, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias, float beta_param, void *diff_scale, void *diff_bias, const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var) { if (scale_parameter_preprocess( {{alpha_data, beta_data, diff_src_desc, diff_src}, {alpha_param, beta_param, diff_scale_bias_desc, diff_scale}, {alpha_param, beta_param, diff_scale_bias_desc, diff_bias}})) { return sycl::event(); } std::vector<void *> caches; void *reordered_src = nullptr, *reordered_diff_dst = nullptr, *reordered_diff_src = nullptr, *reordered_scale = nullptr, *reordered_bias = nullptr, *reordered_diff_scale = nullptr, *reordered_diff_bias = nullptr, *reordered_saved_mean = nullptr, *reordered_saved_var = nullptr; ::dnnl::memory::desc help_src_desc = src_desc.get_desc(); ::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc(); ::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc(); ::dnnl::memory::desc help_diff_scale_bias_desc = diff_scale_bias_desc.get_desc(); ::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc(); ::dnnl::memory::desc actual_diff_src_desc = help_diff_src_desc; ::dnnl::memory::desc actual_diff_scale_bias_desc = help_diff_scale_bias_desc; if (mode == batch_normalization_mode::per_activation) { help_src_desc = bn_reorder_memory_to_channel_major_format(true, help_src_desc, src, &reordered_src, caches); help_diff_dst_desc = bn_reorder_memory_to_channel_major_format( true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches); help_diff_src_desc = bn_reorder_memory_to_channel_major_format( false, help_diff_src_desc, diff_src, &reordered_diff_src, caches); actual_diff_src_desc = help_diff_src_desc; help_diff_scale_bias_desc = bn_reorder_memory_to_channel_major_format( true, help_diff_scale_bias_desc, scale, &reordered_scale, caches); actual_diff_scale_bias_desc = help_diff_scale_bias_desc; if (bias) { bn_reorder_memory_to_channel_major_format(true, help_diff_scale_bias_desc, bias, &reordered_bias, caches); } bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc, 
diff_scale, &reordered_diff_scale, caches); bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc, diff_bias, &reordered_diff_bias, caches); help_mean_var_desc = bn_reorder_memory_to_channel_major_format( true, help_mean_var_desc, saved_mean, &reordered_saved_mean, caches); bn_reorder_memory_to_channel_major_format(true, help_mean_var_desc, saved_var, &reordered_saved_var, caches); help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc); help_diff_src_desc = compress_spatial_dimensions_to_channel(help_diff_src_desc); help_diff_dst_desc = compress_spatial_dimensions_to_channel(help_diff_dst_desc); } else { if ((help_src_desc != help_diff_dst_desc) || (help_src_desc != help_diff_src_desc) || (help_diff_dst_desc != help_diff_src_desc)) { help_src_desc = bn_reorder_memory_to_channel_major_format( true, help_src_desc, src, &reordered_src, caches); help_diff_dst_desc = bn_reorder_memory_to_channel_major_format( true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches); help_diff_src_desc = bn_reorder_memory_to_channel_major_format( false, help_diff_src_desc, diff_src, &reordered_diff_src, caches); actual_diff_src_desc = help_diff_src_desc; } } help_diff_scale_bias_desc = get_bn_scale_bias_mean_var_desc(help_diff_scale_bias_desc, mode); help_mean_var_desc = get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode); auto forward_primitive = create_primitive_desc<::dnnl::batch_normalization_forward>( ::dnnl::prop_kind::forward_training, help_src_desc, help_diff_dst_desc, epsilon, ::dnnl::normalization_flags::use_scale | ::dnnl::normalization_flags::use_shift); auto primitive = create_primitive<::dnnl::batch_normalization_backward>( ::dnnl::prop_kind::backward, help_diff_src_desc, help_diff_dst_desc, help_src_desc, epsilon, ::dnnl::normalization_flags::use_scale | ::dnnl::normalization_flags::use_shift, forward_primitive); void *dst_cache = nullptr; if (!saved_mean && !saved_var) { dst_cache = allocate(diff_dst_desc); if (!reordered_saved_mean) { reordered_saved_mean = allocate(mean_var_desc); caches.push_back(reordered_saved_mean); } if (!reordered_saved_var) { reordered_saved_var = allocate(mean_var_desc); caches.push_back(reordered_saved_var); } if (!bias) { _q->fill(reordered_bias, 0, diff_scale_bias_desc.get_size()); } batch_normalization_forward_internal( true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, diff_dst_desc, dst_cache, diff_scale_bias_desc, scale, bias ? bias : reordered_bias, mean_var_desc, reordered_saved_mean, reordered_saved_var, nullptr, nullptr); caches.push_back(dst_cache); } auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(help_src_desc, _eng, reordered_src ? reordered_src : src)}}, {DNNL_ARG_SCALE, {::dnnl::memory(help_diff_scale_bias_desc, _eng, reordered_scale ? reordered_scale : scale)}}, {DNNL_ARG_MEAN, {::dnnl::memory(help_mean_var_desc, _eng, reordered_saved_mean ? reordered_saved_mean : saved_mean)}}, {DNNL_ARG_VARIANCE, {::dnnl::memory(help_mean_var_desc, _eng, reordered_saved_var ? reordered_saved_var : saved_var)}}, {DNNL_ARG_DIFF_DST, {::dnnl::memory(help_diff_src_desc, _eng, reordered_diff_dst ? reordered_diff_dst : diff_dst)}}}; sycl::event e = execute_primitive( primitive, execution_args, {{alpha_data, beta_data, DNNL_ARG_DIFF_SRC, help_diff_src_desc, reordered_diff_src ? reordered_diff_src : diff_src}, {alpha_param, beta_param, DNNL_ARG_DIFF_SCALE, help_diff_scale_bias_desc, reordered_diff_scale ? 
reordered_diff_scale : diff_scale}, {alpha_param, beta_param, DNNL_ARG_DIFF_SHIFT, help_diff_scale_bias_desc, reordered_diff_bias ? reordered_diff_bias : diff_bias}}); if (actual_diff_src_desc != diff_src_desc.get_desc() && reordered_diff_src) { e = async_reorder(1.f, actual_diff_src_desc, reordered_diff_src, 0.f, diff_src_desc, diff_src); } if (actual_diff_scale_bias_desc != diff_scale_bias_desc.get_desc() && reordered_diff_scale && reordered_diff_bias) { async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_scale, 0.f, diff_scale_bias_desc, diff_scale); e = async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_bias, 0.f, diff_scale_bias_desc, diff_bias); } _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { for (auto ptr : caches) { sycl::free(ptr, *_q); } }); }); return e; } inline sycl::event engine_ext::batch_normalization_forward_internal( bool is_infer, batch_normalization_mode mode, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &scale_bias_desc, void *scale, void *bias, const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var, void *running_mean, void *running_var) { if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) { return sycl::event(); } std::vector<void *> caches; void *reordered_src = nullptr, *reordered_dst = nullptr, *reordered_scale = nullptr, *reordered_bias = nullptr, *reordered_saved_mean = nullptr, *reordered_saved_var = nullptr; ::dnnl::memory::desc help_src_desc = src_desc.get_desc(); ::dnnl::memory::desc help_dst_desc = dst_desc.get_desc(); ::dnnl::memory::desc help_scale_bias_desc = scale_bias_desc.get_desc(); ::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc(); ::dnnl::memory::desc actual_dst_desc = help_dst_desc; ::dnnl::memory::desc actual_mean_var_desc = help_mean_var_desc; if (mode == batch_normalization_mode::per_activation) { help_src_desc = bn_reorder_memory_to_channel_major_format(true, help_src_desc, src, &reordered_src, caches); help_dst_desc = bn_reorder_memory_to_channel_major_format( false, help_dst_desc, dst, &reordered_dst, caches); actual_dst_desc = help_dst_desc; help_scale_bias_desc = bn_reorder_memory_to_channel_major_format( true, help_scale_bias_desc, scale, &reordered_scale, caches); bn_reorder_memory_to_channel_major_format(true, help_scale_bias_desc, bias, &reordered_bias, caches); help_mean_var_desc = bn_reorder_memory_to_channel_major_format( is_infer, help_mean_var_desc, saved_mean, &reordered_saved_mean, caches); actual_mean_var_desc = help_mean_var_desc; bn_reorder_memory_to_channel_major_format(is_infer, help_mean_var_desc, saved_var, &reordered_saved_var, caches); help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc); help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc); } else { if (help_src_desc != help_dst_desc) { help_src_desc = bn_reorder_memory_to_channel_major_format( true, help_src_desc, src, &reordered_src, caches); help_dst_desc = bn_reorder_memory_to_channel_major_format( false, help_dst_desc, dst, &reordered_dst, caches); actual_dst_desc = help_dst_desc; } } help_scale_bias_desc = get_bn_scale_bias_mean_var_desc(help_scale_bias_desc, mode); help_mean_var_desc = get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode); ::dnnl::prop_kind kind; ::dnnl::normalization_flags flag = ::dnnl::normalization_flags::use_scale | ::dnnl::normalization_flags::use_shift; if (is_infer) { kind = 
::dnnl::prop_kind::forward_inference; flag = ::dnnl::normalization_flags::use_global_stats | flag; } else { kind = ::dnnl::prop_kind::forward_training; } auto primitive = create_primitive<::dnnl::batch_normalization_forward>( kind, help_src_desc, help_dst_desc, epsilon, flag); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(help_src_desc, _eng, reordered_src ? reordered_src : src)}}, {DNNL_ARG_SCALE, {::dnnl::memory(help_scale_bias_desc, _eng, reordered_scale ? reordered_scale : scale)}}, {DNNL_ARG_SHIFT, {::dnnl::memory(help_scale_bias_desc, _eng, reordered_bias ? reordered_bias : bias)}}, {DNNL_ARG_MEAN, {::dnnl::memory(help_mean_var_desc, _eng, reordered_saved_mean ? reordered_saved_mean : saved_mean)}}, {DNNL_ARG_VARIANCE, {::dnnl::memory(help_mean_var_desc, _eng, reordered_saved_var ? reordered_saved_var : saved_var)}}}; sycl::event e = execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, help_dst_desc, reordered_dst ? reordered_dst : dst}}); if (!is_infer && running_var) { auto src_ndim = src_desc.get_desc().get_ndims(); auto src_dims = src_desc.get_dims(); int element_num = src_dims[0]; if (mode == batch_normalization_mode::spatial) { for (int index = 2; index < src_ndim; index++) { element_num *= src_dims[index]; } } float unbias_factor = element_num / (element_num - 1.f); async_scale(1.f - factor, mean_var_desc, running_var); e = async_sum(factor * unbias_factor, mean_var_desc, reordered_saved_var ? reordered_saved_var : saved_var, 1.f, mean_var_desc, running_var); } if (!is_infer && running_mean) { e = async_sum(factor, mean_var_desc, reordered_saved_mean ? reordered_saved_mean : saved_mean, (1.f - factor), mean_var_desc, running_mean); } if (reordered_dst && (actual_dst_desc != dst_desc.get_desc())) { e = async_reorder(1.f, actual_dst_desc, reordered_dst, 0.f, dst_desc, dst); } if (!is_infer && reordered_saved_mean && reordered_saved_var && saved_mean && saved_var && (actual_mean_var_desc != mean_var_desc.get_desc())) { e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_mean, 0.f, mean_var_desc, saved_mean); e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_var, 0.f, mean_var_desc, saved_var); } _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { for (auto ptr : caches) { sycl::free(ptr, *_q); } }); }); return e; } inline sycl::event engine_ext::rnn_forward_internal( const rnn_desc &desc, ::dnnl::prop_kind kind, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c, size_t weight_size, void *weight, size_t workspace_size, void *workspace, size_t scratchpad_size, void *scratchpad, bool is_get_execution_args, size_t *weight_size_query, size_t *workspace_size_query, size_t *scratchpad_size_query) { ::dnnl::memory::data_type src_dt; ::dnnl::memory::format_tag src_format_tag; rnn_mode mode; rnn_bias_mode bias_mode; rnn_direction direction; dpct::library_data_t dt; int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0, layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0, seq_length = 1, batch_size = 1; std::vector<void *> data = {src, dst, src_iter, dst_iter, src_iter_c, dst_iter_c, weight, workspace, scratchpad}; std::vector<int> offset(6, 0); void *input_layer_cache = nullptr, *hidden_layer_cache = nullptr; sycl::event e; desc.get(&mode, &bias_mode, &direction, &dt, 
&input_size, &hidden_size, &projection_size, &layer_size); get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size, &src_dt, &src_format_tag, &projection_size, &output_size, &seq_length, &batch_size, &direction_num, &gate_num); if (direction == rnn_direction::bidirectional) { // Here to combine the oneDNN bidirectional_sum and // bidirectional_concat config, so call execute_rnn_forward_primitive // twice. if (layer_size > 1) { if (!is_get_execution_args) { input_layer_cache = allocate(src_desc); hidden_layer_cache = allocate(src_desc); _q->memcpy(input_layer_cache, src, src_desc.get_size()); } data[0] = input_layer_cache; data[1] = hidden_layer_cache; e = execute_rnn_forward_primitive( mode, kind, ::dnnl::rnn_direction::bidirectional_sum, bias_mode, src_dt, src_format_tag, seq_length, batch_size, output_size, output_size, 1, direction_num, hidden_size, gate_num, projection_size, data, offset, layer_size - 1, weight_size_query, workspace_size_query, scratchpad_size_query); data[0] = ((layer_size - 1) % 2 == 0) ? input_layer_cache : hidden_layer_cache; data[1] = dst; } e = execute_rnn_forward_primitive( mode, kind, ::dnnl::rnn_direction::bidirectional_concat, bias_mode, src_dt, src_format_tag, seq_length, batch_size, output_size, 2 * output_size, 1, direction_num, hidden_size, gate_num, projection_size, data, offset, 1, weight_size_query, workspace_size_query, scratchpad_size_query); } else { e = execute_rnn_forward_primitive( mode, kind, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode, src_dt, src_format_tag, seq_length, batch_size, output_size, output_size, layer_size, direction_num, hidden_size, gate_num, projection_size, data, offset, 1, weight_size_query, workspace_size_query, scratchpad_size_query); } if (is_get_execution_args) { return e; } if (input_layer_cache && hidden_layer_cache) { _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { sycl::free(input_layer_cache, *_q); sycl::free(hidden_layer_cache, *_q); }); }); } return e; } inline sycl::event engine_ext::execute_rnn_forward_primitive( rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c, int dst_c, int layer_size, int direction_num, int hidden_size, int gate_num, int projection_size, std::vector<void *> &data, std::vector<int> &offset, int iter_num, size_t *weight_size, size_t *workspace_size, size_t *scratchpad_size) { sycl::event e; ::dnnl::primitive *p = nullptr; detail::primitive_cache_key_type key; std::unordered_map<int, ::dnnl::memory> *execution_args; ::dnnl::memory::desc bias_desc( {layer_size, direction_num, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldgo); ::dnnl::memory::desc weight_layer_desc( {layer_size, direction_num, projection_size ? projection_size : hidden_size, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldigo); ::dnnl::memory::desc weight_iter_desc( {layer_size, direction_num, projection_size ? 
projection_size : hidden_size, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldigo); ::dnnl::memory::desc projection_desc; if (projection_size) { projection_desc = ::dnnl::memory::desc( {layer_size, direction_num, hidden_size, projection_size}, dt, ::dnnl::memory::format_tag::ldio); } if (weight_size) { *weight_size += (weight_layer_desc.get_size() + weight_iter_desc.get_size() + projection_desc.get_size() + bias_desc.get_size()) * iter_num; return e; } ::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag); ::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag); ::dnnl::memory::desc iter_desc( {layer_size, direction_num, batch_size, projection_size ? projection_size : hidden_size}, dt, ::dnnl::memory::format_tag::ldnc); ::dnnl::memory::desc iter_c_desc( {layer_size, direction_num, batch_size, hidden_size}, dt, ::dnnl::memory::format_tag::ldnc); ::dnnl::memory::desc workspace_desc; ::dnnl::memory::desc scratchpad_desc; ::dnnl::primitive_attr attr; attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user); if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) { auto pd = create_primitive_desc<::dnnl::vanilla_rnn_forward>( kind, mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu : ::dnnl::algorithm::eltwise_tanh, direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc, iter_desc, attr); workspace_desc = pd.workspace_desc(); scratchpad_desc = pd.scratchpad_desc(); if (workspace_size && scratchpad_size) { *workspace_size += workspace_desc.get_size() * iter_num; *scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size ? scratchpad_desc.get_size() : *scratchpad_size; } else { auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_forward>(pd); key = r.first; p = r.second; } } else if (mode == rnn_mode::gru) { auto pd = create_primitive_desc<::dnnl::gru_forward>( kind, direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc, iter_desc, attr); workspace_desc = pd.workspace_desc(); scratchpad_desc = pd.scratchpad_desc(); if (workspace_size && scratchpad_size) { *workspace_size += workspace_desc.get_size() * iter_num; *scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size ? scratchpad_desc.get_size() : *scratchpad_size; } else { auto r = create_primitive_with_pd<::dnnl::gru_forward>(pd); key = r.first; p = r.second; } } else if (mode == rnn_mode::lstm) { auto pd = create_primitive_desc<::dnnl::lstm_forward>( kind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc, weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc, dst_desc, iter_desc, iter_c_desc, attr); workspace_desc = pd.workspace_desc(); scratchpad_desc = pd.scratchpad_desc(); if (workspace_size && scratchpad_size) { *workspace_size += workspace_desc.get_size() * iter_num; *scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size ? 
scratchpad_desc.get_size() : *scratchpad_size; } else { auto r = create_primitive_with_pd<::dnnl::lstm_forward>(pd); key = r.first; p = r.second; } } for (int i = 0; i < iter_num; i++) { void *in_cache = data[0], *out_cache = data[1], *dst_iter_c_cache = nullptr, *dst_iter_cache = ((uint8_t *)(data[3]) + offset[1]); if (mode == rnn_mode::lstm) { dst_iter_c_cache = (uint8_t *)(data[4]) + offset[2]; } if (!workspace_size) { execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[0])}}, {DNNL_ARG_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[1])}}, {DNNL_ARG_SCRATCHPAD, {::dnnl::memory(scratchpad_desc, _eng, data[8])}}}; auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data, int &offset) { execution_args->insert( {arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data + offset)}}); offset += d.get_size(); }; insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[0]); insert_args(DNNL_ARG_DST_ITER, iter_desc, data[3], offset[1]); if (mode == rnn_mode::lstm) { insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]); insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[5], offset[3]); } insert_args(DNNL_ARG_WEIGHTS_LAYER, weight_layer_desc, data[6], offset[4]); insert_args(DNNL_ARG_WEIGHTS_ITER, weight_iter_desc, data[6], offset[4]); if (projection_size) { insert_args(DNNL_ARG_WEIGHTS_PROJECTION, projection_desc, data[6], offset[4]); } if (bias_mode == rnn_bias_mode::none) { _q->memset((uint8_t *)(data[6]) + offset[4], 0, bias_desc.get_size()); } insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[4]); if (kind == ::dnnl::prop_kind::forward_training) { insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[5]); } if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) { execute_primitive<::dnnl::vanilla_rnn_forward>( {key, static_cast<::dnnl::vanilla_rnn_forward *>(p)}, execution_args); } else if (mode == rnn_mode::gru) { execute_primitive<::dnnl::gru_forward>( {key, static_cast<::dnnl::gru_forward *>(p)}, execution_args); } else if (mode == rnn_mode::lstm) { execute_primitive<::dnnl::lstm_forward>( {key, static_cast<::dnnl::lstm_forward *>(p)}, execution_args); } if (i != iter_num - 1) { std::swap(data[0], data[1]); } } if (kind == ::dnnl::prop_kind::forward_training) { if (workspace_size) { *workspace_size += (src_desc.get_size() + dst_desc.get_size() + iter_desc.get_size()); if (mode == rnn_mode::lstm) { *workspace_size += iter_c_desc.get_size(); } } else { _q->memcpy((uint8_t *)(data[7]) + offset[5], in_cache, src_desc.get_size()); offset[5] += src_desc.get_size(); _q->memcpy((uint8_t *)(data[7]) + offset[5], out_cache, dst_desc.get_size()); offset[5] += dst_desc.get_size(); _q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_cache, iter_desc.get_size()); offset[5] += iter_desc.get_size(); if (mode == rnn_mode::lstm) { _q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_c_cache, iter_c_desc.get_size()); offset[5] += iter_c_desc.get_size(); } } } } return e; } inline sycl::event engine_ext::execute_rnn_backward_primitive( rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c, int dst_c, int layer_size, int direction_num, int hidden_size, int gate_num, int projection_size, std::vector<void *> &data, std::vector<int> &offset, int iter_num) { sycl::event e; ::dnnl::primitive *p = nullptr; detail::primitive_cache_key_type key; ::dnnl::prop_kind fkind = 
::dnnl::prop_kind::forward_training; ::dnnl::prop_kind bkind = ::dnnl::prop_kind::backward; ::dnnl::memory::desc bias_desc( {layer_size, direction_num, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldgo); ::dnnl::memory::desc weight_layer_desc( {layer_size, direction_num, projection_size ? projection_size : hidden_size, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldigo); ::dnnl::memory::desc weight_iter_desc( {layer_size, direction_num, projection_size ? projection_size : hidden_size, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldigo); ::dnnl::memory::desc diff_weight_layer_desc( {layer_size, direction_num, projection_size ? projection_size : hidden_size, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldgoi); ::dnnl::memory::desc diff_weight_iter_desc( {layer_size, direction_num, projection_size ? projection_size : hidden_size, gate_num, hidden_size}, dt, ::dnnl::memory::format_tag::ldgoi); ::dnnl::memory::desc projection_desc, diff_projection_desc; if (projection_size) { projection_desc = ::dnnl::memory::desc( {layer_size, direction_num, hidden_size, projection_size}, dt, ::dnnl::memory::format_tag::ldio); diff_projection_desc = ::dnnl::memory::desc( {layer_size, direction_num, hidden_size, projection_size}, dt, ::dnnl::memory::format_tag::ldoi); } ::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag); ::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag); ::dnnl::memory::desc iter_desc( {layer_size, direction_num, batch_size, projection_size ? projection_size : hidden_size}, dt, ::dnnl::memory::format_tag::ldnc); ::dnnl::memory::desc iter_c_desc( {layer_size, direction_num, batch_size, hidden_size}, dt, ::dnnl::memory::format_tag::ldnc); ::dnnl::memory::desc workspace_desc; ::dnnl::memory::desc scratchpad_desc; ::dnnl::primitive_attr attr; attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user); if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) { auto fpd = create_primitive_desc<::dnnl::vanilla_rnn_forward>( fkind, mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu : ::dnnl::algorithm::eltwise_tanh, direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc, iter_desc, attr); auto pd = create_primitive_desc<::dnnl::vanilla_rnn_backward>( bkind, mode == rnn_mode::vanilla_relu ? 
::dnnl::algorithm::eltwise_relu : ::dnnl::algorithm::eltwise_tanh, direction, src_desc, iter_desc, diff_weight_layer_desc, diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc, iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc, iter_desc, fpd, attr); workspace_desc = pd.workspace_desc(); scratchpad_desc = pd.scratchpad_desc(); auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_backward>(pd); key = r.first; p = r.second; } else if (mode == rnn_mode::gru) { auto fpd = create_primitive_desc<::dnnl::gru_forward>( fkind, direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc, iter_desc, attr); auto pd = create_primitive_desc<::dnnl::gru_backward>( bkind, direction, src_desc, iter_desc, diff_weight_layer_desc, diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc, iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc, iter_desc, fpd, attr); workspace_desc = pd.workspace_desc(); scratchpad_desc = pd.scratchpad_desc(); auto r = create_primitive_with_pd<::dnnl::gru_backward>(pd); key = r.first; p = r.second; } else if (mode == rnn_mode::lstm) { auto fpd = create_primitive_desc<::dnnl::lstm_forward>( fkind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc, weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc, dst_desc, iter_desc, iter_c_desc, attr); auto pd = create_primitive_desc<::dnnl::lstm_backward>( bkind, direction, src_desc, iter_desc, iter_c_desc, diff_weight_layer_desc, diff_weight_iter_desc, ::dnnl::memory::desc(), diff_projection_desc, bias_desc, dst_desc, iter_desc, iter_c_desc, src_desc, iter_desc, iter_c_desc, weight_layer_desc, weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc, dst_desc, iter_desc, iter_c_desc, fpd, attr); workspace_desc = pd.workspace_desc(); scratchpad_desc = pd.scratchpad_desc(); auto r = create_primitive_with_pd<::dnnl::lstm_backward>(pd); key = r.first; p = r.second; } for (int i = 0; i < iter_num; i++) { auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DIFF_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[8])}}, {DNNL_ARG_DIFF_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[9])}}, {DNNL_ARG_SCRATCHPAD, {::dnnl::memory(scratchpad_desc, _eng, data[15])}}}; auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data, int &offset) { offset += d.get_size(); execution_args->insert( {arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data - offset)}}); }; if (mode == rnn_mode::lstm) { insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[7], offset[0]); insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]); } insert_args(DNNL_ARG_DST_ITER, iter_desc, data[7], offset[0]); insert_args(DNNL_ARG_DST_LAYER, dst_desc, data[7], offset[0]); insert_args(DNNL_ARG_SRC_LAYER, src_desc, data[7], offset[0]); insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[0]); insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[1]); insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[3]); if (projection_size) { insert_args(DNNL_ARG_WEIGHTS_PROJECTION, diff_projection_desc, data[6], offset[3]); } insert_args(DNNL_ARG_WEIGHTS_ITER, diff_weight_iter_desc, data[6], offset[3]); insert_args(DNNL_ARG_WEIGHTS_LAYER, diff_weight_layer_desc, data[6], offset[3]); insert_args(DNNL_ARG_DIFF_SRC_ITER, iter_desc, data[10], offset[4]); insert_args(DNNL_ARG_DIFF_DST_ITER, iter_desc, data[11], offset[5]); if (mode == rnn_mode::lstm) { insert_args(DNNL_ARG_DIFF_SRC_ITER_C, iter_c_desc, data[12], offset[6]); 
insert_args(DNNL_ARG_DIFF_DST_ITER_C, iter_c_desc, data[13], offset[7]); } insert_args(DNNL_ARG_DIFF_BIAS, bias_desc, data[14], offset[8]); if (bias_mode == rnn_bias_mode::none) { _q->memset((uint8_t *)(data[14]) - offset[8], 0, bias_desc.get_size()); } if (projection_size) { insert_args(DNNL_ARG_DIFF_WEIGHTS_PROJECTION, projection_desc, data[14], offset[8]); } insert_args(DNNL_ARG_DIFF_WEIGHTS_ITER, weight_iter_desc, data[14], offset[8]); insert_args(DNNL_ARG_DIFF_WEIGHTS_LAYER, weight_layer_desc, data[14], offset[8]); if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) { e = execute_primitive<::dnnl::vanilla_rnn_backward>( {key, static_cast<::dnnl::vanilla_rnn_backward *>(p)}, execution_args); } else if (mode == rnn_mode::gru) { e = execute_primitive<::dnnl::gru_backward>( {key, static_cast<::dnnl::gru_backward *>(p)}, execution_args); } else if (mode == rnn_mode::lstm) { e = execute_primitive<::dnnl::lstm_backward>( {key, static_cast<::dnnl::lstm_backward *>(p)}, execution_args); } if (i != iter_num - 1) { std::swap(data[8], data[9]); } } return e; } #define GENERATE_RNN_PRIMITIVE_KEY(name) \ template <> \ inline std::string \ engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \ const ::dnnl::name::primitive_desc &pd) { \ std::stringstream ss; \ ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \ << (std::uint8_t)pd.get_cell_kind() << (std::uint8_t)pd.get_direction() \ << (std::uint8_t)pd.get_algorithm(); \ serialize_mem_desc(ss, pd.src_layer_desc()); \ serialize_mem_desc(ss, pd.src_iter_desc()); \ serialize_mem_desc(ss, pd.dst_layer_desc()); \ serialize_mem_desc(ss, pd.dst_iter_desc()); \ serialize_mem_desc(ss, pd.diff_src_layer_desc()); \ serialize_mem_desc(ss, pd.diff_src_iter_desc()); \ serialize_mem_desc(ss, pd.diff_dst_layer_desc()); \ serialize_mem_desc(ss, pd.diff_dst_iter_desc()); \ serialize_mem_desc(ss, pd.src_iter_c_desc()); \ serialize_mem_desc(ss, pd.dst_iter_c_desc()); \ serialize_mem_desc(ss, pd.diff_src_iter_c_desc()); \ serialize_mem_desc(ss, pd.diff_dst_iter_c_desc()); \ return ss.str(); \ } #define GENERATE_CONVOLUTION_PRIMITIVE_KEY(name, query_type) \ template <> \ inline std::string \ engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \ const ::dnnl::name::primitive_desc &pd) { \ std::stringstream ss; \ ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \ << (std::uint8_t)pd.get_algorithm() \ << (std::uint8_t)pd.get_primitive_attr().get_fpmath_mode() \ << (std::uint8_t)pd.get_group_size(); \ serialize_dims(ss, pd.get_strides()); \ serialize_dims(ss, pd.get_dilations()); \ serialize_dims(ss, pd.get_padding_l()); \ serialize_mem_desc(ss, pd.src_desc()); \ serialize_mem_desc(ss, pd.diff_src_desc()); \ serialize_mem_desc(ss, pd.dst_desc()); \ serialize_mem_desc(ss, pd.diff_dst_desc()); \ serialize_mem_desc(ss, pd.query_type()); \ serialize_mem_desc(ss, pd.weights_desc()); \ serialize_mem_desc(ss, pd.diff_weights_desc()); \ return ss.str(); \ } GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_forward) GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_backward) GENERATE_RNN_PRIMITIVE_KEY(lstm_forward) GENERATE_RNN_PRIMITIVE_KEY(lstm_backward) GENERATE_RNN_PRIMITIVE_KEY(gru_forward) GENERATE_RNN_PRIMITIVE_KEY(gru_backward) GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_forward, bias_desc) GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_data, scratchpad_desc) GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_weights, diff_bias_desc) template <typename primitive_desc_type> std::string 
engine_ext::generate_cache_key(const primitive_desc_type &pd) { std::stringstream ss; auto kind = pd.get_kind(); ss << (std::uint8_t)kind << (std::uint8_t)pd.get_prop_kind() << (std::uint8_t)pd.get_algorithm(); serialize_mem_desc(ss, pd.src_desc()); serialize_mem_desc(ss, pd.diff_src_desc()); serialize_mem_desc(ss, pd.dst_desc()); serialize_mem_desc(ss, pd.diff_dst_desc()); switch (kind) { case ::dnnl::primitive::kind::batch_normalization: ss << pd.get_epsilon() << (std::uint8_t)pd.get_flags(); case ::dnnl::primitive::kind::reduction: ss << pd.get_p(); break; case ::dnnl::primitive::kind::eltwise: ss << pd.get_alpha() << pd.get_beta(); case ::dnnl::primitive::kind::lrn: ss << pd.get_k(); break; case ::dnnl::primitive::kind::pooling: serialize_dims(ss, pd.get_strides()); serialize_dims(ss, pd.get_dilations()); serialize_dims(ss, pd.get_padding_l()); serialize_dims(ss, pd.get_kernel()); break; case ::dnnl::primitive::kind::softmax: ss << pd.get_axis(); break; default: break; } return ss.str(); } template <typename primitive_type, typename... args_type> std::pair<detail::primitive_cache_key_type, primitive_type *> engine_ext::create_primitive(args_type &&...args) { auto pd = create_primitive_desc<primitive_type>(std::forward<args_type>(args)...); return create_primitive_with_pd<primitive_type>(pd); } template <typename primitive_type> std::pair<detail::primitive_cache_key_type, primitive_type *> engine_ext::create_primitive_with_pd( const typename primitive_type::primitive_desc &pd) { detail::primitive_cache_key_type key = generate_cache_key(pd); primitive_type *p = (primitive_type *)_primitive_cache.get(key); if (!p) { p = new primitive_type(pd); } return {key, p}; } template <typename primitive_type, typename... args_type> typename primitive_type::primitive_desc engine_ext::create_primitive_desc(args_type &&...args) { return typename primitive_type::primitive_desc( _eng, std::forward<args_type>(args)...); } inline void engine_ext::fill(const memory_desc_ext &src_desc, void *src, const void *valuePtr) { async_fill(src_desc, src, valuePtr).wait(); } inline void engine_ext::reorder(float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { async_reorder(alpha, src_desc, src, beta, dst_desc, dst).wait(); } inline void engine_ext::scale(float alpha, const memory_desc_ext &src_desc, void *src) { async_scale(alpha, src_desc, src).wait(); } inline void engine_ext::sum(float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { async_sum(alpha, src_desc, src, beta, dst_desc, dst).wait(); } inline void engine_ext::activation_forward(activation_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { async_activation_forward(desc, alpha, src_desc, src, beta, dst_desc, dst) .wait(); } inline void engine_ext::activation_backward( activation_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src) { async_activation_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst, src_desc, src, beta, diff_src_desc, diff_src) .wait(); } inline void engine_ext::pooling_forward(pooling_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace) { 
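  // Synchronous variant: submits the asynchronous pooling primitive below and
  // blocks on the returned SYCL event before control returns to the caller.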
async_pooling_forward(desc, alpha, src_desc, src, beta, dst_desc, dst, workspace).wait(); } inline void engine_ext::pooling_backward( pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src, ::dnnl::memory *workspace) { async_pooling_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst, src_desc, src, beta, diff_src_desc, diff_src, workspace) .wait(); } inline void engine_ext::softmax_forward(softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { async_softmax_forward(alg, mode, alpha, src_desc, src, beta, dst_desc, dst) .wait(); } inline void engine_ext::softmax_backward(softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_src_desc, void *diff_src) { async_softmax_backward(alg, mode, alpha, dst_desc, dst, diff_dst_desc, diff_dst, beta, diff_src_desc, diff_src) .wait(); } inline void engine_ext::lrn_forward(lrn_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace) { async_lrn_forward(desc, alpha, src_desc, src, beta, dst_desc, dst, workspace) .wait(); } inline void engine_ext::lrn_backward(lrn_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src, ::dnnl::memory *workspace) { async_lrn_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst, src_desc, src, beta, diff_src_desc, diff_src, workspace) .wait(); } inline sycl::event engine_ext::async_fill(const memory_desc_ext &src_desc, void *src, const void *valuePtr) { ::dnnl::memory::data_type dt = src_desc.get_desc().get_data_type(); unsigned mem_size = src_desc.get_size(); switch (dt) { case ::dnnl::memory::data_type::f32: return fill_with_type<float>(_q, src, valuePtr, mem_size); case ::dnnl::memory::data_type::f16: return fill_with_type<sycl::half>(_q, src, valuePtr, mem_size); case ::dnnl::memory::data_type::s32: return fill_with_type<int32_t>(_q, src, valuePtr, mem_size); case ::dnnl::memory::data_type::s8: return fill_with_type<int8_t>(_q, src, valuePtr, mem_size); case ::dnnl::memory::data_type::u8: return fill_with_type<uint8_t>(_q, src, valuePtr, mem_size); default: throw std::runtime_error("async_fill: unsupported data type."); } } inline sycl::event engine_ext::async_reorder(float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) { return sycl::event(); } auto pd = ::dnnl::reorder::primitive_desc(_eng, src_desc.get_desc(), _eng, dst_desc.get_desc()); auto primitive = create_primitive_with_pd<::dnnl::reorder>(pd); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}}; return execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, dst_desc, dst}}); } inline sycl::event engine_ext::async_scale(float alpha, const memory_desc_ext &src_desc, void *src) { if (alpha == 1.f) { return sycl::event(); } void *src_cache = 
allocate(src_desc); _q->memcpy(src_cache, src, src_desc.get_size()); auto primitive = create_primitive<::dnnl::eltwise_forward>( ::dnnl::prop_kind::forward_inference, ::dnnl::algorithm::eltwise_linear, src_desc.get_desc(), src_desc.get_desc(), alpha, 0.f); auto args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DST, ::dnnl::memory(src_desc.get_desc(), _eng, src)}, {DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src_cache)}}; return execute_primitive(primitive, args, {}, {src_cache}); } inline sycl::event engine_ext::async_sum(float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { if (alpha == 0.f && beta == 1.f) { return sycl::event(); } void *dst_cache = allocate(dst_desc); _q->memcpy(dst_cache, dst, dst_desc.get_size()); auto pd = create_primitive_desc<::dnnl::sum>( std::vector<float>{alpha, beta}, std::vector<::dnnl::memory::desc>{src_desc.get_desc(), dst_desc.get_desc()}); std::stringstream ss; ss << (std::uint8_t)pd.get_kind() << alpha << beta; serialize_mem_desc(ss, pd.src_desc(0)); serialize_mem_desc(ss, pd.src_desc(1)); detail::primitive_cache_key_type key = ss.str(); ::dnnl::sum *p = (::dnnl::sum *)_primitive_cache.get(key); if (!p) { p = new ::dnnl::sum(pd); } auto args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)}, {DNNL_ARG_MULTIPLE_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)}, {DNNL_ARG_MULTIPLE_SRC + 1, ::dnnl::memory(dst_desc.get_desc(), _eng, dst_cache)}}; return execute_primitive<::dnnl::sum>({key, p}, args, {}, {dst_cache}); } inline sycl::event engine_ext::async_binary(binary_op op, float alpha_0, const memory_desc_ext &src_desc_0, void *src_0, float alpha_1, const memory_desc_ext &src_desc_1, void *src_1, float beta, const memory_desc_ext &dst_desc, void *dst) { ::dnnl::algorithm onednn_algorithm; switch (op) { case binary_op::max: onednn_algorithm = ::dnnl::algorithm::binary_max; break; case binary_op::min: onednn_algorithm = ::dnnl::algorithm::binary_min; break; case binary_op::add: onednn_algorithm = ::dnnl::algorithm::binary_add; break; case binary_op::sub: onednn_algorithm = ::dnnl::algorithm::binary_sub; break; case binary_op::mul: onednn_algorithm = ::dnnl::algorithm::binary_mul; break; case binary_op::div: onednn_algorithm = ::dnnl::algorithm::binary_div; break; case binary_op::sqrt: onednn_algorithm = ::dnnl::algorithm::eltwise_sqrt; break; case binary_op::neg: onednn_algorithm = ::dnnl::algorithm::eltwise_linear; break; } if (onednn_algorithm == ::dnnl::algorithm::eltwise_sqrt || onednn_algorithm == ::dnnl::algorithm::eltwise_linear) { void *src_cache = nullptr, *dst_cache = nullptr; src_cache = allocate(src_desc_0); dst_cache = allocate(dst_desc); _q->memcpy(src_cache, src_0, src_desc_0.get_size()); _q->memcpy(dst_cache, dst, dst_desc.get_size()); async_scale(alpha_0, src_desc_0, src_cache); async_scale(beta, dst_desc, dst_cache); // Let the output = 1 - input to simulate the behavior of neg. 
auto primitive = create_primitive<::dnnl::eltwise_forward>( ::dnnl::prop_kind::forward_inference, onednn_algorithm, src_desc_0.get_desc(), dst_desc.get_desc(), -1.f, 1.f); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(src_desc_0.get_desc(), _eng, src_cache)}}}; execute_primitive( primitive, execution_args, {{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}}); auto e = async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst); _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { sycl::free(src_cache, *_q); sycl::free(dst_cache, *_q); }); }); return e; } auto execution_args = new std::unordered_map<int, ::dnnl::memory>{}; void *src_0_cache = nullptr, *src_1_cache = nullptr, *dst_cache = nullptr; src_0_cache = allocate(src_desc_0); src_1_cache = allocate(src_desc_1); dst_cache = allocate(dst_desc); _q->memcpy(src_0_cache, src_0, src_desc_0.get_size()); _q->memcpy(src_1_cache, src_1, src_desc_1.get_size()); _q->memcpy(dst_cache, dst, dst_desc.get_size()); async_scale(alpha_0, src_desc_0, src_0_cache); async_scale(alpha_1, src_desc_1, src_1_cache); async_scale(beta, dst_desc, dst_cache); execution_args->insert({DNNL_ARG_SRC_0, ::dnnl::memory(src_desc_0.get_desc(), _eng, src_0_cache)}); execution_args->insert({DNNL_ARG_SRC_1, ::dnnl::memory(src_desc_1.get_desc(), _eng, src_1_cache)}); auto primitive = create_primitive<::dnnl::binary>( onednn_algorithm, src_desc_0.get_desc(), src_desc_1.get_desc(), dst_desc.get_desc()); auto e = execute_primitive(primitive, execution_args, {{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}}); async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst); _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { sycl::free(dst_cache, *_q); sycl::free(src_0_cache, *_q); sycl::free(src_1_cache, *_q); }); }); return e; } inline sycl::event engine_ext::async_reduction(reduction_op op, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { if (alpha == 0.f && beta == 1.f) { return sycl::event(); } float p = 2.f; ::dnnl::algorithm onednn_algorithm; void *cache = nullptr; switch (op) { case reduction_op::amax: cache = allocate(src_desc); activation_desc adesc; adesc.set_algorithm(::dnnl::algorithm::eltwise_abs); async_activation_forward(adesc, 1.f, src_desc, src, 0.f, src_desc, cache); onednn_algorithm = ::dnnl::algorithm::reduction_max; src = cache; break; case reduction_op::max: onednn_algorithm = ::dnnl::algorithm::reduction_max; break; case reduction_op::min: onednn_algorithm = ::dnnl::algorithm::reduction_min; break; case reduction_op::sum: onednn_algorithm = ::dnnl::algorithm::reduction_sum; break; case reduction_op::mean: onednn_algorithm = ::dnnl::algorithm::reduction_mean; break; case reduction_op::mul: onednn_algorithm = ::dnnl::algorithm::reduction_mul; break; case reduction_op::mul_no_zeros: cache = allocate(src_desc); transform_no_zero(src_desc, src, cache); onednn_algorithm = ::dnnl::algorithm::reduction_mul; src = cache; break; case reduction_op::norm1: p = 1.f; onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_power_p_sum; break; case reduction_op::norm2: onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_sum; break; } auto primitive = create_primitive<::dnnl::reduction>( onednn_algorithm, src_desc.get_desc(), dst_desc.get_desc(), p, 0.f); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)}}; if (cache) { return execute_primitive(primitive, 
execution_args, {{alpha, beta, DNNL_ARG_DST, dst_desc, dst}}, {cache}); } return execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, dst_desc, dst}}); } inline sycl::event engine_ext::async_activation_forward(activation_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) { return sycl::event(); } auto primitive = create_primitive<::dnnl::eltwise_forward>( ::dnnl::prop_kind::forward, desc.get_algorithm(), src_desc.get_desc(), dst_desc.get_desc(), desc.get_alpha(), desc.get_beta()); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}}; return execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, dst_desc, dst}}); } inline sycl::event engine_ext::async_activation_backward( activation_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src) { if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) { return sycl::event(); } ::dnnl::memory::desc data_desc = dst_desc.get_desc(); auto alg = desc.get_algorithm(); if ((alg == ::dnnl::algorithm::eltwise_clip) || (alg == ::dnnl::algorithm::eltwise_linear) || (alg == ::dnnl::algorithm::eltwise_swish)) { data_desc = src_desc.get_desc(); } auto primitive = create_primitive<::dnnl::eltwise_backward>( alg, diff_src_desc.get_desc(), diff_dst_desc.get_desc(), data_desc, desc.get_alpha(), desc.get_beta(), create_primitive_desc<::dnnl::eltwise_forward>( ::dnnl::prop_kind::forward, alg, src_desc.get_desc(), dst_desc.get_desc(), desc.get_alpha(), desc.get_beta())); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}}, {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}, {DNNL_ARG_DIFF_DST, {::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}}; return execute_primitive( primitive, execution_args, {{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}}); } inline sycl::event engine_ext::async_pooling_forward(pooling_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace) { if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) { return sycl::event(); } int pooling_dim = desc.get_stride().size(); std::vector<int64_t> dilation(pooling_dim, 0); auto primitive_desc = create_primitive_desc<::dnnl::pooling_forward>( ::dnnl::prop_kind::forward_training, desc.get_algorithm(), src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(), desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding()); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}}; ::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng); execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem}); if (workspace) { *workspace = ws_mem; } else { insert_workspace(src, ws_mem); } auto primitive = create_primitive_with_pd<::dnnl::pooling_forward>(primitive_desc); return execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, dst_desc, dst}}); } inline sycl::event engine_ext::async_pooling_backward( pooling_desc &desc, float alpha, const memory_desc_ext 
&dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src, ::dnnl::memory *workspace) { if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) { return sycl::event(); } int pooling_dim = desc.get_stride().size(); std::vector<int64_t> dilation(pooling_dim, 0); auto primitive = create_primitive<::dnnl::pooling_backward>( desc.get_algorithm(), diff_src_desc.get_desc(), diff_dst_desc.get_desc(), desc.get_stride(), desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding(), create_primitive_desc<::dnnl::pooling_forward>( ::dnnl::prop_kind::forward_training, desc.get_algorithm(), src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(), desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding())); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}}, {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}, {DNNL_ARG_DIFF_DST, {::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}}; if (workspace) { execution_args->insert({DNNL_ARG_WORKSPACE, *workspace}); } else { execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)}); } return execute_primitive( primitive, execution_args, {{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}}); } inline sycl::event engine_ext::async_softmax_forward(softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst) { if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) { return sycl::event(); } ::dnnl::memory::desc help_src_desc = src_desc.get_desc(); ::dnnl::memory::desc help_dst_desc = dst_desc.get_desc(); if (mode == softmax_mode::instance) { help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc); help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc); } auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(help_src_desc, _eng, src)}}}; ::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate; if (alg == softmax_algorithm::log) { softmax_alg = ::dnnl::algorithm::softmax_log; } auto primitive = create_primitive<::dnnl::softmax_forward>( ::dnnl::prop_kind::forward, softmax_alg, help_src_desc, help_dst_desc, 1); return execute_primitive( primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, memory_desc_ext(help_dst_desc), dst}}); } inline sycl::event engine_ext::async_softmax_backward( softmax_algorithm alg, softmax_mode mode, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_src_desc, void *diff_src) { if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) { return sycl::event(); } ::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc(); ::dnnl::memory::desc help_dst_desc = dst_desc.get_desc(); ::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc(); if (mode == softmax_mode::instance) { help_diff_src_desc = compress_spatial_dimensions_to_channel(help_diff_src_desc); help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc); help_diff_dst_desc = compress_spatial_dimensions_to_channel(help_diff_dst_desc); } auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DST, {::dnnl::memory(help_dst_desc, _eng, dst)}}, {DNNL_ARG_DIFF_DST, 
{::dnnl::memory(help_diff_dst_desc, _eng, diff_dst)}}}; ::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate; if (alg == softmax_algorithm::log) { softmax_alg = ::dnnl::algorithm::softmax_log; } auto primitive = create_primitive<::dnnl::softmax_backward>( softmax_alg, help_diff_src_desc, help_diff_dst_desc, help_dst_desc, 1, create_primitive_desc<::dnnl::softmax_forward>( ::dnnl::prop_kind::forward, softmax_alg, help_diff_src_desc, help_dst_desc, 1)); return execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DIFF_SRC, memory_desc_ext(help_diff_src_desc), diff_src}}); } inline sycl::event engine_ext::async_lrn_forward(lrn_desc &desc, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, ::dnnl::memory *workspace) { if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) { return sycl::event(); } auto primitive_desc = create_primitive_desc<::dnnl::lrn_forward>( ::dnnl::prop_kind::forward_training, ::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(), dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(), desc.get_beta(), desc.get_k()); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}}; ::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng); execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem}); if (workspace) { *workspace = ws_mem; } else { insert_workspace(src, ws_mem); } auto primitive = create_primitive_with_pd<::dnnl::lrn_forward>(primitive_desc); return execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, dst_desc, dst}}); } inline sycl::event engine_ext::async_lrn_backward(lrn_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &diff_src_desc, void *diff_src, ::dnnl::memory *workspace) { if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) { return sycl::event(); } auto primitive = create_primitive<::dnnl::lrn_backward>( ::dnnl::algorithm::lrn_across_channels, diff_src_desc.get_desc(), diff_dst_desc.get_desc(), src_desc.get_desc(), desc.get_local_size(), desc.get_alpha(), desc.get_beta(), desc.get_k(), create_primitive_desc<::dnnl::lrn_forward>( ::dnnl::prop_kind::forward_training, ::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(), dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(), desc.get_beta(), desc.get_k())); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}}, {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}, {DNNL_ARG_DIFF_DST, {::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}}; if (workspace) { execution_args->insert({DNNL_ARG_WORKSPACE, *workspace}); } else { execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)}); } return execute_primitive( primitive, execution_args, {{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}}); } inline size_t engine_ext::get_batch_normalization_workspace_size( batch_normalization_ops ops, const memory_desc_ext &src_desc) { if(ops == batch_normalization_ops::none) { return 0; } return src_desc.get_size(); } inline sycl::event engine_ext::async_batch_normalization_forward_inference( batch_normalization_mode mode, float epsilon, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext 
&dst_desc, void *dst, const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias, void *mean, void *var) { return batch_normalization_forward_internal( true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst, scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc, mean, var, nullptr, nullptr); } inline sycl::event engine_ext::async_batch_normalization_forward_inference( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &scale_bias_desc, void *scale, void *bias, const memory_desc_ext &mean_var_desc, void *mean, void *var) { bool has_post_op = (ops != batch_normalization_ops::none); sycl::event e; std::vector<void *> caches; if (has_post_op) { void *dst_cache = allocate(dst_desc); caches.push_back(dst_cache); batch_normalization_forward_internal( true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, dst_desc, dst_cache, scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr, nullptr); if (ops == batch_normalization_ops::add_activation) { async_sum(1.f, summand_desc, summand, 1.f, dst_desc, dst_cache); } async_activation_forward(adesc, 1.f, dst_desc, dst_cache, 0.f, dst_desc, dst_cache); e = async_sum(alpha, dst_desc, dst_cache, beta, dst_desc, dst); _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { for (auto ptr : caches) { sycl::free(ptr, *_q); } }); }); return e; } return batch_normalization_forward_internal( true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst, scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr, nullptr); } inline sycl::event engine_ext::async_batch_normalization_forward_training( batch_normalization_mode mode, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias, void *running_mean, void *running_var, void *saved_mean, void *saved_var) { return batch_normalization_forward_internal( false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst, scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc, saved_mean, saved_var, running_mean, running_var); } inline sycl::event engine_ext::async_batch_normalization_forward_training( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &scale_bias_desc, void *scale, void *bias, const memory_desc_ext &mean_var_desc, void *running_mean, void *running_var, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace) { bool has_post_op = (ops != batch_normalization_ops::none); sycl::event e; if (has_post_op) { if(workspace_size < dst_desc.get_desc().get_size()) { throw std::runtime_error("async_batch_normalization_forward_training_ex: " "no sufficient workspace."); } batch_normalization_forward_internal( false, mode, epsilon, factor, 1.f, src_desc, src, 0.f, dst_desc, workspace, scale_bias_desc, scale, bias, mean_var_desc, saved_mean, saved_var, running_mean, running_var); if (ops == batch_normalization_ops::add_activation) { async_sum(1.f, summand_desc, summand, 1.f, 
dst_desc, workspace); } return async_activation_forward(adesc, alpha, dst_desc, workspace, beta, dst_desc, dst); } return batch_normalization_forward_internal( false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst, scale_bias_desc, scale, bias, mean_var_desc, saved_mean, saved_var, running_mean, running_var); } inline sycl::event engine_ext::async_batch_normalization_forward_training( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float factor, float alpha, const memory_desc_ext &src_desc, void *src, float beta, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias, void *running_mean, void *running_var, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace) { return async_batch_normalization_forward_training( mode, ops, adesc, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst, summand_desc, summand, scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc, running_mean, running_var, saved_mean, saved_var, workspace_size, workspace); } inline sycl::event engine_ext::async_batch_normalization_backward( batch_normalization_mode mode, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale, float beta_param, void *diff_scale, void *diff_bias, void *saved_mean, void *saved_var) { return batch_normalization_backward_internal( mode, epsilon, alpha_data, src_desc, src, diff_dst_desc, diff_dst, beta_data, diff_src_desc, diff_src, alpha_param, diff_scale_bias_mean_var_desc, scale, nullptr, beta_param, diff_scale, diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var); } inline sycl::event engine_ext::async_batch_normalization_backward( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, const memory_desc_ext &diff_summand_desc, void *diff_summand, float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias, float beta_param, void *diff_scale, void *diff_bias, const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace) { std::vector<void *> caches; ::dnnl::memory::desc real_diff_dst_desc = diff_dst_desc.get_desc(); void *real_diff_dst = diff_dst; if (ops != batch_normalization_ops::none && workspace_size < dst_desc.get_desc().get_size()) { throw std::runtime_error("async_batch_normalization_backward_ex: " "no sufficient workspace."); } if (ops == batch_normalization_ops::add_activation) { void *diff_summand_cache = allocate(diff_summand_desc); async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc, diff_dst, dst_desc, workspace, 0.f, diff_summand_desc, diff_summand_cache); caches.push_back(diff_summand_cache); async_sum(alpha_data, diff_summand_desc, diff_summand_cache, beta_data, diff_summand_desc, diff_summand); real_diff_dst_desc = diff_summand_desc.get_desc(); real_diff_dst = diff_summand_cache; } else if (ops == batch_normalization_ops::activation) { void *diff_dst_cache = 
allocate(diff_dst_desc); caches.push_back(diff_dst_cache); async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc, diff_dst, dst_desc, workspace, 0.f, diff_dst_desc, diff_dst_cache); real_diff_dst = diff_dst_cache; } sycl::event e = batch_normalization_backward_internal( mode, epsilon, alpha_data, src_desc, src, real_diff_dst_desc, real_diff_dst, beta_data, diff_src_desc, diff_src, alpha_param, diff_scale_bias_desc, scale, bias, beta_param, diff_scale, diff_bias, mean_var_desc, saved_mean, saved_var); _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { for (auto ptr : caches) { sycl::free(ptr, *_q); } }); }); return e; } inline sycl::event engine_ext::async_batch_normalization_backward( batch_normalization_mode mode, batch_normalization_ops ops, activation_desc &adesc, float epsilon, float alpha_data, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src, const memory_desc_ext &diff_summand_desc, void *diff_summand, float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale, void *bias, float beta_param, void *diff_scale, void *diff_bias, void *saved_mean, void *saved_var, size_t workspace_size, void *workspace) { return async_batch_normalization_backward( mode, ops, adesc, epsilon, alpha_data, src_desc, src, dst_desc, dst, diff_dst_desc, diff_dst, beta_data, diff_src_desc, diff_src, diff_summand_desc, diff_summand, alpha_param, diff_scale_bias_mean_var_desc, scale, bias, beta_param, diff_scale, diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var, workspace_size, workspace); } inline sycl::event engine_ext::async_convolution_forward(convolution_desc &desc, ::dnnl::algorithm alg, float alpha, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &weight_desc, void *weight, float beta, const memory_desc_ext &dst_desc, void *dst) { if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) { return sycl::event(); } auto help_weight_desc = get_group_weight_desc(desc.get_group_count(), weight_desc); ::dnnl::primitive_attr attr; attr.set_fpmath_mode(desc.get_math_mode()); auto origin_src_md = src_desc.get_desc(); auto origin_dst_md = dst_desc.get_desc(); auto origin_weight_md = help_weight_desc; auto src_md = transfer_memory_desc_to_format_tag_any(origin_src_md); auto dst_md = transfer_memory_desc_to_format_tag_any(origin_dst_md); auto weight_md = transfer_memory_desc_to_format_tag_any(origin_weight_md); auto primitive = create_primitive<::dnnl::convolution_forward>( ::dnnl::prop_kind::forward_training, alg, src_md, weight_md, dst_md, desc.get_stride(), desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr); ::dnnl::convolution_forward::primitive_desc pd = ::dnnl::convolution_forward::primitive_desc( const_cast<dnnl_primitive_desc_t>( primitive.second->get_primitive_desc())); auto optimal_src_md = pd.src_desc(); auto optimal_dst_md = pd.dst_desc(); auto optimal_weight_md = pd.weights_desc(); void *optimal_src = src, *optimal_dst = dst, *optimal_weight = weight; std::vector<void *> input_caches, output_caches; allocate_and_reorder_memory_to_optimal(origin_src_md, src, optimal_src_md, optimal_src, input_caches); allocate_and_reorder_memory_to_optimal(origin_dst_md, dst, optimal_dst_md, optimal_dst, output_caches); allocate_and_reorder_memory_to_optimal(origin_weight_md, weight, optimal_weight_md, optimal_weight, input_caches); auto 
execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(optimal_src_md, _eng, optimal_src)}}, {DNNL_ARG_WEIGHTS, {::dnnl::memory(optimal_weight_md, _eng, optimal_weight)}}}; auto e = execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DST, optimal_dst_md, optimal_dst}}, input_caches); if(origin_dst_md != optimal_dst_md){ e = async_reorder(1.f, optimal_dst_md, optimal_dst, 0.f, origin_dst_md, dst); } async_free(_q, e, nullptr, output_caches); return e; } inline sycl::event engine_ext::async_convolution_forward( convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc, float alpha_0, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &weight_desc, void *weight, float alpha_1, const memory_desc_ext &summand_desc, void *summand, const memory_desc_ext &bias_desc, void *bias, const memory_desc_ext &dst_desc, void *dst) { int channel_num = bias_desc.get_element_num(); auto help_weight_desc = get_group_weight_desc(desc.get_group_count(), weight_desc); ::dnnl::memory::desc help_bias_desc = {{channel_num}, bias_desc.get_desc().get_data_type(), ::dnnl::memory::format_tag::a}; ::dnnl::primitive_attr attr; attr.set_fpmath_mode(desc.get_math_mode()); auto primitive = create_primitive<::dnnl::convolution_forward>( ::dnnl::prop_kind::forward_training, alg, src_desc.get_desc(), help_weight_desc, help_bias_desc, dst_desc.get_desc(), desc.get_stride(), desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}, {DNNL_ARG_BIAS, {::dnnl::memory(help_bias_desc, _eng, bias)}}}; void *cache = nullptr; if (alpha_0 != 1.f) { cache = allocate(help_weight_desc); _q->memcpy(cache, weight, weight_desc.get_size()); async_scale(alpha_0, help_weight_desc, cache); execution_args->insert( {DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, cache)}}); execute_primitive(primitive, execution_args, {{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}}, {cache}); } else { execution_args->insert( {DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}}); execute_primitive(primitive, execution_args, {{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}}); } async_sum(alpha_1, summand_desc, summand, 1.f, dst_desc, dst); return async_activation_forward(adesc, 1.f, dst_desc, dst, 0.f, dst_desc, dst); } inline sycl::event engine_ext::async_convolution_backward_data( convolution_desc &desc, ::dnnl::algorithm alg, float alpha, const memory_desc_ext &weight_desc, void *weight, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_src_desc, void *diff_src) { if (scale_parameter_preprocess({{alpha, beta, diff_dst_desc, diff_dst}})) { return sycl::event(); } auto help_weight_desc = get_group_weight_desc(desc.get_group_count(), weight_desc); ::dnnl::primitive_attr attr; attr.set_fpmath_mode(desc.get_math_mode()); auto forward_primitive = create_primitive_desc<::dnnl::convolution_forward>( ::dnnl::prop_kind::forward_training, ::dnnl::algorithm::convolution_auto, diff_src_desc.get_desc(), help_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(), desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr); auto primitive = create_primitive<::dnnl::convolution_backward_data>( ::dnnl::algorithm::convolution_auto, diff_src_desc.get_desc(), help_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(), desc.get_dilate(), desc.get_padding(), desc.get_padding(), forward_primitive, attr); 
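  // Bind diff_dst and the (possibly grouped) weights as inputs; execute_primitive
  // then runs the backward-data primitive and applies the alpha/beta scaling
  // when it writes DNNL_ARG_DIFF_SRC.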
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_DIFF_DST, {::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}, {DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}}}; return execute_primitive( primitive, execution_args, {{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}}); } inline sycl::event engine_ext::async_convolution_backward_weight( convolution_desc &desc, ::dnnl::algorithm alg, float alpha, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_weight_desc, void *diff_weight) { if (scale_parameter_preprocess( {{alpha, beta, diff_weight_desc, diff_weight}})) { return sycl::event(); } auto help_diff_weight_desc = get_group_weight_desc(desc.get_group_count(), diff_weight_desc); ::dnnl::primitive_attr attr; attr.set_fpmath_mode(desc.get_math_mode()); auto forward_primitive = create_primitive_desc<::dnnl::convolution_forward>( ::dnnl::prop_kind::forward_training, ::dnnl::algorithm::convolution_auto, src_desc.get_desc(), help_diff_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(), desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr); auto primitive = create_primitive<::dnnl::convolution_backward_weights>( ::dnnl::algorithm::convolution_auto, src_desc.get_desc(), help_diff_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(), desc.get_dilate(), desc.get_padding(), desc.get_padding(), forward_primitive, attr); auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}, {DNNL_ARG_DIFF_DST, {::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}}; return execute_primitive(primitive, execution_args, {{alpha, beta, DNNL_ARG_DIFF_WEIGHTS, help_diff_weight_desc, diff_weight}}); } inline sycl::event engine_ext::async_convolution_backward_bias( float alpha, const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta, const memory_desc_ext &diff_bias_desc, void *diff_bias) { return async_reduction(reduction_op::sum, alpha, diff_dst_desc, diff_dst, beta, diff_bias_desc, diff_bias); } inline void engine_ext::rnn_get_weight_space_size(const rnn_desc &desc, size_t *weight_space_size) { *weight_space_size = 0; rnn_forward_internal(desc, ::dnnl::prop_kind::forward_inference, memory_desc_ext(), nullptr, memory_desc_ext(), nullptr, memory_desc_ext(), nullptr, nullptr, memory_desc_ext(), nullptr, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, true, weight_space_size, nullptr, nullptr); return; } inline void engine_ext::rnn_get_scratchpad_workspace_size( const rnn_desc &desc, ::dnnl::prop_kind kind, const memory_desc_ext &src_desc, size_t *scratchpad_size, size_t *workspace_size) { *workspace_size = 0; *scratchpad_size = 0; rnn_forward_internal(desc, kind, src_desc, nullptr, memory_desc_ext(), nullptr, memory_desc_ext(), nullptr, nullptr, memory_desc_ext(), nullptr, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, true, nullptr, workspace_size, scratchpad_size); return; } inline sycl::event engine_ext::async_rnn_forward( const rnn_desc &desc, ::dnnl::prop_kind kind, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c, size_t weight_size, void *weight, size_t scratchpad_size, void *scratchpad, size_t workspace_size, void *workspace) { return rnn_forward_internal( desc, kind, src_desc, src, dst_desc, 
dst, iter_desc, src_iter, dst_iter, iter_c_desc, src_iter_c, dst_iter_c, weight_size, weight, workspace_size, workspace, scratchpad_size, scratchpad, false, nullptr, nullptr, nullptr); } inline sycl::event engine_ext::async_rnn_backward( const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst, void *diff_dst, const memory_desc_ext &src_desc, void *src, void *diff_src, const memory_desc_ext &iter_desc, void *src_iter, void *diff_dst_iter, void *diff_src_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c, void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size, void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad, size_t workspace_size, void *workspace) { ::dnnl::memory::data_type src_dt; ::dnnl::memory::format_tag src_format_tag; rnn_mode mode; rnn_memory_format_tag format_tag; rnn_bias_mode bias_mode; rnn_direction direction; dpct::library_data_t dt; int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0, layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0, seq_length = 1, batch_size = 1; void *last_layer_cache = nullptr; void *hidden_layer_cache = nullptr; sycl::event e; std::vector<int> offset(9, 0); std::vector<void *> data = { src, dst, (uint8_t *)src_iter + iter_desc.get_size(), nullptr, (uint8_t *)src_iter_c + iter_c_desc.get_size(), nullptr, (uint8_t *)weight + weight_size, (uint8_t *)workspace + workspace_size, diff_src, diff_dst, (uint8_t *)diff_src_iter + iter_desc.get_size(), (uint8_t *)diff_dst_iter + iter_desc.get_size(), (uint8_t *)diff_src_iter_c + iter_c_desc.get_size(), (uint8_t *)diff_dst_iter_c + iter_c_desc.get_size(), (uint8_t *)diff_weight + weight_size, scratchpad}; desc.get(&mode, &bias_mode, &direction, &dt, &input_size, &hidden_size, &projection_size, &layer_size); get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size, &src_dt, &src_format_tag, &projection_size, &output_size, &seq_length, &batch_size, &direction_num, &gate_num); if (direction == rnn_direction::bidirectional) { if (layer_size > 1) { last_layer_cache = allocate(src_desc); hidden_layer_cache = allocate(src_desc); data[8] = last_layer_cache; } e = execute_rnn_backward_primitive( mode, ::dnnl::rnn_direction::bidirectional_concat, bias_mode, src_dt, src_format_tag, seq_length, batch_size, output_size, 2 * output_size, 1, direction_num, hidden_size, gate_num, projection_size, data, offset, 1); if (layer_size > 1) { data[8] = hidden_layer_cache; data[9] = last_layer_cache; e = execute_rnn_backward_primitive( mode, ::dnnl::rnn_direction::bidirectional_sum, bias_mode, src_dt, src_format_tag, seq_length, batch_size, output_size, output_size, 1, direction_num, hidden_size, gate_num, projection_size, data, offset, layer_size - 1); _q->memcpy(diff_src, ((layer_size - 1) % 2 == 0) ? 
last_layer_cache : hidden_layer_cache, src_desc.get_size()); } } else { e = execute_rnn_backward_primitive( mode, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode, src_dt, src_format_tag, seq_length, batch_size, output_size, output_size, layer_size, direction_num, hidden_size, gate_num, projection_size, data, offset, 1); } if (last_layer_cache && hidden_layer_cache) { _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { sycl::free(last_layer_cache, *_q); sycl::free(hidden_layer_cache, *_q); }); }); } return e; } inline size_t engine_ext::get_dropout_state_size(){ #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) " "Interfaces Project does not support this API."); #else sycl::queue q; if(_random_engine_state_size == -1) { if(_q){ q = *_q; } else { q = dpct::get_current_device().default_queue(); } auto rand_engine = rng_engine_t(q, 0); _random_engine_state_size = oneapi::mkl::rng::get_state_size(rand_engine); } return _random_engine_state_size; #endif } inline size_t engine_ext::get_dropout_workspace_size(const memory_desc_ext &src_desc) { return src_desc.get_size(); } inline sycl::event engine_ext::async_dropout_forward(dropout_desc &desc, const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc, void *dst, void *workspace, size_t workspace_size) { if (workspace_size < src_desc.get_size()) { throw std::runtime_error("async_dropout_forward: no sufficient workspace."); } float p = desc.get_probability(); if (p == 1.f) { return _q->memset(dst, 0, dst_desc.get_size()); } else if (p == 0.f) { return async_reorder(1.f, src_desc, src, 0.f, dst_desc, dst); } float scale_factor = 1.f / (1.f - p); void *cache = workspace; memory_desc_ext rng_data_desc( ::dnnl::memory::desc(src_desc.get_dims(), ::dnnl::memory::data_type::s32, src_desc.get_strides())); if (src_desc.get_desc().get_data_type() != ::dnnl::memory::data_type::s32) { cache = allocate(rng_data_desc); } desc.generate(_q, _random_engine_state_size, rng_data_desc.get_element_num(), (std::int32_t *)cache); if (cache == workspace) { async_scale(scale_factor, src_desc, workspace); } else { async_reorder(scale_factor, rng_data_desc, cache, 0.f, src_desc, workspace); } auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC_0, ::dnnl::memory(src_desc.get_desc(), _eng, src)}, {DNNL_ARG_SRC_1, ::dnnl::memory(src_desc.get_desc(), _eng, workspace)}, {DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)}}; auto primitive = create_primitive<::dnnl::binary>( ::dnnl::algorithm::binary_mul, src_desc.get_desc(), src_desc.get_desc(), dst_desc.get_desc()); auto e = execute_primitive(primitive, execution_args); if (cache != workspace) { _q->submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { sycl::free(cache, *_q); }); }); } return e; } inline sycl::event engine_ext::async_dropout_backward( dropout_desc &desc, const memory_desc_ext &diff_dst_desc, void *diff_dst, const memory_desc_ext &diff_src_desc, void *diff_src, void *workspace, size_t workspace_size) { float p = desc.get_probability(); if (p == 1.f) { return _q->memset(diff_src, 0, diff_src_desc.get_size()); } else if (p == 0.f) { return async_reorder(1.f, diff_dst_desc, diff_dst, 0.f, diff_src_desc, diff_src); } auto execution_args = new std::unordered_map<int, ::dnnl::memory>{ {DNNL_ARG_SRC_0, ::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}, {DNNL_ARG_SRC_1, ::dnnl::memory(diff_dst_desc.get_desc(), _eng, workspace)}, {DNNL_ARG_DST, 
       ::dnnl::memory(diff_src_desc.get_desc(), _eng, diff_src)}};
  auto primitive = create_primitive<::dnnl::binary>(
      ::dnnl::algorithm::binary_mul, diff_dst_desc.get_desc(),
      diff_dst_desc.get_desc(), diff_src_desc.get_desc());
  return execute_primitive(primitive, execution_args);
}
} // namespace dnnl
} // namespace dpct

#endif // __DPCT_DNNL_UTILS_HPP__
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/lapack_utils.hpp
//==---- lapack_utils.hpp -------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//

#ifndef __DPCT_LAPACK_UTILS_HPP__
#define __DPCT_LAPACK_UTILS_HPP__

#include "memory.hpp"
#include "util.hpp"
#include "lib_common_utils.hpp"

#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>

namespace dpct {
namespace lapack {
/// Computes all eigenvalues and, optionally, eigenvectors of a real generalized
/// symmetric definite eigenproblem using a divide and conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1 or 2 or 3. Specifies the problem type to be solved.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrices A and B.
/// \param [in,out] a The symmetric matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b The symmetric matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] w Eigenvalues.
/// \param [in] scratchpad Scratchpad memory to be used by the routine
/// for storing intermediate results.
/// \param [in] scratchpad_size Size of scratchpad memory as a number of
/// floating point elements of type T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
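///
/// A minimal usage sketch (illustrative only; the in-order queue, the 4x4
/// problem size, and the device allocations are assumptions, not part of this
/// header, and the scratchpad size is queried with oneMKL's
/// sygvd_scratchpad_size):
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr int n = 4, lda = 4, ldb = 4;
/// float *a = sycl::malloc_device<float>(lda * n, q); // symmetric A
/// float *b = sycl::malloc_device<float>(ldb * n, q); // SPD B
/// float *w = sycl::malloc_device<float>(n, q);       // eigenvalues out
/// int *info = sycl::malloc_device<int>(1, q);
/// // ... copy the input matrices into a and b ...
/// std::int64_t scratchpad_size =
///     oneapi::mkl::lapack::sygvd_scratchpad_size<float>(
///         q, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, ldb);
/// float *scratchpad = sycl::malloc_device<float>(scratchpad_size, q);
/// int ret = dpct::lapack::sygvd(q, 1, oneapi::mkl::job::vec,
///                               oneapi::mkl::uplo::upper, n, a, lda, b, ldb,
///                               w, scratchpad, scratchpad_size, info);
/// \endcode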
template <typename T> inline int sygvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb, T *w, T *scratchpad, int scratchpad_size, int *info) { #ifdef DPCT_USM_LEVEL_NONE auto info_buf = get_buffer<int>(info); auto a_buffer = get_buffer<T>(a); auto b_buffer = get_buffer<T>(b); auto w_buffer = get_buffer<T>(w); auto scratchpad_buffer = get_buffer<T>(scratchpad); int info_val = 0; int ret_val = 0; try { oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a_buffer, lda, b_buffer, ldb, w_buffer, scratchpad_buffer, scratchpad_size); } catch (oneapi::mkl::lapack::exception const& e) { std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd" << std::endl << "reason: " << e.what() << std::endl << "info: " << e.info() << std::endl; info_val = static_cast<int>(e.info()); ret_val = 1; } catch (sycl::exception const& e) { std::cerr << "Caught synchronous SYCL exception:" << std::endl << "reason: " << e.what() << std::endl; ret_val = 1; } queue.submit([&, info_val](sycl::handler &cgh) { auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh); cgh.single_task<dpct_kernel_name<class sygvd_set_info, T>>( [=]() { info_acc[0] = info_val; }); }); return ret_val; #else try { oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a, lda, b, ldb, w, scratchpad, scratchpad_size); } catch (oneapi::mkl::lapack::exception const& e) { std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd" << std::endl << "reason: " << e.what() << std::endl << "info: " << e.info() << std::endl; int info_val = static_cast<int>(e.info()); queue.memcpy(info, &info_val, sizeof(int)).wait(); return 1; } catch (sycl::exception const& e) { std::cerr << "Caught synchronous SYCL exception:" << std::endl << "reason: " << e.what() << std::endl; queue.memset(info, 0, sizeof(int)).wait(); return 1; } queue.memset(info, 0, sizeof(int)); return 0; #endif } /// Computes all the eigenvalues, and optionally, the eigenvectors of a complex /// generalized Hermitian positive-definite eigenproblem using a divide and /// conquer method. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] queue Device queue where calculations will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] itype Must be 1 or 2 or 3. Specifies the problem type to be solved. /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrices A and B. /// \param [in,out] a The Hermitian matrix A. /// \param [in] lda The leading dimension of matrix A. /// \param [in,out] b The Hermitian matrix B. /// \param [in] ldb The leading dimension of matrix B. /// \param [in] w Eigenvalues. /// \param [in] scratchpad Scratchpad memory to be used by the routine /// for storing intermediate results. /// \param [in] scratchpad_size Size of scratchpad memory as a number of /// floating point elements of type T. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. 
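///
/// Usage mirrors the sygvd sketch above, with complex-typed \p a, \p b, and
/// \p scratchpad (for example std::complex<float>, or the sycl::float2 type a
/// migrated application may carry) and a real-typed \p w; the scratchpad size
/// can be queried with oneMKL's hegvd_scratchpad_size in the same way.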
template <typename T, typename Tw> inline int hegvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb, Tw *w, T *scratchpad, int scratchpad_size, int *info) { using Ty = typename DataType<T>::T2; #ifdef DPCT_USM_LEVEL_NONE auto info_buf = get_buffer<int>(info); auto a_buffer = get_buffer<Ty>(a); auto b_buffer = get_buffer<Ty>(b); auto w_buffer = get_buffer<Tw>(w); auto scratchpad_buffer = get_buffer<Ty>(scratchpad); int info_val = 0; int ret_val = 0; try { oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, a_buffer, lda, b_buffer, ldb, w_buffer, scratchpad_buffer, scratchpad_size); } catch (oneapi::mkl::lapack::exception const& e) { std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd" << std::endl << "reason: " << e.what() << std::endl << "info: " << e.info() << std::endl; info_val = static_cast<int>(e.info()); ret_val = 1; } catch (sycl::exception const& e) { std::cerr << "Caught synchronous SYCL exception:" << std::endl << "reason: " << e.what() << std::endl; ret_val = 1; } queue.submit([&, info_val](sycl::handler &cgh) { auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh); cgh.single_task<dpct_kernel_name<class hegvd_set_info, T>>( [=]() { info_acc[0] = info_val; }); }); return ret_val; #else try { oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, (Ty *)a, lda, (Ty *)b, ldb, w, (Ty *)scratchpad, scratchpad_size); } catch (oneapi::mkl::lapack::exception const& e) { std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd" << std::endl << "reason: " << e.what() << std::endl << "info: " << e.info() << std::endl; int info_val = static_cast<int>(e.info()); queue.memcpy(info, &info_val, sizeof(int)).wait(); return 1; } catch (sycl::exception const& e) { std::cerr << "Caught synchronous SYCL exception:" << std::endl << "reason: " << e.what() << std::endl; queue.memset(info, 0, sizeof(int)).wait(); return 1; } queue.memset(info, 0, sizeof(int)); return 0; #endif } /// Computes the Cholesky factorizations of a batch of symmetric (or Hermitian, /// for complex data) positive-definite matrices. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] queue Device queue where calculations will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in,out] a Array of pointers to matrix A. /// \param [in] lda The leading dimension of matrix A. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. /// \param [in] group_size The batch size. 
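///
/// A minimal usage sketch (illustrative only), assuming USM mode; the pointer
/// array is placed in shared USM as a simplifying assumption and all names are
/// hypothetical.
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr int n = 4, batch = 2;
/// double **a_array = sycl::malloc_shared<double *>(batch, q);
/// for (int i = 0; i < batch; ++i)
///   a_array[i] = sycl::malloc_device<double>(n * n, q);
/// int *d_info = sycl::malloc_device<int>(batch, q);
/// // ... fill each a_array[i] with an n x n SPD matrix ...
/// dpct::lapack::potrf_batch(q, oneapi::mkl::uplo::upper, n, a_array, n,
///                           d_info, batch);
/// q.wait();
/// \endcode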
template <typename T> inline int potrf_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n, T *a[], int lda, int *info, int group_size) { #ifdef DPCT_USM_LEVEL_NONE throw std::runtime_error("this API is unsupported when USM level is none"); #else using Ty = typename DataType<T>::T2; struct matrix_info_t { oneapi::mkl::uplo uplo_info; std::int64_t n_info; std::int64_t lda_info; std::int64_t group_size_info; }; matrix_info_t *matrix_info = (matrix_info_t *)std::malloc(sizeof(matrix_info_t)); matrix_info->uplo_info = uplo; matrix_info->n_info = n; matrix_info->lda_info = lda; matrix_info->group_size_info = group_size; std::int64_t scratchpad_size = 0; sycl::event e; Ty *scratchpad = nullptr; try { scratchpad_size = oneapi::mkl::lapack::potrf_batch_scratchpad_size<Ty>( queue, &(matrix_info->uplo_info), &(matrix_info->n_info), &(matrix_info->lda_info), 1, &(matrix_info->group_size_info)); scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue); e = oneapi::mkl::lapack::potrf_batch( queue, &(matrix_info->uplo_info), &(matrix_info->n_info), (Ty **)a, &(matrix_info->lda_info), 1, &(matrix_info->group_size_info), scratchpad, scratchpad_size); } catch (oneapi::mkl::lapack::batch_error const &be) { std::cerr << "Unexpected exception caught during call to LAPACK API: " "potrf_batch_scratchpad_size/potrf_batch" << std::endl << "reason: " << be.what() << std::endl << "number: " << be.info() << std::endl; int i = 0; auto &ids = be.ids(); std::vector<int> info_vec(group_size); for (auto const &e : be.exceptions()) { try { std::rethrow_exception(e); } catch (oneapi::mkl::lapack::exception &e) { std::cerr << "Exception " << ids[i] << std::endl << "reason: " << e.what() << std::endl << "info: " << e.info() << std::endl; info_vec[i] = e.info(); i++; } } queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait(); std::free(matrix_info); if (scratchpad) sycl::free(scratchpad, queue); return 1; } catch (sycl::exception const &e) { std::cerr << "Caught synchronous SYCL exception:" << std::endl << "reason: " << e.what() << std::endl; queue.memset(info, 0, group_size * sizeof(int)).wait(); std::free(matrix_info); if (scratchpad) sycl::free(scratchpad, queue); return 1; } queue.submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { std::free(matrix_info); sycl::free(scratchpad, queue); }); }); queue.memset(info, 0, group_size * sizeof(int)); return 0; #endif } /// Solves a batch of systems of linear equations with a Cholesky-factored /// symmetric (Hermitian) positive-definite coefficient matrices. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] queue Device queue where calculations will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] nrhs The number of right-hand sides. /// \param [in,out] a Array of pointers to matrix A. /// \param [in] lda The leading dimension of matrix A. /// \param [in,out] b Array of pointers to matrix B. /// \param [in] ldb The leading dimension of matrix B. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. /// \param [in] group_size The batch size. 
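///
/// A minimal usage sketch (illustrative only), reusing the q, n, batch,
/// a_array and d_info names from the potrf_batch sketch above and assuming the
/// matrices in a_array already hold Cholesky factors; b_array and nrhs are
/// hypothetical.
/// \code
/// constexpr int nrhs = 1;
/// double **b_array = sycl::malloc_shared<double *>(batch, q);
/// for (int i = 0; i < batch; ++i)
///   b_array[i] = sycl::malloc_device<double>(n * nrhs, q);
/// // ... fill each b_array[i] with the right-hand sides ...
/// dpct::lapack::potrs_batch(q, oneapi::mkl::uplo::upper, n, nrhs, a_array, n,
///                           b_array, n, d_info, batch);
/// q.wait();
/// \endcode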
template <typename T> inline int potrs_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n, int nrhs, T *a[], int lda, T *b[], int ldb, int *info, int group_size) { #ifdef DPCT_USM_LEVEL_NONE throw std::runtime_error("this API is unsupported when USM level is none"); #else using Ty = typename DataType<T>::T2; struct matrix_info_t { oneapi::mkl::uplo uplo_info; std::int64_t n_info; std::int64_t nrhs_info; std::int64_t lda_info; std::int64_t ldb_info; std::int64_t group_size_info; }; matrix_info_t *matrix_info = (matrix_info_t *)std::malloc(sizeof(matrix_info_t)); matrix_info->uplo_info = uplo; matrix_info->n_info = n; matrix_info->nrhs_info = nrhs; matrix_info->lda_info = lda; matrix_info->ldb_info = ldb; matrix_info->group_size_info = group_size; std::int64_t scratchpad_size = 0; sycl::event e; Ty *scratchpad = nullptr; try { scratchpad_size = oneapi::mkl::lapack::potrs_batch_scratchpad_size<Ty>( queue, &(matrix_info->uplo_info), &(matrix_info->n_info), &(matrix_info->nrhs_info), &(matrix_info->lda_info), &(matrix_info->ldb_info), 1, &(matrix_info->group_size_info)); scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue); e = oneapi::mkl::lapack::potrs_batch( queue, &(matrix_info->uplo_info), &(matrix_info->n_info), &(matrix_info->nrhs_info), (Ty **)a, &(matrix_info->lda_info), (Ty **)b, &(matrix_info->ldb_info), 1, &(matrix_info->group_size_info), scratchpad, scratchpad_size); } catch (oneapi::mkl::lapack::batch_error const &be) { std::cerr << "Unexpected exception caught during call to LAPACK API: " "potrs_batch_scratchpad_size/potrs_batch" << std::endl << "reason: " << be.what() << std::endl << "number: " << be.info() << std::endl; int i = 0; auto &ids = be.ids(); std::vector<int> info_vec(group_size); for (auto const &e : be.exceptions()) { try { std::rethrow_exception(e); } catch (oneapi::mkl::lapack::exception &e) { std::cerr << "Exception " << ids[i] << std::endl << "reason: " << e.what() << std::endl << "info: " << e.info() << std::endl; info_vec[i] = e.info(); i++; } } queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait(); std::free(matrix_info); if (scratchpad) sycl::free(scratchpad, queue); return 1; } catch (sycl::exception const &e) { std::cerr << "Caught synchronous SYCL exception:" << std::endl << "reason: " << e.what() << std::endl; queue.memset(info, 0, group_size * sizeof(int)).wait(); std::free(matrix_info); if (scratchpad) sycl::free(scratchpad, queue); return 1; } queue.submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { std::free(matrix_info); sycl::free(scratchpad, queue); }); }); queue.memset(info, 0, group_size * sizeof(int)); return 0; #endif } namespace detail { template <template <typename> typename functor_t, typename... 
args_t> inline int lapack_shim(sycl::queue &q, library_data_t a_type, int *info, std::string const &lapack_api_name, args_t &&...args) { auto handle_lapack_exception = [&](const oneapi::mkl::lapack::exception &e) { std::cerr << "Unexpected exception caught during call to LAPACK API: " << lapack_api_name << std::endl << "reason: " << e.what() << std::endl << "info: " << e.info() << std::endl << "detail: " << e.detail() << std::endl; if (e.info() < std::numeric_limits<int>::min() || e.info() > std::numeric_limits<int>::max()) { throw std::runtime_error("e.info() exceeds the limit of int type"); } int info_val = static_cast<int>(e.info()); if (info) dpct::detail::dpct_memcpy(q, info, &info_val, sizeof(int), memcpy_direction::host_to_device) .wait(); return 1; }; try { switch (a_type) { case library_data_t::real_float: { functor_t<float>()(std::forward<args_t>(args)...); break; } case library_data_t::real_double: { functor_t<double>()(std::forward<args_t>(args)...); break; } case library_data_t::complex_float: { functor_t<std::complex<float>>()(std::forward<args_t>(args)...); break; } case library_data_t::complex_double: { functor_t<std::complex<double>>()(std::forward<args_t>(args)...); break; } default: throw std::runtime_error("the data type is unsupported"); } } catch (oneapi::mkl::lapack::batch_error const &be) { try { std::rethrow_exception(be.exceptions()[0]); } catch (oneapi::mkl::lapack::exception &e) { return handle_lapack_exception(e); } } catch (oneapi::mkl::lapack::exception const &e) { return handle_lapack_exception(e); } catch (sycl::exception const &e) { std::cerr << "Caught synchronous SYCL exception:" << std::endl << "reason: " << e.what() << std::endl; if (info) dpct::detail::dpct_memset(q, info, 0, sizeof(int)).wait(); return 1; } return 0; } template <typename T> class working_memory { public: working_memory(std::size_t element_number, const sycl::queue &q) : _q(q) { _ptr = dpct::detail::dpct_malloc(element_number * sizeof(T), _q); } auto get_memory() { return dpct::detail::get_memory(reinterpret_cast<T *>(_ptr)); } auto get_ptr() { return _ptr; } void set_event(sycl::event e) { _e = e; } ~working_memory() { if (_ptr) { dpct::async_dpct_free({_ptr}, {_e}, _q); } } private: void *_ptr = nullptr; sycl::event _e; sycl::queue _q; }; std::size_t byte_to_element_number(std::size_t size_in_byte, dpct::library_data_t element_type) { auto dv = std::lldiv( size_in_byte, dpct::detail::library_data_size[static_cast<unsigned int>(element_type)] / 8); if (dv.rem) { throw std::runtime_error( "size_in_byte is not divisible by the size of element (in bytes)"); } return dv.quot; } std::size_t element_number_to_byte(std::size_t size_in_element, dpct::library_data_t element_type) { auto dv = std::lldiv( dpct::detail::library_data_size[static_cast<unsigned int>(element_type)], 8); if (dv.rem) { throw std::runtime_error( "the size of element (in bits) is not divisible by 8"); } return size_in_element * dv.quot; } inline oneapi::mkl::jobsvd char2jobsvd(signed char job) { switch (job) { case 'A': return oneapi::mkl::jobsvd::vectors; case 'S': return oneapi::mkl::jobsvd::somevec; case 'O': return oneapi::mkl::jobsvd::vectorsina; case 'N': return oneapi::mkl::jobsvd::novec; default: throw std::runtime_error("the job type is unsupported"); } } template <typename T> struct getrf_scratchpad_size_impl { void operator()(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t &device_ws_size) { device_ws_size = oneapi::mkl::lapack::getrf_scratchpad_size<T>(q, 
m, n, lda); } }; template <typename T> struct getrf_impl { void operator()(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, std::int64_t *ipiv, void *device_ws, std::size_t device_ws_size, int *info) { auto ipiv_data = dpct::detail::get_memory(ipiv); auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); oneapi::mkl::lapack::getrf(q, m, n, a_data, lda, ipiv_data, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); } }; template <typename T> struct getrs_impl { void operator()(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n, std::int64_t nrhs, library_data_t a_type, void *a, std::int64_t lda, std::int64_t *ipiv, library_data_t b_type, void *b, std::int64_t ldb, int *info) { auto ipiv_data = dpct::detail::get_memory(ipiv); std::int64_t device_ws_size = oneapi::mkl::lapack::getrs_scratchpad_size<T>( q, trans, n, nrhs, lda, ldb); working_memory<T> device_ws(device_ws_size, q); auto device_ws_data = device_ws.get_memory(); auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b)); oneapi::mkl::lapack::getrs(q, trans, n, nrhs, a_data, lda, ipiv_data, b_data, ldb, device_ws_data, device_ws_size); sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int)); device_ws.set_event(e); } }; template <typename T> struct geqrf_scratchpad_size_impl { void operator()(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t &device_ws_size) { device_ws_size = oneapi::mkl::lapack::geqrf_scratchpad_size<T>(q, m, n, lda); } }; template <typename T> struct geqrf_impl { void operator()(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, library_data_t tau_type, void *tau, void *device_ws, std::size_t device_ws_size, int *info) { auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto tau_data = dpct::detail::get_memory(reinterpret_cast<T *>(tau)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); oneapi::mkl::lapack::geqrf(q, m, n, a_data, lda, tau_data, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); } }; template <typename T> struct getrfnp_impl { void operator()(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, std::int64_t *ipiv, void *device_ws, std::size_t device_ws_size, int *info) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else std::int64_t a_stride = m * lda; auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); oneapi::mkl::lapack::getrfnp_batch(q, m, n, a_data, lda, a_stride, 1, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); #endif } }; template <typename T> struct gesvd_scratchpad_size_impl { void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu, oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n, library_data_t a_type, std::int64_t lda, library_data_t u_type, std::int64_t ldu, library_data_t vt_type, std::int64_t ldvt, std::size_t &device_ws_size) { device_ws_size = oneapi::mkl::lapack::gesvd_scratchpad_size<T>( q, jobu, jobvt, m, n, lda, ldu, ldvt); } }; template <typename T> 
struct ElementType { using value_tpye = T; }; template <typename T> struct ElementType<std::complex<T>> { using value_tpye = T; }; template <typename T> struct gesvd_impl { void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu, oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, library_data_t s_type, void *s, library_data_t u_type, void *u, std::int64_t ldu, library_data_t vt_type, void *vt, std::int64_t ldvt, void *device_ws, std::size_t device_ws_size, int *info) { auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto s_data = dpct::detail::get_memory( reinterpret_cast<typename ElementType<T>::value_tpye *>(s)); auto u_data = dpct::detail::get_memory(reinterpret_cast<T *>(u)); auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); oneapi::mkl::lapack::gesvd(q, jobu, jobvt, m, n, a_data, lda, s_data, u_data, ldu, vt_data, ldvt, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); } }; template <typename T> struct gesvd_conj_impl : public gesvd_impl<T> { void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu, oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, library_data_t s_type, void *s, library_data_t u_type, void *u, std::int64_t ldu, library_data_t vt_type, void *vt, std::int64_t ldvt, void *device_ws, std::size_t device_ws_size, int *info) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using base = gesvd_impl<T>; base::operator()(q, jobu, jobvt, m, n, a_type, a, lda, s_type, s, u_type, u, ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info); auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt)); oneapi::mkl::blas::row_major::imatcopy(q, oneapi::mkl::transpose::conjtrans, n, n, T(1.0f), vt_data, ldvt, ldvt); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); #endif } }; template <typename T> struct potrf_scratchpad_size_impl { void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t &device_ws_size) { device_ws_size = oneapi::mkl::lapack::potrf_scratchpad_size<T>(q, uplo, n, lda); } }; template <typename T> struct potrf_impl { void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, void *device_ws, std::size_t device_ws_size, int *info) { auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); oneapi::mkl::lapack::potrf(q, uplo, n, a_data, lda, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); } }; template <typename T> struct potrs_impl { void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t nrhs, library_data_t a_type, void *a, std::int64_t lda, library_data_t b_type, void *b, std::int64_t ldb, int *info) { std::int64_t device_ws_size = oneapi::mkl::lapack::potrs_scratchpad_size<T>( q, uplo, n, nrhs, lda, ldb); working_memory<T> device_ws(device_ws_size, q); auto device_ws_data = device_ws.get_memory(); auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b)); oneapi::mkl::lapack::potrs(q, uplo, n, nrhs, a_data, lda, b_data, ldb, device_ws_data, 
device_ws_size); sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int)); device_ws.set_event(e); } }; template <typename T> struct value_type_trait { using value_type = T; }; template <typename T> struct value_type_trait<std::complex<T>> { using value_type = T; }; template <typename T> auto lamch_s() { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else if constexpr (std::is_same_v<T, float>) { return slamch("S"); } else if constexpr (std::is_same_v<T, double>) { return dlamch("S"); } throw std::runtime_error("the type is unsupported"); #endif } #define DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(FUNC, ...) \ do { \ if constexpr (std::is_floating_point_v<T>) { \ device_ws_size = oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \ } else { \ device_ws_size = oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \ } \ } while (0) #define DISPATCH_FLOAT_FOR_CALCULATION(FUNC, ...) \ do { \ if constexpr (std::is_floating_point_v<T>) { \ oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \ } else { \ oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \ } \ } while (0) template <typename T> struct syheevx_scratchpad_size_impl { void operator()(sycl::queue &q, oneapi::mkl::compz jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, void *vl, void *vu, std::int64_t il, std::int64_t iu, std::size_t &device_ws_size) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using value_t = typename value_type_trait<T>::value_type; auto vl_value = *reinterpret_cast<value_t *>(vl); auto vu_value = *reinterpret_cast<value_t *>(vu); auto abstol = 2 * lamch_s<value_t>(); DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evx_scratchpad_size<T>, q, jobz, range, uplo, n, lda, vl_value, vu_value, il, iu, abstol, lda); #endif } }; template <typename T> constexpr library_data_t get_library_data_t_from_type() { if constexpr (std::is_same_v<T, float>) { return library_data_t::real_float; } else if constexpr (std::is_same_v<T, double>) { return library_data_t::real_double; } else if constexpr (std::is_same_v<T, sycl::float2> || std::is_same_v<T, std::complex<float>>) { return library_data_t::complex_float; } else if constexpr (std::is_same_v<T, sycl::double2> || std::is_same_v<T, std::complex<double>>) { return library_data_t::complex_double; } throw std::runtime_error("the type is unsupported"); } template <typename T> struct syheevx_impl { void operator()(sycl::queue &q, oneapi::mkl::compz jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, void *vl, void *vu, std::int64_t il, std::int64_t iu, std::int64_t *m, library_data_t w_type, void *w, void *device_ws, std::size_t device_ws_size, int *info) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using value_t = typename value_type_trait<T>::value_type; working_memory<T> z(n * lda, q); working_memory<std::int64_t> m_device(1, q); auto z_data = z.get_memory(); auto m_device_data = m_device.get_memory(); auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); auto vl_value = *reinterpret_cast<value_t *>(vl); auto vu_value = *reinterpret_cast<value_t *>(vu); auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w)); auto 
abstol = 2 * lamch_s<value_t>(); DISPATCH_FLOAT_FOR_CALCULATION(evx, q, jobz, range, uplo, n, a_data, lda, vl_value, vu_value, il, iu, abstol, m_device_data, w_data, z_data, lda, device_ws_data, device_ws_size); dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T), memcpy_direction::device_to_device, q); dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t), memcpy_direction::device_to_host, q); sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int)); z.set_event(e); m_device.set_event(e); #endif } }; template <typename T> struct syhegvx_scratchpad_size_impl { void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, std::int64_t ldb, void *vl, void *vu, std::int64_t il, std::int64_t iu, std::size_t &device_ws_size) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using value_t = typename value_type_trait<T>::value_type; auto vl_value = *reinterpret_cast<value_t *>(vl); auto vu_value = *reinterpret_cast<value_t *>(vu); auto abstol = 2 * lamch_s<value_t>(); DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvx_scratchpad_size<T>, q, itype, jobz, range, uplo, n, lda, ldb, vl_value, vu_value, il, iu, abstol, lda); #endif } }; template <typename T> struct syhegvx_impl { void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, std::int64_t n, void *a, std::int64_t lda, void *b, std::int64_t ldb, void *vl, void *vu, std::int64_t il, std::int64_t iu, std::int64_t *m, void *w, void *device_ws, std::size_t device_ws_size, int *info) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using value_t = typename value_type_trait<T>::value_type; working_memory<T> z(n * lda, q); working_memory<std::int64_t> m_device(1, q); auto z_data = z.get_memory(); auto m_device_data = m_device.get_memory(); auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); auto vl_value = *reinterpret_cast<value_t *>(vl); auto vu_value = *reinterpret_cast<value_t *>(vu); auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w)); auto abstol = 2 * lamch_s<value_t>(); DISPATCH_FLOAT_FOR_CALCULATION(gvx, q, itype, jobz, range, uplo, n, a_data, lda, b_data, ldb, vl_value, vu_value, il, iu, abstol, m_device_data, w_data, z_data, lda, device_ws_data, device_ws_size); dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T), memcpy_direction::device_to_device, q); dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t), memcpy_direction::device_to_host, q); sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int)); z.set_event(e); m_device.set_event(e); #endif } }; template <typename T> struct syhegvd_scratchpad_size_impl { void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, std::int64_t ldb, std::size_t &device_ws_size) { DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvd_scratchpad_size<T>, q, itype, jobz, uplo, n, lda, ldb); } }; template <typename T> struct syhegvd_impl { void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, void 
*a, std::int64_t lda, void *b, std::int64_t ldb, void *w, void *device_ws, std::size_t device_ws_size, int *info) { using value_t = typename value_type_trait<T>::value_type; auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w)); DISPATCH_FLOAT_FOR_CALCULATION(gvd, q, itype, jobz, uplo, n, a_data, lda, b_data, ldb, w_data, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); } }; oneapi::mkl::compz job2compz(const oneapi::mkl::job &job) { oneapi::mkl::compz ret; if (job == oneapi::mkl::job::novec) { ret = oneapi::mkl::compz::novectors; } else if (job == oneapi::mkl::job::vec) { ret = oneapi::mkl::compz::vectors; } else { throw std::runtime_error("the job type is unsupported"); } return ret; } template <typename T> struct syheev_scratchpad_size_impl { void operator()(sycl::queue &q, oneapi::mkl::compz jobz, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, std::size_t &device_ws_size) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(ev_scratchpad_size<T>, q, jobz, uplo, n, lda); #endif } }; template <typename T> struct syheev_impl { void operator()(sycl::queue &q, oneapi::mkl::compz jobz, oneapi::mkl::uplo uplo, std::int64_t n, void *a, std::int64_t lda, void *w, void *device_ws, std::size_t device_ws_size, int *info) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using value_t = typename value_type_trait<T>::value_type; auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w)); DISPATCH_FLOAT_FOR_CALCULATION(ev, q, jobz, uplo, n, a_data, lda, w_data, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); #endif } }; template <typename T> struct syheevd_scratchpad_size_impl { void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t &device_ws_size) { DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evd_scratchpad_size<T>, q, jobz, uplo, n, lda); } }; template <typename T> struct syheevd_impl { void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, void *w, void *device_ws, std::size_t device_ws_size, int *info) { using value_t = typename value_type_trait<T>::value_type; auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w)); DISPATCH_FLOAT_FOR_CALCULATION(evd, q, jobz, uplo, n, a_data, lda, w_data, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); } }; #undef DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE #undef DISPATCH_FLOAT_FOR_CALCULATION template <typename T> struct trtri_scratchpad_size_impl { void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t 
&device_ws_size) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else device_ws_size = oneapi::mkl::lapack::trtri_scratchpad_size<T>(q, uplo, diag, n, lda); #endif } }; template <typename T> struct trtri_impl { void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, void *device_ws, std::size_t device_ws_size, int *info) { #ifndef __INTEL_MKL__ throw std::runtime_error( "The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a)); auto device_ws_data = dpct::detail::get_memory(reinterpret_cast<T *>(device_ws)); oneapi::mkl::lapack::trtri(q, uplo, diag, n, a_data, lda, device_ws_data, device_ws_size); dpct::detail::dpct_memset(q, info, 0, sizeof(int)); #endif } }; } // namespace detail /// Computes the size of workspace memory of getrf function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] m The number of rows in the matrix A. /// \param [in] n The number of columns in the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [out] device_ws_size The workspace size in bytes. /// \param [out] host_ws_size The host workspace size in bytes. Currently the /// value is always zero. inline int getrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t *device_ws_size, std::size_t *host_ws_size = nullptr) { if (host_ws_size) *host_ws_size = 0; std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::getrf_scratchpad_size_impl>( q, a_type, nullptr, "getrf_scratchpad_size", q, m, n, a_type, lda, device_ws_size_tmp); *device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type); return ret; } /// Computes the LU factorization of a general m-by-n matrix. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q The queue where the routine should be executed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] m The number of rows in the matrix A. /// \param [in] n The number of columns in the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A. Overwritten by L and U. The unit /// diagonal elements of L are not stored. /// \param [in] lda The leading dimension of the matrix A. /// \param [out] ipiv The pivot indices. If \p ipiv is nullptr, non-pivoting /// LU factorization is computed. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The workspace size in bytes. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. 
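///
/// A minimal usage sketch (illustrative only), combining the scratchpad-size
/// query above with getrf for double data; the names q, d_a, d_ipiv, d_ws and
/// d_info are hypothetical.
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr std::int64_t n = 8;
/// double *d_a = sycl::malloc_device<double>(n * n, q);
/// std::int64_t *d_ipiv = sycl::malloc_device<std::int64_t>(n, q);
/// int *d_info = sycl::malloc_device<int>(1, q);
/// // ... fill d_a (column major, lda = n) ...
/// std::size_t ws_bytes = 0;
/// dpct::lapack::getrf_scratchpad_size(
///     q, n, n, dpct::library_data_t::real_double, n, &ws_bytes);
/// void *d_ws = sycl::malloc_device(ws_bytes, q);
/// dpct::lapack::getrf(q, n, n, dpct::library_data_t::real_double, d_a, n,
///                     d_ipiv, d_ws, ws_bytes, d_info);
/// q.wait();
/// \endcode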
inline int getrf(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, std::int64_t *ipiv, void *device_ws, std::size_t device_ws_size, int *info) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else std::size_t device_ws_size_in_element_number = detail::byte_to_element_number(device_ws_size, a_type); if (ipiv == nullptr) { return detail::lapack_shim<detail::getrfnp_impl>( q, a_type, info, "getrfnp_batch", q, m, n, a_type, a, lda, ipiv, device_ws, device_ws_size_in_element_number, info); } return detail::lapack_shim<detail::getrf_impl>( q, a_type, info, "getrf", q, m, n, a_type, a, lda, ipiv, device_ws, device_ws_size_in_element_number, info); #endif } /// Solves a system of linear equations with a LU-factored square coefficient /// matrix, with multiple right-hand sides. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q The queue where the routine should be executed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] trans Indicates the form of the linear equation. /// \param [in] n The order of the matrix A and the number of rows in matrix B. /// \param [in] nrhs The number of right hand sides. /// \param [in] a_type The data type of the matrix A. /// \param [in] a The input matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] ipiv The pivot indices. /// \param [in] b_type The data type of the matrix B. /// \param [in, out] b The matrix B, whose columns are the right-hand sides /// for the systems of equations. /// \param [in] ldb The leading dimension of the matrix B. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. inline int getrs(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n, std::int64_t nrhs, library_data_t a_type, void *a, std::int64_t lda, std::int64_t *ipiv, library_data_t b_type, void *b, std::int64_t ldb, int *info) { return detail::lapack_shim<detail::getrs_impl>( q, a_type, info, "getrs_scratchpad_size/getrs", q, trans, n, nrhs, a_type, a, lda, ipiv, b_type, b, ldb, info); } /// Computes the size of workspace memory of geqrf function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] m The number of rows in the matrix A. /// \param [in] n The number of columns in the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [out] device_ws_size The device workspace size in bytes. /// \param [out] host_ws_size The host workspace size in bytes. Currently the /// value is always zero. 
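///
/// A minimal usage sketch (illustrative only) of the workspace query together
/// with geqrf for double data; the names q, d_a, d_tau, d_ws and d_info are
/// hypothetical.
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr std::int64_t m = 8, n = 4;
/// double *d_a = sycl::malloc_device<double>(m * n, q);
/// double *d_tau = sycl::malloc_device<double>(n, q);
/// int *d_info = sycl::malloc_device<int>(1, q);
/// // ... fill d_a (column major, lda = m) ...
/// std::size_t ws_bytes = 0;
/// dpct::lapack::geqrf_scratchpad_size(
///     q, m, n, dpct::library_data_t::real_double, m, &ws_bytes);
/// void *d_ws = sycl::malloc_device(ws_bytes, q);
/// dpct::lapack::geqrf(q, m, n, dpct::library_data_t::real_double, d_a, m,
///                     dpct::library_data_t::real_double, d_tau, d_ws,
///                     ws_bytes, d_info);
/// q.wait();
/// \endcode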
inline int geqrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t *device_ws_size, std::size_t *host_ws_size = nullptr) { if (host_ws_size) *host_ws_size = 0; std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::geqrf_scratchpad_size_impl>( q, a_type, nullptr, "geqrf_scratchpad_size", q, m, n, a_type, lda, device_ws_size_tmp); *device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type); return ret; } /// Computes the QR factorization of a general m-by-n matrix. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q The queue where the routine should be executed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] m The number of rows in the matrix A. /// \param [in] n The number of columns in the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A. Overwritten by the factorization data. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] tau_type The data type of the array tau. /// \param [in] tau The array contains scalars that define elementary reflectors /// for the matrix Q in its decomposition in a product of elementary reflectors. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The workspace size in bytes. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. inline int geqrf(sycl::queue &q, std::int64_t m, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, library_data_t tau_type, void *tau, void *device_ws, std::size_t device_ws_size, int *info) { std::size_t device_ws_size_in_element_number = detail::byte_to_element_number(device_ws_size, a_type); return detail::lapack_shim<detail::geqrf_impl>( q, a_type, info, "geqrf", q, m, n, a_type, a, lda, tau_type, tau, device_ws, device_ws_size_in_element_number, info); } /// Computes the size of workspace memory of gesvd function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S' /// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N' /// (representing jobsvd::novec). /// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S' /// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N' /// (representing jobsvd::novec). /// \param [in] m The number of rows in the matrix A. /// \param [in] n The number of columns in the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] u_type The data type of the matrix U. /// \param [in] ldu The leading dimension of the matrix U. /// \param [in] vt_type The data type of the matrix VT. /// \param [in] ldvt The leading dimension of the matrix VT. /// \param [out] device_ws_size The device workspace size in bytes. /// \param [out] host_ws_size The host workspace size in bytes. Currently the /// value is always zero. 
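///
/// A minimal usage sketch (illustrative only) of the char-based interface,
/// requesting all singular vectors of a double-precision matrix; the names q,
/// d_a, d_s, d_u, d_vt, d_ws and d_info are hypothetical.
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr std::int64_t m = 6, n = 4;
/// double *d_a = sycl::malloc_device<double>(m * n, q);
/// double *d_s = sycl::malloc_device<double>(n, q);
/// double *d_u = sycl::malloc_device<double>(m * m, q);
/// double *d_vt = sycl::malloc_device<double>(n * n, q);
/// int *d_info = sycl::malloc_device<int>(1, q);
/// // ... fill d_a (column major, lda = m) ...
/// std::size_t ws_bytes = 0;
/// dpct::lapack::gesvd_scratchpad_size(
///     q, 'A', 'A', m, n, dpct::library_data_t::real_double, m,
///     dpct::library_data_t::real_double, m, dpct::library_data_t::real_double,
///     n, &ws_bytes);
/// void *d_ws = sycl::malloc_device(ws_bytes, q);
/// dpct::lapack::gesvd(q, 'A', 'A', m, n, dpct::library_data_t::real_double,
///                     d_a, m, dpct::library_data_t::real_double, d_s,
///                     dpct::library_data_t::real_double, d_u, m,
///                     dpct::library_data_t::real_double, d_vt, n, d_ws,
///                     ws_bytes, d_info);
/// q.wait();
/// \endcode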
inline int gesvd_scratchpad_size(sycl::queue &q, signed char jobu,
                                 signed char jobvt, std::int64_t m,
                                 std::int64_t n, library_data_t a_type,
                                 std::int64_t lda, library_data_t u_type,
                                 std::int64_t ldu, library_data_t vt_type,
                                 std::int64_t ldvt, std::size_t *device_ws_size,
                                 std::size_t *host_ws_size = nullptr) {
  oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu);
  oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt);
  if (host_ws_size)
    *host_ws_size = 0;
  std::size_t device_ws_size_tmp;
  int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>(
      q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu_enum, jobvt_enum, m,
      n, a_type, lda, u_type, ldu, vt_type, ldvt, device_ws_size_tmp);
  *device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
  return ret;
}

/// Computes the size of workspace memory of gesvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::vec or job::novec.
/// \param [in] all_vec Only has an effect when \p jobz is job::vec. If the
/// value is zero, all m columns of U are returned in the matrix U, otherwise
/// the first min(\p m, \p n) columns of U (the left singular vectors) are
/// returned in the matrix U.
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] u_type The data type of the matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \p a_type.
/// \param [out] host_ws_size The host workspace size as a number of elements
/// of type \p a_type. Currently the value is always zero.
inline int gesvd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
                                 std::int64_t all_vec, std::int64_t m,
                                 std::int64_t n, library_data_t a_type,
                                 std::int64_t lda, library_data_t u_type,
                                 std::int64_t ldu, library_data_t vt_type,
                                 std::int64_t ldvt, int *device_ws_size,
                                 std::size_t *host_ws_size = nullptr) {
  if (host_ws_size)
    *host_ws_size = 0;
  oneapi::mkl::jobsvd jobu;
  oneapi::mkl::jobsvd jobvt;
  if (jobz == oneapi::mkl::job::vec) {
    if (all_vec) {
      jobu = jobvt = oneapi::mkl::jobsvd::somevec;
    } else {
      jobu = jobvt = oneapi::mkl::jobsvd::vectors;
    }
  } else if (jobz == oneapi::mkl::job::novec) {
    jobu = jobvt = oneapi::mkl::jobsvd::novec;
  } else {
    throw std::runtime_error("the job type is unsupported");
  }
  std::size_t device_ws_size_64;
  int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>(
      q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu, jobvt, m, n, a_type,
      lda, u_type, ldu, vt_type, ldvt, device_ws_size_64);
  *device_ws_size = device_ws_size_64;
  return ret;
}

/// Computes the singular value decomposition (SVD) of a general rectangular
/// matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A; it is overwritten according to
/// \p jobu and \p jobvt.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] s_type The data type of the matrix S.
/// \param [out] s The output matrix S.
/// \param [in] u_type The data type of the matrix U.
/// \param [out] u The output matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [out] vt The output matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int gesvd(sycl::queue &q, signed char jobu, signed char jobvt,
                 std::int64_t m, std::int64_t n, library_data_t a_type, void *a,
                 std::int64_t lda, library_data_t s_type, void *s,
                 library_data_t u_type, void *u, std::int64_t ldu,
                 library_data_t vt_type, void *vt, std::int64_t ldvt,
                 void *device_ws, std::size_t device_ws_size, int *info) {
  oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu);
  oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt);
  std::size_t device_ws_size_in_element_number =
      detail::byte_to_element_number(device_ws_size, a_type);
  return detail::lapack_shim<detail::gesvd_impl>(
      q, a_type, info, "gesvd", q, jobu_enum, jobvt_enum, m, n, a_type, a, lda,
      s_type, s, u_type, u, ldu, vt_type, vt, ldvt, device_ws,
      device_ws_size_in_element_number, info);
}

/// Computes the singular value decomposition (SVD) of a general rectangular
/// matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::vec or job::novec.
/// \param [in] all_vec Only has an effect when \p jobz is job::vec. If the
/// value is zero, all m columns of U are returned in the matrix U, otherwise
/// the first min(\p m, \p n) columns of U (the left singular vectors) are
/// returned in the matrix U.
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A; it is overwritten according to
/// \p jobz and \p all_vec.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] s_type The data type of the matrix S.
/// \param [out] s The output matrix S.
/// \param [in] u_type The data type of the matrix U.
/// \param [out] u The output matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [out] vt The output matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \p a_type.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int gesvd(sycl::queue &q, oneapi::mkl::job jobz, std::int64_t all_vec,
                 std::int64_t m, std::int64_t n, library_data_t a_type, void *a,
                 std::int64_t lda, library_data_t s_type, void *s,
                 library_data_t u_type, void *u, std::int64_t ldu,
                 library_data_t vt_type, void *vt, std::int64_t ldvt,
                 void *device_ws, std::size_t device_ws_size, int *info) {
  oneapi::mkl::jobsvd jobu;
  oneapi::mkl::jobsvd jobvt;
  if (jobz == oneapi::mkl::job::vec) {
    if (all_vec) {
      jobu = jobvt = oneapi::mkl::jobsvd::somevec;
    } else {
      jobu = jobvt = oneapi::mkl::jobsvd::vectors;
    }
  } else if (jobz == oneapi::mkl::job::novec) {
    jobu = jobvt = oneapi::mkl::jobsvd::novec;
  } else {
    throw std::runtime_error("the job type is unsupported");
  }
  // Propagate the shim result so the documented return contract (0 on success,
  // 1 on a synchronous exception) also holds for this overload.
  return detail::lapack_shim<detail::gesvd_conj_impl>(
      q, a_type, info, "gesvd", q, jobu, jobvt, m, n, a_type, a, lda, s_type, s,
      u_type, u, ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info);
}

/// Computes the size of workspace memory of potrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int potrf_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo,
                                 std::int64_t n, library_data_t a_type,
                                 std::int64_t lda, std::size_t *device_ws_size,
                                 std::size_t *host_ws_size = nullptr) {
  if (host_ws_size)
    *host_ws_size = 0;
  std::size_t device_ws_size_tmp;
  int ret = detail::lapack_shim<detail::potrf_scratchpad_size_impl>(
      q, a_type, nullptr, "potrf_scratchpad_size", q, uplo, n, a_type, lda,
      device_ws_size_tmp);
  *device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
  return ret;
}

/// Computes the Cholesky factorization of a symmetric (Hermitian)
/// positive-definite matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the Cholesky factor U
/// or L, as specified by \p uplo.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
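///
/// A minimal usage sketch (illustrative only): Cholesky factorization followed
/// by a solve with the potrs wrapper declared below; the names q, d_a, d_b,
/// d_ws and d_info are hypothetical.
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr std::int64_t n = 8, nrhs = 2;
/// double *d_a = sycl::malloc_device<double>(n * n, q);
/// double *d_b = sycl::malloc_device<double>(n * nrhs, q);
/// int *d_info = sycl::malloc_device<int>(1, q);
/// // ... fill d_a (SPD) and d_b (right-hand sides) ...
/// std::size_t ws_bytes = 0;
/// dpct::lapack::potrf_scratchpad_size(q, oneapi::mkl::uplo::upper, n,
///                                     dpct::library_data_t::real_double, n,
///                                     &ws_bytes);
/// void *d_ws = sycl::malloc_device(ws_bytes, q);
/// dpct::lapack::potrf(q, oneapi::mkl::uplo::upper, n,
///                     dpct::library_data_t::real_double, d_a, n, d_ws,
///                     ws_bytes, d_info);
/// dpct::lapack::potrs(q, oneapi::mkl::uplo::upper, n, nrhs,
///                     dpct::library_data_t::real_double, d_a, n,
///                     dpct::library_data_t::real_double, d_b, n, d_info);
/// q.wait();
/// \endcode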
inline int potrf(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, void *device_ws, std::size_t device_ws_size, int *info) { std::size_t device_ws_size_in_element_number = detail::byte_to_element_number(device_ws_size, a_type); return detail::lapack_shim<detail::potrf_impl>( q, a_type, info, "potrf", q, uplo, n, a_type, a, lda, device_ws, device_ws_size_in_element_number, info); } /// Solves a system of linear equations with a Cholesky-factored symmetric /// (Hermitian) positive-definite coefficient matrix. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q The queue where the routine should be executed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A and the number of rows in matrix B. /// \param [in] nrhs The number of right hand sides. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A. Overwritten by the Cholesky factor U /// or L, as specified by \p uplo. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] b_type The data type of the matrix B. /// \param [in, out] b The matrix B, whose columns are the right-hand sides /// for the systems of equations. /// \param [in] ldb The leading dimension of the matrix B. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. inline int potrs(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t nrhs, library_data_t a_type, void *a, std::int64_t lda, library_data_t b_type, void *b, std::int64_t ldb, int *info) { return detail::lapack_shim<detail::potrs_impl>( q, a_type, info, "potrs_scratchpad_size/potrs", q, uplo, n, nrhs, a_type, a, lda, b_type, b, ldb, info); } /// Computes the size of workspace memory of syevx/heevx function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] range Must be rangev::all, rangev::values or uplo::indices. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] vl If range == rangev::values, the lower bound of the interval /// to be searched for eigenvalues /// \param [in] vu If range == rangev::values, the upper bound of the interval /// to be searched for eigenvalues /// \param [in] il If range == rangev::indices, the indices of the smallest /// eigenvalue to be returned. /// \param [in] iu If range == rangev::indices, the indices of the largest /// eigenvalue to be returned. /// \param [in] w_type The data type of the eigenvalues. /// \param [out] device_ws_size The device workspace size in bytes. /// \param [out] host_ws_size The host workspace size in bytes. Currently the /// value is always zero. 
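///
/// A minimal usage sketch (illustrative only): selecting the three smallest
/// eigenvalues of a double-precision symmetric matrix with the syheevx wrapper
/// declared below; the names q, d_a, d_w, d_ws and d_info are hypothetical.
/// Note that vl and vu are passed by address even when range ==
/// rangev::indices.
/// \code
/// sycl::queue q{sycl::property::queue::in_order()};
/// constexpr std::int64_t n = 8;
/// double *d_a = sycl::malloc_device<double>(n * n, q);
/// double *d_w = sycl::malloc_device<double>(n, q);
/// int *d_info = sycl::malloc_device<int>(1, q);
/// double vl = 0.0, vu = 0.0;
/// std::int64_t found = 0;
/// // ... fill d_a (symmetric) ...
/// std::size_t ws_bytes = 0;
/// dpct::lapack::syheevx_scratchpad_size(
///     q, oneapi::mkl::job::vec, oneapi::mkl::rangev::indices,
///     oneapi::mkl::uplo::upper, n, dpct::library_data_t::real_double, n, &vl,
///     &vu, 1, 3, dpct::library_data_t::real_double, &ws_bytes);
/// void *d_ws = sycl::malloc_device(ws_bytes, q);
/// dpct::lapack::syheevx(q, oneapi::mkl::job::vec, oneapi::mkl::rangev::indices,
///                       oneapi::mkl::uplo::upper, n,
///                       dpct::library_data_t::real_double, d_a, n, &vl, &vu,
///                       1, 3, &found, dpct::library_data_t::real_double, d_w,
///                       d_ws, ws_bytes, d_info);
/// \endcode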
inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, std::int64_t lda, void *vl, void *vu, std::int64_t il, std::int64_t iu, library_data_t w_type, std::size_t *device_ws_size, std::size_t *host_ws_size = nullptr) { if (host_ws_size) *host_ws_size = 0; oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>( q, a_type, nullptr, "syevx_scratchpad_size/heevx_scratchpad_size", q, compz_jobz, range, uplo, n, lda, vl, vu, il, iu, device_ws_size_tmp); *device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type); return ret; } /// Computes selected eigenvalues and, optionally, eigenvectors of a /// symmetric/Hermitian matrix. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] range Must be rangev::all, rangev::values or uplo::indices. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is /// overwritten. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] vl If range == rangev::values, the lower bound of the interval /// to be searched for eigenvalues /// \param [in] vu If range == rangev::values, the upper bound of the interval /// to be searched for eigenvalues /// \param [in] il If range == rangev::indices, the indices of the smallest /// eigenvalue to be returned. /// \param [in] iu If range == rangev::indices, the indices of the largest /// eigenvalue to be returned. /// \param [out] m The total number of eigenvalues found. /// \param [in] w_type The data type of the eigenvalues. /// \param [out] w The eigenvalues of the matrix A in ascending order. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The workspace size in bytes. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, void *vl, void *vu, std::int64_t il, std::int64_t iu, std::int64_t *m, library_data_t w_type, void *w, void *device_ws, std::size_t device_ws_size, int *info) { std::size_t device_ws_size_in_element_number = detail::byte_to_element_number(device_ws_size, a_type); oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); int ret = detail::lapack_shim<detail::syheevx_impl>( q, a_type, info, "syevx/heevx", q, compz_jobz, range, uplo, n, a_type, a, lda, vl, vu, il, iu, m, w_type, w, device_ws, device_ws_size_in_element_number, info); q.wait(); return ret; } /// Computes the size of workspace memory of syevx/heevx function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. 
/// \param [in] range Must be rangev::all, rangev::values or uplo::indices. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] vl If range == rangev::values, the lower bound of the interval /// to be searched for eigenvalues /// \param [in] vu If range == rangev::values, the upper bound of the interval /// to be searched for eigenvalues /// \param [in] il If range == rangev::indices, the indices of the smallest /// eigenvalue to be returned. /// \param [in] iu If range == rangev::indices, the indices of the largest /// eigenvalue to be returned. /// \param [out] device_ws_size The device workspace size as a number of /// elements of type \tparam T. template <typename T, typename ValueT> inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n, int lda, ValueT vl, ValueT vu, int il, int iu, int *device_ws_size) { oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>( q, detail::get_library_data_t_from_type<T>(), nullptr, "syevx_scratchpad_size/heevx_scratchpad_size", q, compz_jobz, range, uplo, n, lda, &vl, &vu, il, iu, device_ws_size_tmp); *device_ws_size = (int)device_ws_size_tmp; return ret; } /// Computes selected eigenvalues and, optionally, eigenvectors of a /// symmetric/Hermitian matrix. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] range Must be rangev::all, rangev::values or uplo::indices. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is /// overwritten. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] vl If range == rangev::values, the lower bound of the interval /// to be searched for eigenvalues /// \param [in] vu If range == rangev::values, the upper bound of the interval /// to be searched for eigenvalues /// \param [in] il If range == rangev::indices, the indices of the smallest /// eigenvalue to be returned. /// \param [in] iu If range == rangev::indices, the indices of the largest /// eigenvalue to be returned. /// \param [out] m The total number of eigenvalues found. /// \param [out] w The eigenvalues of the matrix A in ascending order. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The device workspace size as a number of /// elements of type \tparam T. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. 
template <typename T, typename ValueT> inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n, T *a, int lda, ValueT vl, ValueT vu, int il, int iu, int *m, ValueT *w, T *device_ws, int device_ws_size, int *info) { oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); std::int64_t m64; int ret = detail::lapack_shim<detail::syheevx_impl>( q, detail::get_library_data_t_from_type<T>(), info, "syevx/heevx", q, compz_jobz, range, uplo, n, detail::get_library_data_t_from_type<T>(), a, lda, &vl, &vu, il, iu, &m64, detail::get_library_data_t_from_type<ValueT>(), w, device_ws, device_ws_size, info); q.wait(); *m = (int)m64; return ret; } /// Computes the size of workspace memory of sygvx/hegvx function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] itype Must be 1, 2 or 3. /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] range Must be rangev::all, rangev::values or uplo::indices. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] ldb The leading dimension of the matrix B. /// \param [in] vl If range == rangev::values, the lower bound of the interval /// to be searched for eigenvalues /// \param [in] vu If range == rangev::values, the upper bound of the interval /// to be searched for eigenvalues /// \param [in] il If range == rangev::indices, the indices of the smallest /// eigenvalue to be returned. /// \param [in] iu If range == rangev::indices, the indices of the largest /// eigenvalue to be returned. /// \param [in] device_ws_size The device workspace size as a number of /// elements of type \tparam T. template <typename T, typename ValueT> inline int syhegvx_scratchpad_size(sycl::queue &q, int itype, oneapi::mkl::job jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n, int lda, int ldb, ValueT vl, ValueT vu, int il, int iu, int *device_ws_size) { oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::syhegvx_scratchpad_size_impl>( q, detail::get_library_data_t_from_type<T>(), nullptr, "sygvx_scratchpad_size/hegvx_scratchpad_size", q, itype, compz_jobz, range, uplo, n, lda, ldb, &vl, &vu, il, iu, device_ws_size_tmp); *device_ws_size = (int)device_ws_size_tmp; return ret; } /// Computes selected eigenvalues and, optionally, eigenvectors of a real /// generalized symmetric/Hermitian definite eigenproblem. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] itype Must be 1, 2 or 3. /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] range Must be rangev::all, rangev::values or uplo::indices. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is /// overwritten. /// \param [in] lda The leading dimension of the matrix A. /// \param [in, out] b The input matrix B. /// \param [in] ldb The leading dimension of the matrix B. 
/// \param [in] vl If range == rangev::values, the lower bound of the interval /// to be searched for eigenvalues /// \param [in] vu If range == rangev::values, the upper bound of the interval /// to be searched for eigenvalues /// \param [in] il If range == rangev::indices, the indices of the smallest /// eigenvalue to be returned. /// \param [in] iu If range == rangev::indices, the indices of the largest /// eigenvalue to be returned. /// \param [out] m The total number of eigenvalues found. /// \param [out] w The eigenvalues of the matrix A in ascending order. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The device workspace size as a number of /// elements of type \tparam T. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. template <typename T, typename ValueT> inline int syhegvx(sycl::queue &q, int itype, oneapi::mkl::job jobz, oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb, ValueT vl, ValueT vu, int il, int iu, int *m, ValueT *w, T *device_ws, int device_ws_size, int *info) { oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); std::int64_t m64; int ret = detail::lapack_shim<detail::syhegvx_impl>( q, detail::get_library_data_t_from_type<T>(), info, "sygvx/hegvx", q, itype, compz_jobz, range, uplo, n, a, lda, b, ldb, &vl, &vu, il, iu, &m64, w, device_ws, device_ws_size, info); q.wait(); *m = (int)m64; return ret; } /// Computes the size of workspace memory of sygvd/hegvd function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] itype Must be 1, 2 or 3. /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] ldb The leading dimension of the matrix B. /// \param [in] device_ws_size The device workspace size as a number of /// elements of type \tparam T. template <typename T> inline int syhegvd_scratchpad_size(sycl::queue &q, int itype, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, int n, int lda, int ldb, int *device_ws_size) { std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::syhegvd_scratchpad_size_impl>( q, detail::get_library_data_t_from_type<T>(), nullptr, "sygvd_scratchpad_size/hegvd_scratchpad_size", q, itype, jobz, uplo, n, lda, ldb, device_ws_size_tmp); *device_ws_size = (int)device_ws_size_tmp; return ret; } /// Computes all eigenvalues and, optionally, eigenvectors of a real generalized /// symmetric/Hermitian definite eigenproblem using a divide and conquer method. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] itype Must be 1, 2 or 3. /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in, out] a The input matrix A. On exit, it is overwritten by eigenvectors. /// \param [in] lda The leading dimension of the matrix A. /// \param [in, out] b The input matrix B. 
/// \param [in] ldb The leading dimension of the matrix B. /// \param [out] w The eigenvalues of the matrix A in ascending order. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The device workspace size as a number of /// elements of type \tparam T. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. template <typename T, typename ValueT> inline int syhegvd(sycl::queue &q, int itype, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb, ValueT *w, T *device_ws, int device_ws_size, int *info) { return detail::lapack_shim<detail::syhegvd_impl>( q, detail::get_library_data_t_from_type<T>(), info, "sygvd/hegvd", q, itype, jobz, uplo, n, a, lda, b, ldb, w, device_ws, device_ws_size, info); } /// Computes the size of workspace memory of syev/heev function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [out] device_ws_size The device workspace size as a number of /// elements of type \tparam T. template <typename T> inline int syheev_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, int n, int lda, int *device_ws_size) { std::size_t device_ws_size_tmp; oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); int ret = detail::lapack_shim<detail::syheev_scratchpad_size_impl>( q, detail::get_library_data_t_from_type<T>(), nullptr, "syev_scratchpad_size/heev_scratchpad_size", q, compz_jobz, uplo, n, lda, device_ws_size_tmp); *device_ws_size = (int)device_ws_size_tmp; return ret; } /// Computes all eigenvalues and, optionally, eigenvectors of a real symmetric /// or Hermitian matrix. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in, out] a The input matrix A. On exit, it is overwritten by /// eigenvectors. /// \param [in] lda The leading dimension of the matrix A. /// \param [out] w The eigenvalues of the matrix A in ascending order. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The device workspace size as a number of /// elements of type \tparam T. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. template <typename T, typename ValueT> inline int syheev(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, int n, T *a, int lda, ValueT *w, T *device_ws, int device_ws_size, int *info) { oneapi::mkl::compz compz_jobz = detail::job2compz(jobz); return detail::lapack_shim<detail::syheev_impl>( q, detail::get_library_data_t_from_type<T>(), info, "syev/heev", q, compz_jobz, uplo, n, a, lda, w, device_ws, device_ws_size, info); } /// Computes the size of workspace memory of syevd/heevd function. 
/// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] w_type The data type of the eigenvalues. /// \param [out] device_ws_size The device workspace in bytes. /// \param [out] host_ws_size The host workspace size in bytes. Currently the /// value is always zero. inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, std::int64_t lda, library_data_t w_type, std::size_t *device_ws_size, std::size_t *host_ws_size = nullptr) { if (host_ws_size) *host_ws_size = 0; std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>( q, a_type, nullptr, "syevd_scratchpad_size/heevd_scratchpad_size", q, jobz, uplo, n, a_type, lda, device_ws_size_tmp); *device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type); return ret; } /// Computes all eigenvalues and, optionally, all eigenvectors of a real /// symmetric or Hermitian matrix using divide and conquer algorithm. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A. On exit, it is overwritten by /// eigenvectors. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] w_type The data type of the eigenvalues. /// \param [out] w The eigenvalues of the matrix A in ascending order. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The workspace size in bytes. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, library_data_t w_type, void *w, void *device_ws, std::size_t device_ws_size, int *info) { std::size_t device_ws_size_in_element_number = detail::byte_to_element_number(device_ws_size, a_type); return detail::lapack_shim<detail::syheevd_impl>( q, a_type, info, "syevd/heevd", q, jobz, uplo, n, a_type, a, lda, w, device_ws, device_ws_size_in_element_number, info); } /// Computes the size of workspace memory of syevd/heevd function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. 
/// \param [in] w_type The data type of the eigenvalues. /// \param [out] device_ws_size The device workspace size as a number of /// elements of type \tparam T. template <typename T> inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda, int *device_ws_size) { std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>( q, detail::get_library_data_t_from_type<T>(), nullptr, "syevd_scratchpad_size/heevd_scratchpad_size", q, jobz, uplo, n, detail::get_library_data_t_from_type<T>(), lda, device_ws_size_tmp); *device_ws_size = (int)device_ws_size_tmp; return ret; } /// Computes all eigenvalues and, optionally, all eigenvectors of a real /// symmetric or Hermitian matrix using divide and conquer algorithm. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] jobz Must be job::novec or job::vec. /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A. On exit, it is overwritten by /// eigenvectors. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] w_type The data type of the eigenvalues. /// \param [out] w The eigenvalues of the matrix A in ascending order. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The workspace size as a number of /// elements of type \tparam T. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. template <typename T, typename ValueT> inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo, std::int64_t n, T *a, std::int64_t lda, ValueT *w, T *device_ws, int device_ws_size, int *info) { return detail::lapack_shim<detail::syheevd_impl>( q, detail::get_library_data_t_from_type<T>(), info, "syevd/heevd", q, jobz, uplo, n, detail::get_library_data_t_from_type<T>(), a, lda, w, device_ws, device_ws_size, info); } /// Computes the size of workspace memory of trtri function. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] diag Must be diag::nonunit or diag::unit. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in] lda The leading dimension of the matrix A. /// \param [out] device_ws_size The device workspace in bytes. /// \param [out] host_ws_size The host workspace size in bytes. Currently the /// value is always zero. 
inline int trtri_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type, std::int64_t lda, std::size_t *device_ws_size, std::size_t *host_ws_size = nullptr) { if (host_ws_size) *host_ws_size = 0; std::size_t device_ws_size_tmp; int ret = detail::lapack_shim<detail::trtri_scratchpad_size_impl>( q, a_type, nullptr, "trtri_scratchpad_size", q, uplo, diag, n, a_type, lda, device_ws_size_tmp); *device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type); return ret; } /// Computes the inverse of a triangular matrix. /// \return Returns 0 if no synchronous exception, otherwise returns 1. /// \param [in] q Device queue where computation will be performed. It must /// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is /// not defined). /// \param [in] uplo Must be uplo::upper or uplo::lower. /// \param [in] diag Must be diag::nonunit or diag::unit. /// \param [in] n The order of the matrix A. /// \param [in] a_type The data type of the matrix A. /// \param [in, out] a The input matrix A. On exit, it is overwritten by /// the inverse matrix of A. /// \param [in] lda The leading dimension of the matrix A. /// \param [in] device_ws The workspace. /// \param [in] device_ws_size The workspace size in bytes. /// \param [out] info If lapack synchronous exception is caught, the value /// returned from info() method of the exception is set to \p info. inline int trtri(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type, void *a, std::int64_t lda, void *device_ws, std::size_t device_ws_size, int *info) { #ifdef DPCT_USM_LEVEL_NONE throw std::runtime_error("this API is unsupported when USM level is none"); #else std::size_t device_ws_size_in_element_number = detail::byte_to_element_number(device_ws_size, a_type); return detail::lapack_shim<detail::trtri_impl>( q, a_type, info, "trtri", q, uplo, diag, n, a_type, a, lda, device_ws, device_ws_size_in_element_number, info); #endif } } // namespace lapack } // namespace dpct #endif // __DPCT_LAPACK_UTILS_HPP__
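// ---------------------------------------------------------------------------
// Illustrative usage sketch (added for clarity; not part of the original
// header). It shows one way the typed eigensolver helpers defined above
// (syheevd_scratchpad_size<T> and syheevd) might be driven from USM host
// code. The matrix order, the helper name example_syheevd_usage, and the
// memory management below are assumptions made only for this example; the
// queue is expected to have the in_order property, per the documentation
// above.
// ---------------------------------------------------------------------------
inline int example_syheevd_usage(sycl::queue &q) {
  constexpr std::int64_t n = 4;    // order of the symmetric matrix A (assumed)
  constexpr std::int64_t lda = n;  // leading dimension, column-major storage

  // USM allocations for A (overwritten by eigenvectors on exit), the
  // eigenvalues w, and the info value written by the helper.
  float *a = sycl::malloc_device<float>(lda * n, q);
  float *w = sycl::malloc_device<float>(n, q);
  int *info = sycl::malloc_shared<int>(1, q);

  // ... fill `a` with a symmetric matrix here (omitted in this sketch) ...

  // Query the workspace size (in elements of float for the typed overload),
  // then allocate the device workspace.
  int ws_size = 0;
  dpct::lapack::syheevd_scratchpad_size<float>(
      q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, &ws_size);
  float *ws = sycl::malloc_device<float>(ws_size, q);

  // Compute all eigenvalues and eigenvectors. A return value of 0 means no
  // synchronous exception was raised; otherwise *info carries the value from
  // the caught LAPACK exception.
  int ret = dpct::lapack::syheevd(q, oneapi::mkl::job::vec,
                                  oneapi::mkl::uplo::upper, n, a, lda, w, ws,
                                  ws_size, info);
  q.wait();

  sycl::free(ws, q);
  sycl::free(info, q);
  sycl::free(w, q);
  sycl::free(a, q);
  return ret;
}
// Note: the void*/library_data_t overloads above behave the same way but take
// the workspace size in bytes rather than in elements of T.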
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/fft_utils.hpp
//==---- fft_utils.hpp ----------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_FFT_UTILS_HPP__ #define __DPCT_FFT_UTILS_HPP__ #include <sycl/sycl.hpp> #include <oneapi/mkl.hpp> #include <optional> #include <utility> #include "lib_common_utils.hpp" namespace dpct { namespace fft { /// An enumeration type to describe the FFT direction is forward or backward. enum fft_direction : int { forward = 0, backward }; /// An enumeration type to describe the types of FFT input and output data. enum fft_type : int { real_float_to_complex_float = 0, complex_float_to_real_float, real_double_to_complex_double, complex_double_to_real_double, complex_float_to_complex_float, complex_double_to_complex_double, }; /// A class to perform FFT calculation. class fft_engine { public: /// Default constructor. fft_engine() {} /// Commit the configuration to calculate n-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] dim Dimension number of the data. /// \param [in] n Pointer to an array containing each dimension's size. /// \param [in] inembed Pointer to an array containing each dimension's size /// of the embedded input data. /// \param [in] istride Stride size of the input data. /// \param [in] idist Distance between the two batches of the input data. /// \param [in] input_type Input data type. /// \param [in] onembed Pointer to an array containing each dimension's size /// of the embedded output data. /// \param [in] ostride Stride size of the output data. /// \param [in] odist Distance between the two batches of the output data. /// \param [in] output_type Output data type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] scratchpad_size The workspace size required for this FFT. /// If this value is used to allocate memory, \p direction_and_placement need /// to be specified explicitly to get correct result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. void commit(sycl::queue *exec_queue, int dim, long long *n, long long *inembed, long long istride, long long idist, library_data_t input_type, long long *onembed, long long ostride, long long odist, library_data_t output_type, long long batch, size_t *scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { _q = exec_queue; init<long long>(dim, n, inembed, istride, idist, input_type, onembed, ostride, odist, output_type, batch, direction_and_placement); if (scratchpad_size) { if (_is_estimate_call) *scratchpad_size = _workspace_estimate_bytes; else *scratchpad_size = _workspace_bytes; } } /// Commit the configuration to calculate n-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] dim Dimension number of the data. /// \param [in] n Pointer to an array containing each dimension's size. /// \param [in] inembed Pointer to an array containing each dimension's size /// of the embedded input data. 
/// \param [in] istride Stride size of the input data. /// \param [in] idist Distance between the two batches of the input data. /// \param [in] input_type Input data type. /// \param [in] onembed Pointer to an array containing each dimension's size /// of the embedded output data. /// \param [in] ostride Stride size of the output data. /// \param [in] odist Distance between the two batches of the output data. /// \param [in] output_type Output data type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] scratchpad_size The workspace size required for this FFT. /// If this value is used to allocate memory, \p direction_and_placement need /// to be specified explicitly to get correct result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed, int istride, int idist, library_data_t input_type, int *onembed, int ostride, int odist, library_data_t output_type, int batch, size_t *scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { _q = exec_queue; init<int>(dim, n, inembed, istride, idist, input_type, onembed, ostride, odist, output_type, batch, direction_and_placement); if (scratchpad_size) { if (_is_estimate_call) *scratchpad_size = _workspace_estimate_bytes; else *scratchpad_size = _workspace_bytes; } } /// Commit the configuration to calculate n-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] dim Dimension number of the data. /// \param [in] n Pointer to an array containing each dimension's size. /// \param [in] inembed Pointer to an array containing each dimension's size /// of the embedded input data. /// \param [in] istride Stride size of the input data. /// \param [in] idist Distance between the two batches of the input data. /// \param [in] onembed Pointer to an array containing each dimension's size /// of the embedded output data. /// \param [in] ostride Stride size of the output data. /// \param [in] odist Distance between the two batches of the output data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] scratchpad_size The workspace size required for this FFT. /// If this value is used to allocate memory, \p direction_and_placement need /// to be specified explicitly to get correct result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. 
void commit(sycl::queue *exec_queue, int dim, long long *n, long long *inembed, long long istride, long long idist, long long *onembed, long long ostride, long long odist, fft_type type, long long batch, size_t *scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { commit(exec_queue, dim, n, inembed, istride, idist, fft_type_to_data_type(type).first, onembed, ostride, odist, fft_type_to_data_type(type).second, batch, scratchpad_size, direction_and_placement); } /// Commit the configuration to calculate n-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] dim Dimension number of the data. /// \param [in] n Pointer to an array containing each dimension's size. /// \param [in] inembed Pointer to an array containing each dimension's size /// of the embedded input data. /// \param [in] istride Stride size of the input data. /// \param [in] idist Distance between the two batches of the input data. /// \param [in] onembed Pointer to an array containing each dimension's size /// of the embedded output data. /// \param [in] ostride Stride size of the output data. /// \param [in] odist Distance between the two batches of the output data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] scratchpad_size The workspace size required for this FFT. /// If this value is used to allocate memory, \p direction_and_placement need /// to be specified explicitly to get correct result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, fft_type type, int batch, size_t *scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { commit(exec_queue, dim, n, inembed, istride, idist, fft_type_to_data_type(type).first, onembed, ostride, odist, fft_type_to_data_type(type).second, batch, scratchpad_size, direction_and_placement); } /// Commit the configuration to calculate 1-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] n1 The size of the dimension of the data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] scratchpad_size The workspace size required for this FFT. /// If this value is used to allocate memory, \p direction_and_placement need /// to be specified explicitly to get correct result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. 
void commit(sycl::queue *exec_queue, int n1, fft_type type, int batch, size_t *scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { _q = exec_queue; _n.resize(1); _n[0] = n1; std::tie(_input_type, _output_type) = fft_type_to_data_type(type); _dim = 1; _batch = batch; _is_basic = true; if (direction_and_placement.has_value()) { _is_user_specified_dir_and_placement = true; _direction = direction_and_placement->first; _is_inplace = direction_and_placement->second; } config_and_commit_basic(); if (scratchpad_size) { if (_is_estimate_call) *scratchpad_size = _workspace_estimate_bytes; else *scratchpad_size = _workspace_bytes; } } /// Commit the configuration to calculate 2-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] n2 The size of the 2nd dimension (outermost) of the data. /// \param [in] n1 The size of the 1st dimension (innermost) of the data. /// \param [in] type The FFT type. /// \param [out] scratchpad_size The workspace size required for this FFT. /// If this value is used to allocate memory, \p direction_and_placement need /// to be specified explicitly to get correct result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. void commit(sycl::queue *exec_queue, int n2, int n1, fft_type type, size_t *scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { _q = exec_queue; _n.resize(2); _n[0] = n2; _n[1] = n1; std::tie(_input_type, _output_type) = fft_type_to_data_type(type); _dim = 2; _is_basic = true; if (direction_and_placement.has_value()) { _is_user_specified_dir_and_placement = true; _direction = direction_and_placement->first; _is_inplace = direction_and_placement->second; } config_and_commit_basic(); if (scratchpad_size) { if (_is_estimate_call) *scratchpad_size = _workspace_estimate_bytes; else *scratchpad_size = _workspace_bytes; } } /// Commit the configuration to calculate 3-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] n3 The size of the 3rd dimension (outermost) of the data. /// \param [in] n2 The size of the 2nd dimension of the data. /// \param [in] n1 The size of the 1st dimension (innermost) of the data. /// \param [in] type The FFT type. /// \param [out] scratchpad_size The workspace size required for this FFT. /// If this value is used to allocate memory, \p direction_and_placement need /// to be specified explicitly to get correct result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. 
void commit(sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type, size_t *scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { _q = exec_queue; _n.resize(3); _n[0] = n3; _n[1] = n2; _n[2] = n1; std::tie(_input_type, _output_type) = fft_type_to_data_type(type); _dim = 3; _is_basic = true; if (direction_and_placement.has_value()) { _is_user_specified_dir_and_placement = true; _direction = direction_and_placement->first; _is_inplace = direction_and_placement->second; } config_and_commit_basic(); if (scratchpad_size) { if (_is_estimate_call) *scratchpad_size = _workspace_estimate_bytes; else *scratchpad_size = _workspace_bytes; } } /// Create the class for calculate 1-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] n1 The size of the dimension of the data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. static fft_engine * create(sycl::queue *exec_queue, int n1, fft_type type, int batch, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = new fft_engine(); engine->commit(exec_queue, n1, type, batch, nullptr, direction_and_placement); return engine; } /// Create the class for calculate 2-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] n2 The size of the 2nd dimension (outermost) of the data. /// \param [in] n1 The size of the 1st dimension (innermost) of the data. /// \param [in] type The FFT type. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. static fft_engine * create(sycl::queue *exec_queue, int n2, int n1, fft_type type, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = new fft_engine(); engine->commit(exec_queue, n2, n1, type, nullptr, direction_and_placement); return engine; } /// Create the class for calculate 3-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] n3 The size of the 3rd dimension (outermost) of the data. /// \param [in] n2 The size of the 2nd dimension of the data. /// \param [in] n1 The size of the 1st dimension (innermost) of the data. /// \param [in] type The FFT type. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. 
static fft_engine * create(sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = new fft_engine(); engine->commit(exec_queue, n3, n2, n1, type, nullptr, direction_and_placement); return engine; } /// Create the class for calculate n-D FFT. /// \param [in] exec_queue The queue where the calculation should be executed. /// \param [in] dim Dimension number of the data. /// \param [in] n Pointer to an array containing each dimension's size. /// \param [in] inembed Pointer to an array containing each dimension's size /// of the embedded input data. /// \param [in] istride Stride size of the input data. /// \param [in] idist Distance between the two batches of the input data. /// \param [in] onembed Pointer to an array containing each dimension's size /// of the embedded output data. /// \param [in] ostride Stride size of the output data. /// \param [in] odist Distance between the two batches of the output data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If this value is specified, the direction parameter /// will be ignored in the fft_engine::compute function. If it is not set, /// forward direction(if current FFT is complex-to-complex) and out-of-place /// (false) are set by default. static fft_engine * create(sycl::queue *exec_queue, int dim, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, fft_type type, int batch, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = new fft_engine(); engine->commit(exec_queue, dim, n, inembed, istride, idist, onembed, ostride, odist, type, batch, nullptr, direction_and_placement); return engine; } /// Create the class for calculate FFT without commit any config. static fft_engine *create() { fft_engine *engine = new fft_engine(); return engine; } /// Destroy the class for calculate FFT. /// \param [in] engine Pointer returned from fft_engine::craete. static void destroy(fft_engine *engine) { delete engine; } #ifdef __INTEL_MKL__ /// Estimates the workspace size for calculating n-D FFT. /// \param [in] dim Dimension number of the data. /// \param [in] n Pointer to an array containing each dimension's size. /// \param [in] inembed Pointer to an array containing each dimension's size /// of the embedded input data. /// \param [in] istride Stride size of the input data. /// \param [in] idist Distance between the two batches of the input data. /// \param [in] onembed Pointer to an array containing each dimension's size /// of the embedded output data. /// \param [in] ostride Stride size of the output data. /// \param [in] odist Distance between the two batches of the output data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] estimated_scratchpad_size The estimated workspace size /// required for this FFT. If this value is used to allocate memory, /// \p direction_and_placement need to be specified explicitly to get correct /// result. /// \param [in] direction_and_placement Explicitly specify the FFT /// direction and placement info. If it is not set, forward direction(if /// current FFT is complex-to-complex) and out-of-place (false) are set by default. 
static void estimate_size(int dim, long long *n, long long *inembed, long long istride, long long idist, long long *onembed, long long ostride, long long odist, fft_type type, long long batch, size_t *estimated_scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = fft_engine::create(); engine->_is_estimate_call = true; engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist, fft_type_to_data_type(type).first, onembed, ostride, odist, fft_type_to_data_type(type).second, batch, estimated_scratchpad_size, direction_and_placement); fft_engine::destroy(engine); } /// Estimates the workspace size for calculating n-D FFT. /// \param [in] dim Dimension number of the data. /// \param [in] n Pointer to an array containing each dimension's size. /// \param [in] inembed Pointer to an array containing each dimension's size /// of the embedded input data. /// \param [in] istride Stride size of the input data. /// \param [in] idist Distance between the two batches of the input data. /// \param [in] onembed Pointer to an array containing each dimension's size /// of the embedded output data. /// \param [in] ostride Stride size of the output data. /// \param [in] odist Distance between the two batches of the output data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] estimated_scratchpad_size The estimated workspace size /// required for this FFT. If this value is used to allocate memory, /// \p direction_and_placement need to be specified explicitly to get correct /// result. /// \param [in] direction_and_placement Explicitly specify the FFT /// direction and placement info. If it is not set, forward direction(if /// current FFT is complex-to-complex) and out-of-place (false) are set by default. static void estimate_size(int dim, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, fft_type type, int batch, size_t *estimated_scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = fft_engine::create(); engine->_is_estimate_call = true; engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist, fft_type_to_data_type(type).first, onembed, ostride, odist, fft_type_to_data_type(type).second, batch, estimated_scratchpad_size, direction_and_placement); fft_engine::destroy(engine); } /// Estimates the workspace size for calculating 1-D FFT. /// \param [in] n1 The size of the dimension of the data. /// \param [in] type The FFT type. /// \param [in] batch The number of FFT operations to perform. /// \param [out] estimated_scratchpad_size The estimated workspace size /// required for this FFT. If this value is used to allocate memory, /// \p direction_and_placement need to be specified explicitly to get correct /// result. /// \param [in] direction_and_placement Explicitly specify the FFT direction /// and placement info. If it is not set, forward direction(if current FFT is /// complex-to-complex) and out-of-place (false) are set by default. 
static void estimate_size(int n1, fft_type type, int batch, size_t *estimated_scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = fft_engine::create(); engine->_is_estimate_call = true; engine->commit(&dpct::get_default_queue(), n1, type, batch, estimated_scratchpad_size, direction_and_placement); fft_engine::destroy(engine); } /// Estimates the workspace size for calculating 2-D FFT. /// \param [in] n2 The size of the 2nd dimension (outermost) of the data. /// \param [in] n1 The size of the 1st dimension (innermost) of the data. /// \param [in] type The FFT type. /// \param [out] estimated_scratchpad_size The estimated workspace size /// required for this FFT. If this value is used to allocate memory, /// \p direction_and_placement need to be specified explicitly to get correct /// result. /// \param [in] direction_and_placement Explicitly specify the FFT /// direction and placement info. If it is not set, forward direction(if /// current FFT is complex-to-complex) and out-of-place (false) are set by default. static void estimate_size(int n2, int n1, fft_type type, size_t *estimated_scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = fft_engine::create(); engine->_is_estimate_call = true; engine->commit(&dpct::get_default_queue(), n2, n1, type, estimated_scratchpad_size, direction_and_placement); fft_engine::destroy(engine); } /// Estimates the workspace size for calculating 3-D FFT. /// \param [in] n3 The size of the 3rd dimension (outermost) of the data. /// \param [in] n2 The size of the 2nd dimension of the data. /// \param [in] n1 The size of the 1st dimension (innermost) of the data. /// \param [in] type The FFT type. /// \param [out] estimated_scratchpad_size The estimated workspace size /// required for this FFT. If this value is used to allocate memory, /// \p direction_and_placement need to be specified explicitly to get correct /// result. /// \param [in] direction_and_placement Explicitly specify the FFT /// direction and placement info. If it is not set, forward direction(if /// current FFT is complex-to-complex) and out-of-place (false) are set by default. static void estimate_size(int n3, int n2, int n1, fft_type type, size_t *estimated_scratchpad_size, std::optional<std::pair<fft_direction, bool /*is_inplace*/>> direction_and_placement = std::nullopt) { fft_engine *engine = fft_engine::create(); engine->_is_estimate_call = true; engine->commit(&dpct::get_default_queue(), n3, n2, n1, type, estimated_scratchpad_size, direction_and_placement); fft_engine::destroy(engine); } #endif /// Execute the FFT calculation. /// \param [in] input Pointer to the input data. /// \param [out] output Pointer to the output data. /// \param [in] direction The FFT direction. 
template <typename input_t, typename output_t> void compute(input_t *input, output_t *output, fft_direction direction) { if (_input_type == library_data_t::complex_float && _output_type == library_data_t::complex_float) { compute_complex<float, oneapi::mkl::dft::precision::SINGLE>( (float *)input, (float *)output, direction); } else if (_input_type == library_data_t::complex_double && _output_type == library_data_t::complex_double) { compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>( (double *)input, (double *)output, direction); } else if (_input_type == library_data_t::real_float && _output_type == library_data_t::complex_float) { _direction = direction; compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input, (float *)output); } else if (_input_type == library_data_t::complex_float && _output_type == library_data_t::real_float) { _direction = direction; compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input, (float *)output); } else if (_input_type == library_data_t::real_double && _output_type == library_data_t::complex_double) { _direction = direction; compute_real<double, oneapi::mkl::dft::precision::DOUBLE>( (double *)input, (double *)output); } else if (_input_type == library_data_t::complex_double && _output_type == library_data_t::real_double) { _direction = direction; compute_real<double, oneapi::mkl::dft::precision::DOUBLE>( (double *)input, (double *)output); } } template <> void compute(float *input, sycl::float2 *output, fft_direction direction) { _direction = direction; compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input, (float *)output); } template <> void compute(sycl::float2 *input, float *output, fft_direction direction) { _direction = direction; compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input, (float *)output); } template <> void compute(double *input, sycl::double2 *output, fft_direction direction) { _direction = direction; compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input, (double *)output); } template <> void compute(sycl::double2 *input, double *output, fft_direction direction) { _direction = direction; compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input, (double *)output); } template <> void compute(sycl::float2 *input, sycl::float2 *output, fft_direction direction) { compute_complex<float, oneapi::mkl::dft::precision::SINGLE>( (float *)input, (float *)output, direction); } template <> void compute(sycl::double2 *input, sycl::double2 *output, fft_direction direction) { compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>( (double *)input, (double *)output, direction); } /// Setting the user's SYCL queue for calculation. /// \param [in] q Pointer to the SYCL queue. void set_queue(sycl::queue *q) { _q = q; } #ifdef __INTEL_MKL__ /// Setting whether to use external or internal workspace. /// \param [in] flag True means using internal workspace. False means using /// external workspace. void use_internal_workspace(bool flag = true) { _use_external_workspace = !flag; } /// Specify the external workspace. /// \param [in] ptr Pointer to the workspace. 
void set_workspace(void *ptr) { if (!_use_external_workspace) { return; } if (_input_type == library_data_t::complex_float && _output_type == library_data_t::complex_float) { if (_q->get_device().is_gpu()) { auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr)); _desc_sc->set_workspace(data); } } else if (_input_type == library_data_t::complex_double && _output_type == library_data_t::complex_double) { if (_q->get_device().is_gpu()) { auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr)); _desc_dc->set_workspace(data); } } else if ((_input_type == library_data_t::real_float && _output_type == library_data_t::complex_float) || (_input_type == library_data_t::complex_float && _output_type == library_data_t::real_float)) { if (_q->get_device().is_gpu()) { auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr)); _desc_sr->set_workspace(data); } } else if ((_input_type == library_data_t::real_double && _output_type == library_data_t::complex_double) || (_input_type == library_data_t::complex_double && _output_type == library_data_t::real_double)) { if (_q->get_device().is_gpu()) { auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr)); _desc_dr->set_workspace(data); } } else { throw sycl::exception(sycl::make_error_code(sycl::errc::invalid), "invalid fft type"); } } #endif /// Get the workspace size. /// \param [out] scratchpad_size Workspace size in bytes. void get_workspace_size(size_t *scratchpad_size) { if (scratchpad_size) { *scratchpad_size = _workspace_bytes; } } private: static std::pair<library_data_t, library_data_t> fft_type_to_data_type(fft_type type) { switch (type) { case fft_type::real_float_to_complex_float: { return std::make_pair(library_data_t::real_float, library_data_t::complex_float); } case fft_type::complex_float_to_real_float: { return std::make_pair(library_data_t::complex_float, library_data_t::real_float); } case fft_type::real_double_to_complex_double: { return std::make_pair(library_data_t::real_double, library_data_t::complex_double); } case fft_type::complex_double_to_real_double: { return std::make_pair(library_data_t::complex_double, library_data_t::real_double); } case fft_type::complex_float_to_complex_float: { return std::make_pair(library_data_t::complex_float, library_data_t::complex_float); } case fft_type::complex_double_to_complex_double: { return std::make_pair(library_data_t::complex_double, library_data_t::complex_double); } } } void config_and_commit_basic() { if (_input_type == library_data_t::complex_float && _output_type == library_data_t::complex_float) { _desc_sc = std::make_shared< oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::COMPLEX>>(_n); std::int64_t distance = 1; for (auto i : _n) distance = distance * i; _fwd_dist = distance; _bwd_dist = distance; _desc_sc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, distance); _desc_sc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, distance); _desc_sc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, _batch); #ifdef __INTEL_MKL__ if (_is_user_specified_dir_and_placement && _is_inplace) _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); else _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); if (_use_external_workspace) { if (_q->get_device().is_gpu()) { _desc_sc->set_value( oneapi::mkl::dft::config_param::WORKSPACE, oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL); } 
} if (_is_estimate_call) { if (_q->get_device().is_gpu()) { _desc_sc->get_value( oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES, &_workspace_estimate_bytes); } } else { _desc_sc->commit(*_q); if (_q->get_device().is_gpu()) { _desc_sc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES, &_workspace_bytes); } } #else if (_is_user_specified_dir_and_placement && _is_inplace) _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); else _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); _desc_sc->commit(*_q); #endif } else if (_input_type == library_data_t::complex_double && _output_type == library_data_t::complex_double) { _desc_dc = std::make_shared< oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::COMPLEX>>(_n); std::int64_t distance = 1; for (auto i : _n) distance = distance * i; _fwd_dist = distance; _bwd_dist = distance; _desc_dc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, distance); _desc_dc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, distance); _desc_dc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, _batch); #ifdef __INTEL_MKL__ if (_is_user_specified_dir_and_placement && _is_inplace) _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); else _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); if (_use_external_workspace) { if (_q->get_device().is_gpu()) { _desc_dc->set_value( oneapi::mkl::dft::config_param::WORKSPACE, oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL); } } if (_is_estimate_call) { if (_q->get_device().is_gpu()) { _desc_dc->get_value( oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES, &_workspace_estimate_bytes); } } else { _desc_dc->commit(*_q); if (_q->get_device().is_gpu()) { _desc_dc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES, &_workspace_bytes); } } #else if (_is_user_specified_dir_and_placement && _is_inplace) _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); else _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); _desc_dc->commit(*_q); #endif } else if ((_input_type == library_data_t::real_float && _output_type == library_data_t::complex_float) || (_input_type == library_data_t::complex_float && _output_type == library_data_t::real_float)) { _desc_sr = std::make_shared<oneapi::mkl::dft::descriptor< oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>>( _n); if (_input_type == library_data_t::real_float && _output_type == library_data_t::complex_float) _direction = fft_direction::forward; else _direction = fft_direction::backward; _desc_sr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, _batch); #ifdef __INTEL_MKL__ if (_is_user_specified_dir_and_placement && _is_inplace) { _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); set_stride_and_distance_basic<true>(_desc_sr); } else { _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); set_stride_and_distance_basic<false>(_desc_sr); } if (_use_external_workspace) { if (_q->get_device().is_gpu()) { _desc_sr->set_value( oneapi::mkl::dft::config_param::WORKSPACE, oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL); } } if (_is_estimate_call) { if 
(_q->get_device().is_gpu()) { _desc_sr->get_value( oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES, &_workspace_estimate_bytes); } } else { _desc_sr->commit(*_q); if (_q->get_device().is_gpu()) { _desc_sr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES, &_workspace_bytes); } } #else if (_is_user_specified_dir_and_placement && _is_inplace) { _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); set_stride_and_distance_basic<true>(_desc_sr); } else { _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); set_stride_and_distance_basic<false>(_desc_sr); } _desc_sr->commit(*_q); #endif } else if ((_input_type == library_data_t::real_double && _output_type == library_data_t::complex_double) || (_input_type == library_data_t::complex_double && _output_type == library_data_t::real_double)) { _desc_dr = std::make_shared<oneapi::mkl::dft::descriptor< oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>>( _n); if (_input_type == library_data_t::real_double && _output_type == library_data_t::complex_double) _direction = fft_direction::forward; else _direction = fft_direction::backward; _desc_dr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, _batch); #ifdef __INTEL_MKL__ if (_is_user_specified_dir_and_placement && _is_inplace) { _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); set_stride_and_distance_basic<true>(_desc_dr); } else { _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); set_stride_and_distance_basic<false>(_desc_dr); } if (_use_external_workspace) { if (_q->get_device().is_gpu()) { _desc_dr->set_value( oneapi::mkl::dft::config_param::WORKSPACE, oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL); } } if (_is_estimate_call) { if (_q->get_device().is_gpu()) { _desc_dr->get_value( oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES, &_workspace_estimate_bytes); } } else { _desc_dr->commit(*_q); if (_q->get_device().is_gpu()) { _desc_dr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES, &_workspace_bytes); } } #else if (_is_user_specified_dir_and_placement && _is_inplace) { _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); set_stride_and_distance_basic<true>(_desc_dr); } else { _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); set_stride_and_distance_basic<false>(_desc_dr); } _desc_dr->commit(*_q); #endif } else { throw sycl::exception(sycl::make_error_code(sycl::errc::invalid), "invalid fft type"); } } void config_and_commit_advanced() { #ifdef __INTEL_MKL__ #define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE) \ { \ DESC = std::make_shared<oneapi::mkl::dft::descriptor< \ oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>( \ _n); \ set_stride_advanced(DESC); \ DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \ DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \ DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, \ _batch); \ if (_is_user_specified_dir_and_placement && _is_inplace) \ DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \ DFTI_CONFIG_VALUE::DFTI_INPLACE); \ else \ DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \ DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); \ if (_use_external_workspace) { \ 
      DESC->set_value(oneapi::mkl::dft::config_param::WORKSPACE,             \
                      oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);   \
    }                                                                         \
    if (_is_estimate_call) {                                                  \
      if (_q->get_device().is_gpu()) {                                        \
        DESC->get_value(                                                      \
            oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,        \
            &_workspace_estimate_bytes);                                     \
      }                                                                       \
    } else {                                                                  \
      DESC->commit(*_q);                                                      \
      if (_q->get_device().is_gpu()) {                                        \
        DESC->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,     \
                        &_workspace_bytes);                                   \
      }                                                                       \
    }                                                                         \
  }
#else
#define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE)                              \
  {                                                                           \
    DESC = std::make_shared<oneapi::mkl::dft::descriptor<                     \
        oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>(   \
        _n);                                                                  \
    set_stride_advanced(DESC);                                                \
    DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \
    DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \
    DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,     \
                    _batch);                                                  \
    if (_is_user_specified_dir_and_placement && _is_inplace)                  \
      DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT,              \
                      oneapi::mkl::dft::config_value::INPLACE);               \
    else                                                                      \
      DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT,              \
                      oneapi::mkl::dft::config_value::NOT_INPLACE);           \
    DESC->commit(*_q);                                                        \
  }
#endif
    if (_input_type == library_data_t::complex_float &&
        _output_type == library_data_t::complex_float) {
      CONFIG_AND_COMMIT(_desc_sc, SINGLE, COMPLEX, float);
    } else if (_input_type == library_data_t::complex_double &&
               _output_type == library_data_t::complex_double) {
      CONFIG_AND_COMMIT(_desc_dc, DOUBLE, COMPLEX, double);
    } else if ((_input_type == library_data_t::real_float &&
                _output_type == library_data_t::complex_float) ||
               (_input_type == library_data_t::complex_float &&
                _output_type == library_data_t::real_float)) {
      CONFIG_AND_COMMIT(_desc_sr, SINGLE, REAL, float);
    } else if ((_input_type == library_data_t::real_double &&
                _output_type == library_data_t::complex_double) ||
               (_input_type == library_data_t::complex_double &&
                _output_type == library_data_t::real_double)) {
      CONFIG_AND_COMMIT(_desc_dr, DOUBLE, REAL, double);
    } else {
      throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
                            "invalid fft type");
    }
#undef CONFIG_AND_COMMIT
  }

  template <typename T>
  void init(int dim, T *n, T *inembed, T istride, T idist,
            library_data_t input_type, T *onembed, T ostride, T odist,
            library_data_t output_type, T batch,
            std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
                direction_and_placement) {
    if (direction_and_placement.has_value()) {
      _is_user_specified_dir_and_placement = true;
      _direction = direction_and_placement->first;
      _is_inplace = direction_and_placement->second;
    }
    _n.resize(dim);
    _inembed.resize(dim);
    _onembed.resize(dim);
    _input_type = input_type;
    _output_type = output_type;
    for (int i = 0; i < dim; i++) {
      _n[i] = n[i];
    }
    if (inembed && onembed) {
      for (int i = 0; i < dim; i++) {
        _inembed[i] = inembed[i];
        _onembed[i] = onembed[i];
      }
      _istride = istride;
      _ostride = ostride;
      if ((_input_type == library_data_t::real_float &&
           _output_type == library_data_t::complex_float) ||
          (_input_type == library_data_t::real_double &&
           _output_type == library_data_t::complex_double)) {
        _fwd_dist = idist;
        _bwd_dist = odist;
      } else if ((_output_type == library_data_t::real_float &&
                  _input_type == library_data_t::complex_float) ||
                 (_output_type == library_data_t::real_double &&
                  _input_type == library_data_t::complex_double)) {
        _fwd_dist = odist;
        _bwd_dist = idist;
      } else {
        if (_is_user_specified_dir_and_placement &&
            (_direction == fft_direction::backward)) {
          _fwd_dist = odist;
          _bwd_dist = idist;
        } else
{ _fwd_dist = idist; _bwd_dist = odist; } } } else { _is_basic = true; } _batch = batch; _dim = dim; if (_is_basic) config_and_commit_basic(); else config_and_commit_advanced(); } template <class Desc_t> void set_stride_advanced(std::shared_ptr<Desc_t> desc) { if (_dim == 1) { std::int64_t input_stride[2] = {0, _istride}; std::int64_t output_stride[2] = {0, _ostride}; desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, input_stride); desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, output_stride); } else if (_dim == 2) { std::int64_t input_stride[3] = {0, _inembed[1] * _istride, _istride}; std::int64_t output_stride[3] = {0, _onembed[1] * _ostride, _ostride}; desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, input_stride); desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, output_stride); } else if (_dim == 3) { std::int64_t input_stride[4] = {0, _inembed[2] * _inembed[1] * _istride, _inembed[2] * _istride, _istride}; std::int64_t output_stride[4] = {0, _onembed[2] * _onembed[1] * _ostride, _onembed[2] * _ostride, _ostride}; desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, input_stride); desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, output_stride); } } template <class Desc_t> void swap_distance(std::shared_ptr<Desc_t> desc) { desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _bwd_dist); desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _fwd_dist); std::int64_t temp = _bwd_dist; _bwd_dist = _fwd_dist; _fwd_dist = temp; } template <bool Is_inplace, class Desc_t> void set_stride_and_distance_basic(std::shared_ptr<Desc_t> desc) { std::int64_t forward_distance = 0; std::int64_t backward_distance = 0; #define SET_STRIDE \ { \ if (_direction == fft_direction::forward) { \ desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \ real_stride); \ desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \ complex_stride); \ } else { \ desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \ complex_stride); \ desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \ real_stride); \ } \ } if (_dim == 1) { if constexpr (Is_inplace) { std::int64_t real_stride[2] = {0, 1}; std::int64_t complex_stride[2] = {0, 1}; SET_STRIDE; forward_distance = 2 * (_n[0] / 2 + 1); backward_distance = _n[0] / 2 + 1; } else { std::int64_t real_stride[2] = {0, 1}; std::int64_t complex_stride[2] = {0, 1}; SET_STRIDE; forward_distance = _n[0]; backward_distance = _n[0] / 2 + 1; } } else if (_dim == 2) { if constexpr (Is_inplace) { std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1}; std::int64_t real_stride[3] = {0, 2 * (_n[1] / 2 + 1), 1}; SET_STRIDE; forward_distance = _n[0] * 2 * (_n[1] / 2 + 1); backward_distance = _n[0] * (_n[1] / 2 + 1); } else { std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1}; std::int64_t real_stride[3] = {0, _n[1], 1}; SET_STRIDE; forward_distance = _n[0] * _n[1]; backward_distance = _n[0] * (_n[1] / 2 + 1); } } else if (_dim == 3) { if constexpr (Is_inplace) { std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1), _n[2] / 2 + 1, 1}; std::int64_t real_stride[4] = {0, _n[1] * 2 * (_n[2] / 2 + 1), 2 * (_n[2] / 2 + 1), 1}; SET_STRIDE; forward_distance = _n[0] * _n[1] * 2 * (_n[2] / 2 + 1); backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1); } else { std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1), _n[2] / 2 + 1, 1}; std::int64_t real_stride[4] = {0, _n[1] * _n[2], _n[2], 1}; SET_STRIDE; forward_distance = _n[0] * _n[1] * _n[2]; 
backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1); } } #undef SET_STRIDE desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, forward_distance); desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, backward_distance); } #define COMPUTE(DESC) \ { \ if (_is_inplace) { \ auto data_input = \ dpct::detail::get_memory(reinterpret_cast<T *>(input)); \ if (_direction == fft_direction::forward) { \ oneapi::mkl::dft::compute_forward(*DESC, data_input); \ } else { \ oneapi::mkl::dft::compute_backward(*DESC, data_input); \ } \ } else { \ auto data_input = \ dpct::detail::get_memory(reinterpret_cast<T *>(input)); \ auto data_output = \ dpct::detail::get_memory(reinterpret_cast<T *>(output)); \ if (_direction == fft_direction::forward) { \ oneapi::mkl::dft::compute_forward(*DESC, data_input, data_output); \ } else { \ oneapi::mkl::dft::compute_backward(*DESC, data_input, data_output); \ } \ } \ } template <class T, oneapi::mkl::dft::precision Precision> void compute_complex(T *input, T *output, fft_direction direction) { bool is_this_compute_inplace = input == output; if (!_is_user_specified_dir_and_placement) { // The complex domain descriptor need different config values if the // FFT direction or placement is different. // Here we check the conditions, and new config values are set and // re-committed if needed. if (direction != _direction || is_this_compute_inplace != _is_inplace) { if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) { if (direction != _direction) { swap_distance(_desc_sc); _direction = direction; } if (is_this_compute_inplace != _is_inplace) { _is_inplace = is_this_compute_inplace; #ifdef __INTEL_MKL__ if (_is_inplace) { _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); } else { _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); } #else if (_is_inplace) { _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); } else { _desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); } #endif } _desc_sc->commit(*_q); } else { if (direction != _direction) { swap_distance(_desc_dc); _direction = direction; } if (is_this_compute_inplace != _is_inplace) { _is_inplace = is_this_compute_inplace; #ifdef __INTEL_MKL__ if (_is_inplace) { _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); } else { _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); } #else if (_is_inplace) { _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); } else { _desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); } #endif } _desc_dc->commit(*_q); } } } if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) { COMPUTE(_desc_sc); } else { COMPUTE(_desc_dc); } } template <class T, oneapi::mkl::dft::precision Precision> void compute_real(T *input, T *output) { bool is_this_compute_inplace = input == output; if (!_is_user_specified_dir_and_placement) { // The real domain descriptor need different config values if the // FFT placement is different. // Here we check the condition, and new config values are set and // re-committed if needed. 
if (is_this_compute_inplace != _is_inplace) { if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) { _is_inplace = is_this_compute_inplace; if (_is_inplace) { #ifdef __INTEL_MKL__ _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); #else _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); #endif if (_is_basic) set_stride_and_distance_basic<true>(_desc_sr); } else { #ifdef __INTEL_MKL__ _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); #else _desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); #endif if (_is_basic) set_stride_and_distance_basic<false>(_desc_sr); } _desc_sr->commit(*_q); } else { _is_inplace = is_this_compute_inplace; if (_is_inplace) { #ifdef __INTEL_MKL__ _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_INPLACE); #else _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::INPLACE); #endif if (_is_basic) set_stride_and_distance_basic<true>(_desc_dr); } else { #ifdef __INTEL_MKL__ _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); #else _desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT, oneapi::mkl::dft::config_value::NOT_INPLACE); #endif if (_is_basic) set_stride_and_distance_basic<false>(_desc_dr); } _desc_dr->commit(*_q); } } } if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) { COMPUTE(_desc_sr); } else { COMPUTE(_desc_dr); } } #undef COMPUTE private: sycl::queue *_q = nullptr; int _dim; std::vector<std::int64_t> _n; std::vector<std::int64_t> _inembed; std::int64_t _istride; std::int64_t _fwd_dist; library_data_t _input_type; std::vector<std::int64_t> _onembed; std::int64_t _ostride; std::int64_t _bwd_dist; library_data_t _output_type; std::int64_t _batch = 1; bool _is_basic = false; bool _is_inplace = false; fft_direction _direction = fft_direction::forward; bool _is_user_specified_dir_and_placement = false; bool _use_external_workspace = false; void *_external_workspace_ptr = nullptr; size_t _workspace_bytes = 0; bool _is_estimate_call = false; size_t _workspace_estimate_bytes = 0; std::shared_ptr<oneapi::mkl::dft::descriptor< oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>> _desc_sr; std::shared_ptr<oneapi::mkl::dft::descriptor< oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>> _desc_dr; std::shared_ptr<oneapi::mkl::dft::descriptor< oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::COMPLEX>> _desc_sc; std::shared_ptr<oneapi::mkl::dft::descriptor< oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::COMPLEX>> _desc_dc; }; using fft_engine_ptr = fft_engine *; } // namespace fft } // namespace dpct #endif // __DPCT_FFT_UTILS_HPP__
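// --------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header). The fft_engine
// above is a thin wrapper over oneMKL DFT descriptors; the snippet below
// shows roughly what the basic single-precision complex path configures and
// computes. The sizes (N, batch) and the in-place forward transform are
// assumptions made only for this example.
// --------------------------------------------------------------------------
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include <complex>
#include <cstdint>

int main() {
  sycl::queue q;
  constexpr std::int64_t N = 1024;
  constexpr std::int64_t batch = 4;

  // Batched input: one contiguous transform every N elements.
  auto *data = sycl::malloc_shared<std::complex<float>>(N * batch, q);
  for (std::int64_t i = 0; i < N * batch; ++i)
    data[i] = {1.0f, 0.0f};

  // Mirrors the _desc_sc branch of config_and_commit_basic().
  oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::SINGLE,
                               oneapi::mkl::dft::domain::COMPLEX>
      desc(N);
  desc.set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, batch);
  desc.set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, N);
  desc.set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, N);
  desc.commit(q);

  // In-place forward transform, as in the in-place path of the COMPUTE macro.
  oneapi::mkl::dft::compute_forward(desc, data).wait();

  sycl::free(data, q);
  return 0;
}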
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/lib_common_utils.hpp
//==---- lib_common_utils.hpp ---------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_LIB_COMMON_UTILS_HPP__ #define __DPCT_LIB_COMMON_UTILS_HPP__ #include <sycl/sycl.hpp> #include <oneapi/mkl.hpp> #include "memory.hpp" #include "util.hpp" namespace dpct { namespace detail { template <typename T> inline auto get_memory(T *x) { #ifdef DPCT_USM_LEVEL_NONE return dpct::get_buffer<std::remove_cv_t<T>>(x); #else return x; #endif } template <typename T> inline typename DataType<T>::T2 get_value(const T *s, sycl::queue &q) { using Ty = typename DataType<T>::T2; Ty s_h; detail::dpct_memcpy(q, (void *)&s_h, (void *)s, sizeof(T), automatic).wait(); return s_h; } } enum class version_field : int { major, minor, update, patch }; /// Returns the requested field of Intel(R) oneAPI Math Kernel Library version. /// \param field The version information field (major, minor, update or patch). /// \param result The result value. inline void mkl_get_version(version_field field, int *result) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else MKLVersion version; mkl_get_version(&version); if (version_field::major == field) { *result = version.MajorVersion; } else if (version_field::minor == field) { *result = version.MinorVersion; } else if (version_field::update == field) { *result = version.UpdateVersion; } else if (version_field::patch == field) { *result = 0; } else { throw std::runtime_error("unknown field"); } #endif } enum class library_data_t : unsigned char { real_float = 0, complex_float, real_double, complex_double, real_half, complex_half, real_bfloat16, complex_bfloat16, real_int4, complex_int4, real_uint4, complex_uint4, real_int8, complex_int8, real_uint8, complex_uint8, real_int16, complex_int16, real_uint16, complex_uint16, real_int32, complex_int32, real_uint32, complex_uint32, real_int64, complex_int64, real_uint64, complex_uint64, real_int8_4, real_int8_32, real_uint8_4, library_data_t_size }; namespace detail { template <typename ArgT> inline constexpr std::uint64_t get_type_combination_id(ArgT Val) { static_assert((unsigned char)library_data_t::library_data_t_size <= std::numeric_limits<unsigned char>::max() && "library_data_t size exceeds limit."); static_assert(std::is_same_v<ArgT, library_data_t>, "Unsupported ArgT"); return (std::uint64_t)Val; } template <typename FirstT, typename... RestT> inline constexpr std::uint64_t get_type_combination_id(FirstT FirstVal, RestT... RestVal) { static_assert((std::uint8_t)library_data_t::library_data_t_size <= std::numeric_limits<unsigned char>::max() && "library_data_t size exceeds limit."); static_assert(sizeof...(RestT) <= 8 && "Too many parameters"); static_assert(std::is_same_v<FirstT, library_data_t>, "Unsupported FirstT"); return get_type_combination_id(RestVal...) 
<< 8 | ((std::uint64_t)FirstVal); } inline constexpr std::size_t library_data_size[] = { 8 * sizeof(float), // real_float 8 * sizeof(std::complex<float>), // complex_float 8 * sizeof(double), // real_double 8 * sizeof(std::complex<double>), // complex_double 8 * sizeof(sycl::half), // real_half 8 * sizeof(std::complex<sycl::half>), // complex_half 16, // real_bfloat16 16 * 2, // complex_bfloat16 4, // real_int4 4 * 2, // complex_int4 4, // real_uint4 4 * 2, // complex_uint4 8, // real_int8 8 * 2, // complex_int8 8, // real_uint8 8 * 2, // complex_uint8 16, // real_int16 16 * 2, // complex_int16 16, // real_uint16 16 * 2, // complex_uint16 32, // real_int32 32 * 2, // complex_int32 32, // real_uint32 32 * 2, // complex_uint32 64, // real_int64 64 * 2, // complex_int64 64, // real_uint64 64 * 2, // complex_uint64 8, // real_int8_4 8, // real_int8_32 8 // real_uint8_4 }; } // namespace detail } // namespace dpct #endif // __DPCT_LIB_COMMON_UTILS_HPP__
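// --------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header). Each
// library_data_t value occupies one byte of the key returned by
// get_type_combination_id(), so a runtime pair of types can be dispatched in
// a single switch with constexpr case labels, the same pattern the sparse and
// BLAS helpers use. The include path and the describe() helper are
// assumptions made only for this example.
// --------------------------------------------------------------------------
#include <cstdint>
#include <cstdio>
#include "lib_common_utils.hpp"

static const char *describe(dpct::library_data_t in, dpct::library_data_t out) {
  using dpct::library_data_t;
  const std::uint64_t key = dpct::detail::get_type_combination_id(in, out);
  switch (key) {
  // Case labels are constant expressions because the helper is constexpr.
  case dpct::detail::get_type_combination_id(library_data_t::real_float,
                                             library_data_t::real_float):
    return "float -> float";
  case dpct::detail::get_type_combination_id(library_data_t::real_double,
                                             library_data_t::real_double):
    return "double -> double";
  default:
    return "unsupported combination";
  }
}

int main() {
  std::puts(describe(dpct::library_data_t::real_float,
                     dpct::library_data_t::real_float));
  return 0;
}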
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/sparse_utils.hpp
//==---- sparse_utils.hpp -------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_SPARSE_UTILS_HPP__ #define __DPCT_SPARSE_UTILS_HPP__ #include "lib_common_utils.hpp" #include <oneapi/mkl.hpp> #include <sycl/sycl.hpp> namespace dpct { namespace sparse { /// Describes properties of a sparse matrix. /// The properties are matrix type, diag, uplo and index base. class matrix_info { public: /// Matrix types are: /// ge: General matrix /// sy: Symmetric matrix /// he: Hermitian matrix /// tr: Triangular matrix enum class matrix_type : int { ge = 0, sy, he, tr }; auto get_matrix_type() const { return _matrix_type; } auto get_diag() const { return _diag; } auto get_uplo() const { return _uplo; } auto get_index_base() const { return _index_base; } void set_matrix_type(matrix_type mt) { _matrix_type = mt; } void set_diag(oneapi::mkl::diag d) { _diag = d; } void set_uplo(oneapi::mkl::uplo u) { _uplo = u; } void set_index_base(oneapi::mkl::index_base ib) { _index_base = ib; } private: matrix_type _matrix_type = matrix_type::ge; oneapi::mkl::diag _diag = oneapi::mkl::diag::nonunit; oneapi::mkl::uplo _uplo = oneapi::mkl::uplo::upper; oneapi::mkl::index_base _index_base = oneapi::mkl::index_base::zero; }; /// Computes a CSR format sparse matrix-dense vector product. /// y = alpha * op(A) * x + beta * y /// \param [in] queue The queue where the routine should be executed. It must /// have the in_order property when using the USM mode. /// \param [in] trans The operation applied to the matrix A. /// \param [in] num_rows Number of rows of the matrix A. /// \param [in] num_cols Number of columns of the matrix A. /// \param [in] alpha Scaling factor for the matrix A. /// \param [in] info Matrix info of the matrix A. /// \param [in] val An array containing the non-zero elements of the matrix A. /// \param [in] row_ptr An array of length \p num_rows + 1. /// \param [in] col_ind An array containing the column indices in index-based /// numbering. /// \param [in] x Data of the vector x. /// \param [in] beta Scaling factor for the vector x. /// \param [in, out] y Data of the vector y. 
template <typename T> void csrmv(sycl::queue &queue, oneapi::mkl::transpose trans, int num_rows, int num_cols, const T *alpha, const std::shared_ptr<matrix_info> info, const T *val, const int *row_ptr, const int *col_ind, const T *x, const T *beta, T *y) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using Ty = typename dpct::DataType<T>::T2; auto alpha_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue); auto beta_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue); oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle = new oneapi::mkl::sparse::matrix_handle_t; oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle); auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr)); auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind)); auto data_val = dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val))); oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, num_rows, num_cols, info->get_index_base(), data_row_ptr, data_col_ind, data_val); auto data_x = dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(x))); auto data_y = dpct::detail::get_memory(reinterpret_cast<Ty *>(y)); switch (info->get_matrix_type()) { case matrix_info::matrix_type::ge: { oneapi::mkl::sparse::optimize_gemv(queue, trans, *sparse_matrix_handle); oneapi::mkl::sparse::gemv(queue, trans, alpha_value, *sparse_matrix_handle, data_x, beta_value, data_y); break; } case matrix_info::matrix_type::sy: { oneapi::mkl::sparse::symv(queue, info->get_uplo(), alpha_value, *sparse_matrix_handle, data_x, beta_value, data_y); break; } case matrix_info::matrix_type::tr: { oneapi::mkl::sparse::optimize_trmv(queue, info->get_uplo(), trans, info->get_diag(), *sparse_matrix_handle); oneapi::mkl::sparse::trmv(queue, info->get_uplo(), trans, info->get_diag(), alpha_value, *sparse_matrix_handle, data_x, beta_value, data_y); break; } default: throw std::runtime_error( "the spmv does not support matrix_info::matrix_type::he"); } sycl::event e = oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle); queue.submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { delete sparse_matrix_handle; }); }); #endif } /// Computes a CSR format sparse matrix-dense matrix product. /// C = alpha * op(A) * B + beta * C /// \param [in] queue The queue where the routine should be executed. It must /// have the in_order property when using the USM mode. /// \param [in] trans The operation applied to the matrix A. /// \param [in] sparse_rows Number of rows of the matrix A. /// \param [in] dense_cols Number of columns of the matrix B or C. /// \param [in] sparse_cols Number of columns of the matrix A. /// \param [in] alpha Scaling factor for the matrix A. /// \param [in] info Matrix info of the matrix A. /// \param [in] val An array containing the non-zero elements of the matrix A. /// \param [in] row_ptr An array of length \p num_rows + 1. /// \param [in] col_ind An array containing the column indices in index-based /// numbering. /// \param [in] b Data of the matrix B. /// \param [in] ldb Leading dimension of the matrix B. /// \param [in] beta Scaling factor for the matrix B. /// \param [in, out] c Data of the matrix C. /// \param [in] ldc Leading dimension of the matrix C. 
template <typename T> void csrmm(sycl::queue &queue, oneapi::mkl::transpose trans, int sparse_rows, int dense_cols, int sparse_cols, const T *alpha, const std::shared_ptr<matrix_info> info, const T *val, const int *row_ptr, const int *col_ind, const T *b, int ldb, const T *beta, T *c, int ldc) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using Ty = typename dpct::DataType<T>::T2; auto alpha_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue); auto beta_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue); oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle = new oneapi::mkl::sparse::matrix_handle_t; oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle); auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr)); auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind)); auto data_val = dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val))); oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, sparse_rows, sparse_cols, info->get_index_base(), data_row_ptr, data_col_ind, data_val); auto data_b = dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(b))); auto data_c = dpct::detail::get_memory(reinterpret_cast<Ty *>(c)); switch (info->get_matrix_type()) { case matrix_info::matrix_type::ge: { oneapi::mkl::sparse::gemm(queue, oneapi::mkl::layout::row_major, trans, oneapi::mkl::transpose::nontrans, alpha_value, *sparse_matrix_handle, data_b, dense_cols, ldb, beta_value, data_c, ldc); break; } default: throw std::runtime_error( "the csrmm does not support matrix_info::matrix_type::sy, " "matrix_info::matrix_type::tr and matrix_info::matrix_type::he"); } sycl::event e = oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle); queue.submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { delete sparse_matrix_handle; }); }); #endif } #ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this. /// Saving the optimization information for solving a system of linear /// equations. class optimize_info { public: /// Constructor optimize_info() { oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle); } /// Destructor ~optimize_info() { oneapi::mkl::sparse::release_matrix_handle(get_default_queue(), &_matrix_handle, _deps) .wait(); } /// Add dependency for the destructor. /// \param [in] e The event which the destructor depends on. void add_dependency(sycl::event e) { _deps.push_back(e); } /// Get the internal saved matrix handle. /// \return Returns the matrix handle. oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept { return _matrix_handle; } private: oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr; std::vector<sycl::event> _deps; }; #endif #ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this. /// Performs internal optimizations for solving a system of linear equations for /// a CSR format sparse matrix. /// \param [in] queue The queue where the routine should be executed. It must /// have the in_order property when using the USM mode. /// \param [in] trans The operation applied to the sparse matrix. /// \param [in] row_col Number of rows of the sparse matrix. /// \param [in] info Matrix info of the sparse matrix. /// \param [in] val An array containing the non-zero elements of the sparse matrix. /// \param [in] row_ptr An array of length \p num_rows + 1. 
/// \param [in] col_ind An array containing the column indices in index-based /// numbering. /// \param [out] optimize_info The result of the optimizations. template <typename T> void optimize_csrsv(sycl::queue &queue, oneapi::mkl::transpose trans, int row_col, const std::shared_ptr<matrix_info> info, const T *val, const int *row_ptr, const int *col_ind, std::shared_ptr<optimize_info> optimize_info) { using Ty = typename dpct::DataType<T>::T2; auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr)); auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind)); auto data_val = dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val))); oneapi::mkl::sparse::set_csr_data(queue, optimize_info->get_matrix_handle(), row_col, row_col, info->get_index_base(), data_row_ptr, data_col_ind, data_val); if (info->get_matrix_type() != matrix_info::matrix_type::tr) return; #ifndef DPCT_USM_LEVEL_NONE sycl::event e; e = #endif oneapi::mkl::sparse::optimize_trsv(queue, info->get_uplo(), trans, info->get_diag(), optimize_info->get_matrix_handle()); #ifndef DPCT_USM_LEVEL_NONE optimize_info->add_dependency(e); #endif } #endif class sparse_matrix_desc; using sparse_matrix_desc_t = std::shared_ptr<sparse_matrix_desc>; /// Structure for describe a dense vector class dense_vector_desc { public: dense_vector_desc(std::int64_t ele_num, void *value, library_data_t value_type) : _ele_num(ele_num), _value(value), _value_type(value_type) {} void get_desc(std::int64_t *ele_num, const void **value, library_data_t *value_type) const noexcept { *ele_num = _ele_num; *value = _value; *value_type = _value_type; } void get_desc(std::int64_t *ele_num, void **value, library_data_t *value_type) const noexcept { get_desc(ele_num, const_cast<const void **>(value), value_type); } void *get_value() const noexcept { return _value; } void set_value(void *value) { _value = value; } private: std::int64_t _ele_num; void *_value; library_data_t _value_type; }; /// Structure for describe a dense matrix class dense_matrix_desc { public: dense_matrix_desc(std::int64_t row_num, std::int64_t col_num, std::int64_t leading_dim, void *value, library_data_t value_type, oneapi::mkl::layout layout) : _row_num(row_num), _col_num(col_num), _leading_dim(leading_dim), _value(value), _value_type(value_type), _layout(layout) {} void get_desc(std::int64_t *row_num, std::int64_t *col_num, std::int64_t *leading_dim, void **value, library_data_t *value_type, oneapi::mkl::layout *layout) const noexcept { *row_num = _row_num; *col_num = _col_num; *leading_dim = _leading_dim; *value = _value; *value_type = _value_type; *layout = _layout; } void *get_value() const noexcept { return _value; } void set_value(void *value) { _value = value; } std::int64_t get_col_num() const noexcept { return _col_num; } std::int64_t get_leading_dim() const noexcept { return _leading_dim; } oneapi::mkl::layout get_layout() const noexcept { return _layout; } private: std::int64_t _row_num; std::int64_t _col_num; std::int64_t _leading_dim; void *_value; library_data_t _value_type; oneapi::mkl::layout _layout; }; /// Sparse matrix data format enum matrix_format : int { csr = 1, }; /// Sparse matrix attribute enum matrix_attribute : int { uplo = 0, diag }; #ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this. /// Structure for describe a sparse matrix class sparse_matrix_desc { public: /// Constructor /// \param [out] desc The descriptor to be created /// \param [in] row_num Number of rows of the sparse matrix. 
/// \param [in] col_num Number of colums of the sparse matrix. /// \param [in] nnz Non-zero elements in the sparse matrix. /// \param [in] row_ptr An array of length \p row_num + 1. /// \param [in] col_ind An array containing the column indices in index-based /// numbering. /// \param [in] value An array containing the non-zero elements of the sparse matrix. /// \param [in] row_ptr_type Data type of the \p row_ptr . /// \param [in] col_ind_type Data type of the \p col_ind . /// \param [in] base Indicates how input arrays are indexed. /// \param [in] value_type Data type of the \p value . /// \param [in] data_format The matrix data format. sparse_matrix_desc(std::int64_t row_num, std::int64_t col_num, std::int64_t nnz, void *row_ptr, void *col_ind, void *value, library_data_t row_ptr_type, library_data_t col_ind_type, oneapi::mkl::index_base base, library_data_t value_type, matrix_format data_format) : _row_num(row_num), _col_num(col_num), _nnz(nnz), _row_ptr(row_ptr), _col_ind(col_ind), _value(value), _row_ptr_type(row_ptr_type), _col_ind_type(col_ind_type), _base(base), _value_type(value_type), _data_format(data_format) { if (_data_format != matrix_format::csr) { throw std::runtime_error("the sparse matrix data format is unsupported"); } oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle); construct(); } /// Destructor ~sparse_matrix_desc() { oneapi::mkl::sparse::release_matrix_handle(get_default_queue(), &_matrix_handle, _deps) .wait(); } /// Add dependency for the destroy method. /// \param [in] e The event which the destroy method depends on. void add_dependency(sycl::event e) { _deps.push_back(e); } /// Get the internal saved matrix handle. /// \return Returns the matrix handle. oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept { return _matrix_handle; } /// Get the values saved in the descriptor /// \param [out] row_num Number of rows of the sparse matrix. /// \param [out] col_num Number of colums of the sparse matrix. /// \param [out] nnz Non-zero elements in the sparse matrix. /// \param [out] row_ptr An array of length \p row_num + 1. /// \param [out] col_ind An array containing the column indices in index-based /// numbering. /// \param [out] value An array containing the non-zero elements of the sparse matrix. /// \param [out] row_ptr_type Data type of the \p row_ptr . /// \param [out] col_ind_type Data type of the \p col_ind . /// \param [out] base Indicates how input arrays are indexed. /// \param [out] value_type Data type of the \p value . 
void get_desc(int64_t *row_num, int64_t *col_num, int64_t *nnz, void **row_ptr, void **col_ind, void **value, library_data_t *row_ptr_type, library_data_t *col_ind_type, oneapi::mkl::index_base *base, library_data_t *value_type) const noexcept { *row_num = _row_num; *col_num = _col_num; *nnz = _nnz; *row_ptr = _row_ptr; *col_ind = _col_ind; *value = _value; *row_ptr_type = _row_ptr_type; *col_ind_type = _col_ind_type; *base = _base; *value_type = _value_type; } /// Get the sparse matrix data format of this descriptor /// \param [out] format The matrix data format result void get_format(matrix_format *data_format) const noexcept { *data_format = _data_format; } /// Get the index base of this descriptor /// \param [out] base The index base result void get_base(oneapi::mkl::index_base *base) const noexcept { *base = _base; } /// Get the value pointer of this descriptor /// \param [out] value The value pointer result void get_value(void **value) const noexcept { *value = _value; } /// Set the value pointer of this descriptor /// \param [in] value The input value pointer void set_value(void *value) { // Assume the new data is different from the old data _value = value; construct(); } /// Get the size of the sparse matrix /// \param [out] row_num Number of rows of the sparse matrix. /// \param [out] col_num Number of colums of the sparse matrix. /// \param [out] nnz Non-zero elements in the sparse matrix. void get_size(int64_t *row_num, int64_t *col_num, int64_t *nnz) const noexcept { *row_num = _row_num; *col_num = _col_num; *nnz = _nnz; } /// Set the sparse matrix attribute /// \param [in] attribute The attribute type /// \param [in] data The attribute value /// \param [in] data_size The data size of the attribute value void set_attribute(matrix_attribute attribute, const void *data, size_t data_size) { if (attribute == matrix_attribute::diag) { const oneapi::mkl::diag *diag_ptr = reinterpret_cast<const oneapi::mkl::diag *>(data); if (*diag_ptr == oneapi::mkl::diag::unit) { _diag = oneapi::mkl::diag::unit; } else if (*diag_ptr == oneapi::mkl::diag::nonunit) { _diag = oneapi::mkl::diag::nonunit; } else { throw std::runtime_error("unsupported diag value"); } } else if (attribute == matrix_attribute::uplo) { const oneapi::mkl::uplo *uplo_ptr = reinterpret_cast<const oneapi::mkl::uplo *>(data); if (*uplo_ptr == oneapi::mkl::uplo::upper) { _uplo = oneapi::mkl::uplo::upper; } else if (*uplo_ptr == oneapi::mkl::uplo::lower) { _uplo = oneapi::mkl::uplo::lower; } else { throw std::runtime_error("unsupported uplo value"); } } else { throw std::runtime_error("unsupported attribute"); } } /// Get the sparse matrix attribute /// \param [out] attribute The attribute type /// \param [out] data The attribute value /// \param [out] data_size The data size of the attribute value void get_attribute(matrix_attribute attribute, void *data, size_t data_size) const { if (attribute == matrix_attribute::diag) { oneapi::mkl::diag *diag_ptr = reinterpret_cast<oneapi::mkl::diag *>(data); if (_diag.has_value()) { *diag_ptr = _diag.value(); } else { *diag_ptr = oneapi::mkl::diag::nonunit; } } else if (attribute == matrix_attribute::uplo) { oneapi::mkl::uplo *uplo_ptr = reinterpret_cast<oneapi::mkl::uplo *>(data); if (_uplo.has_value()) { *uplo_ptr = _uplo.value(); } else { *uplo_ptr = oneapi::mkl::uplo::lower; } } else { throw std::runtime_error("unsupported attribute"); } } /// Set the pointers for describing the sparse matrix /// \param [in] row_ptr An array of length \p row_num + 1. 
/// \param [in] col_ind An array containing the column indices in index-based /// numbering. /// \param [in] value An array containing the non-zero elements of the sparse matrix. void set_pointers(void *row_ptr, void *col_ind, void *value) { // Assume the new data is different from the old data _row_ptr = row_ptr; _col_ind = col_ind; _value = value; construct(); } /// Get the diag attribute /// \return diag value std::optional<oneapi::mkl::diag> get_diag() const noexcept { return _diag; } /// Get the uplo attribute /// \return uplo value std::optional<oneapi::mkl::uplo> get_uplo() const noexcept { return _uplo; } private: template <typename index_t, typename value_t> void set_data() { auto data_row_ptr = dpct::detail::get_memory(reinterpret_cast<index_t *>(_row_ptr)); auto data_col_ind = dpct::detail::get_memory(reinterpret_cast<index_t *>(_col_ind)); auto data_value = dpct::detail::get_memory(reinterpret_cast<value_t *>(_value)); oneapi::mkl::sparse::set_csr_data(get_default_queue(), _matrix_handle, _row_num, _col_num, _base, data_row_ptr, data_col_ind, data_value); get_default_queue().wait(); } void construct() { std::uint64_t key = dpct::detail::get_type_combination_id( _row_ptr_type, _col_ind_type, _value_type); switch (key) { case dpct::detail::get_type_combination_id(library_data_t::real_int32, library_data_t::real_int32, library_data_t::real_float): { set_data<std::int32_t, float>(); break; } case dpct::detail::get_type_combination_id(library_data_t::real_int32, library_data_t::real_int32, library_data_t::real_double): { set_data<std::int32_t, double>(); break; } case dpct::detail::get_type_combination_id(library_data_t::real_int32, library_data_t::real_int32, library_data_t::complex_float): { set_data<std::int32_t, std::complex<float>>(); break; } case dpct::detail::get_type_combination_id( library_data_t::real_int32, library_data_t::real_int32, library_data_t::complex_double): { set_data<std::int32_t, std::complex<double>>(); break; } case dpct::detail::get_type_combination_id(library_data_t::real_int64, library_data_t::real_int64, library_data_t::real_float): { set_data<std::int64_t, float>(); break; } case dpct::detail::get_type_combination_id(library_data_t::real_int64, library_data_t::real_int64, library_data_t::real_double): { set_data<std::int64_t, double>(); break; } case dpct::detail::get_type_combination_id(library_data_t::real_int64, library_data_t::real_int64, library_data_t::complex_float): { set_data<std::int64_t, std::complex<float>>(); break; } case dpct::detail::get_type_combination_id( library_data_t::real_int64, library_data_t::real_int64, library_data_t::complex_double): { set_data<std::int64_t, std::complex<double>>(); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } std::int64_t _row_num; std::int64_t _col_num; std::int64_t _nnz; void *_row_ptr; void *_col_ind; void *_value; library_data_t _row_ptr_type; library_data_t _col_ind_type; oneapi::mkl::index_base _base; library_data_t _value_type; oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr; std::vector<sycl::event> _deps; matrix_format _data_format; std::optional<oneapi::mkl::uplo> _uplo; std::optional<oneapi::mkl::diag> _diag; }; namespace detail { #ifdef DPCT_USM_LEVEL_NONE #define SPARSE_CALL(X) \ do { \ X; \ } while (0) #else #define SPARSE_CALL(X) \ do { \ sycl::event e = X; \ a->add_dependency(e); \ } while (0) #endif template <typename Ty> inline void spmv_impl(sycl::queue queue, oneapi::mkl::transpose trans, const void *alpha, 
sparse_matrix_desc_t a, std::shared_ptr<dense_vector_desc> x, const void *beta, std::shared_ptr<dense_vector_desc> y) { auto alpha_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue); auto beta_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue); auto data_x = dpct::detail::get_memory(reinterpret_cast<Ty *>(x->get_value())); auto data_y = dpct::detail::get_memory(reinterpret_cast<Ty *>(y->get_value())); if (a->get_diag().has_value() && a->get_uplo().has_value()) { oneapi::mkl::sparse::optimize_trmv(queue, a->get_uplo().value(), trans, a->get_diag().value(), a->get_matrix_handle()); SPARSE_CALL(oneapi::mkl::sparse::trmv( queue, a->get_uplo().value(), trans, a->get_diag().value(), alpha_value, a->get_matrix_handle(), data_x, beta_value, data_y)); } else { oneapi::mkl::sparse::optimize_gemv(queue, trans, a->get_matrix_handle()); SPARSE_CALL(oneapi::mkl::sparse::gemv(queue, trans, alpha_value, a->get_matrix_handle(), data_x, beta_value, data_y)); } } template <typename Ty> inline void spmm_impl(sycl::queue queue, oneapi::mkl::transpose trans_a, oneapi::mkl::transpose trans_b, const void *alpha, sparse_matrix_desc_t a, std::shared_ptr<dense_matrix_desc> b, const void *beta, std::shared_ptr<dense_matrix_desc> c) { auto alpha_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue); auto beta_value = dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue); auto data_b = dpct::detail::get_memory(reinterpret_cast<Ty *>(b->get_value())); auto data_c = dpct::detail::get_memory(reinterpret_cast<Ty *>(c->get_value())); SPARSE_CALL(oneapi::mkl::sparse::gemm( queue, b->get_layout(), trans_a, trans_b, alpha_value, a->get_matrix_handle(), data_b, b->get_col_num(), b->get_leading_dim(), beta_value, data_c, c->get_leading_dim())); } #undef SPARSE_CALL } // namespace detail /// Computes a sparse matrix-dense vector product: y = alpha * op(a) * x + beta * y. /// \param [in] queue The queue where the routine should be executed. It must /// have the in_order property when using the USM mode. /// \param [in] trans Specifies operation on input matrix. /// \param [in] alpha Specifies the scalar alpha. /// \param [in] a Specifies the sparse matrix a. /// \param [in] x Specifies the dense vector x. /// \param [in] beta Specifies the scalar beta. /// \param [in, out] y Specifies the dense vector y. /// \param [in] data_type Specifies the data type of \param a, \param x and \param y . inline void spmv(sycl::queue queue, oneapi::mkl::transpose trans, const void *alpha, sparse_matrix_desc_t a, std::shared_ptr<dense_vector_desc> x, const void *beta, std::shared_ptr<dense_vector_desc> y, library_data_t data_type) { switch (data_type) { case library_data_t::real_float: { detail::spmv_impl<float>(queue, trans, alpha, a, x, beta, y); break; } case library_data_t::real_double: { detail::spmv_impl<double>(queue, trans, alpha, a, x, beta, y); break; } case library_data_t::complex_float: { detail::spmv_impl<std::complex<float>>(queue, trans, alpha, a, x, beta, y); break; } case library_data_t::complex_double: { detail::spmv_impl<std::complex<double>>(queue, trans, alpha, a, x, beta, y); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } /// Computes a sparse matrix-dense matrix product: c = alpha * op(a) * op(b) + beta * c. /// \param [in] queue The queue where the routine should be executed. It must /// have the in_order property when using the USM mode. 
/// \param [in] trans_a Specifies operation on input matrix a. /// \param [in] trans_b Specifies operation on input matrix b. /// \param [in] alpha Specifies the scalar alpha. /// \param [in] a Specifies the sparse matrix a. /// \param [in] b Specifies the dense matrix b. /// \param [in] beta Specifies the scalar beta. /// \param [in, out] c Specifies the dense matrix c. /// \param [in] data_type Specifies the data type of \param a, \param b and \param c . inline void spmm(sycl::queue queue, oneapi::mkl::transpose trans_a, oneapi::mkl::transpose trans_b, const void *alpha, sparse_matrix_desc_t a, std::shared_ptr<dense_matrix_desc> b, const void *beta, std::shared_ptr<dense_matrix_desc> c, library_data_t data_type) { if (b->get_layout() != c->get_layout()) throw std::runtime_error("the layout of b and c are different"); switch (data_type) { case library_data_t::real_float: { detail::spmm_impl<float>(queue, trans_a, trans_b, alpha, a, b, beta, c); break; } case library_data_t::real_double: { detail::spmm_impl<double>(queue, trans_a, trans_b, alpha, a, b, beta, c); break; } case library_data_t::complex_float: { detail::spmm_impl<std::complex<float>>(queue, trans_a, trans_b, alpha, a, b, beta, c); break; } case library_data_t::complex_double: { detail::spmm_impl<std::complex<double>>(queue, trans_a, trans_b, alpha, a, b, beta, c); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } #endif } // namespace sparse } // namespace dpct #endif // __DPCT_SPARSE_UTILS_HPP__
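// --------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header). It shows one
// way dpct::sparse::csrmv() could be called to compute y = A * x for a small
// CSR matrix. Assumptions made only for this example: a USM build of the dpct
// helpers, the Intel oneMKL backend (__INTEL_MKL__), the helper headers on
// the include path, and made-up matrix values.
// --------------------------------------------------------------------------
#include <algorithm>
#include <memory>
#include <sycl/sycl.hpp>
#include "sparse_utils.hpp"

int main() {
  // The csrmv/csrmm documentation above requires an in-order queue in USM mode.
  sycl::queue q{sycl::property::queue::in_order()};

  // 3x3 CSR matrix: [[1 0 2], [0 3 0], [4 0 5]], x = (1, 1, 1).
  const int rows = 3, cols = 3;
  const int h_row_ptr[] = {0, 2, 3, 5};
  const int h_col_ind[] = {0, 2, 1, 0, 2};
  const float h_val[] = {1.f, 2.f, 3.f, 4.f, 5.f};

  int *row_ptr = sycl::malloc_shared<int>(4, q);
  int *col_ind = sycl::malloc_shared<int>(5, q);
  float *val = sycl::malloc_shared<float>(5, q);
  float *x = sycl::malloc_shared<float>(3, q);
  float *y = sycl::malloc_shared<float>(3, q);
  std::copy(h_row_ptr, h_row_ptr + 4, row_ptr);
  std::copy(h_col_ind, h_col_ind + 5, col_ind);
  std::copy(h_val, h_val + 5, val);
  std::fill(x, x + 3, 1.f);
  std::fill(y, y + 3, 0.f);

  auto info = std::make_shared<dpct::sparse::matrix_info>();
  info->set_index_base(oneapi::mkl::index_base::zero);

  const float alpha = 1.f, beta = 0.f;
  dpct::sparse::csrmv(q, oneapi::mkl::transpose::nontrans, rows, cols, &alpha,
                      info, val, row_ptr, col_ind, x, &beta, y);
  q.wait(); // y should now hold {3, 3, 9}.

  sycl::free(row_ptr, q);
  sycl::free(col_ind, q);
  sycl::free(val, q);
  sycl::free(x, q);
  sycl::free(y, q);
  return 0;
}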
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/device.hpp
//==---- device.hpp -------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_DEVICE_HPP__ #define __DPCT_DEVICE_HPP__ #include <sycl/sycl.hpp> #include <algorithm> #include <array> #include <cstring> #include <iostream> #include <mutex> #include <set> #include <sstream> #include <map> #include <vector> #include <thread> #if defined(__linux__) #include <unistd.h> #include <sys/syscall.h> #endif #if defined(_WIN64) #define NOMINMAX #include <windows.h> #endif namespace dpct { /// SYCL default exception handler inline auto exception_handler = [](sycl::exception_list exceptions) { for (std::exception_ptr const &e : exceptions) { try { std::rethrow_exception(e); } catch (sycl::exception const &e) { std::cerr << "Caught asynchronous SYCL exception:" << std::endl << e.what() << std::endl << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; } } }; typedef sycl::event *event_ptr; typedef sycl::queue *queue_ptr; typedef char *device_ptr; /// Destroy \p event pointed memory. /// /// \param event Pointer to the sycl::event address. static void destroy_event(event_ptr event) { delete event; } class device_info { public: // get interface const char *get_name() const { return _name; } char *get_name() { return _name; } template <typename WorkItemSizesTy = sycl::id<3>, std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> || std::is_same_v<WorkItemSizesTy, int *>, int> = 0> auto get_max_work_item_sizes() const { if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>) return _max_work_item_sizes; else return _max_work_item_sizes_i; } template <typename WorkItemSizesTy = sycl::id<3>, std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> || std::is_same_v<WorkItemSizesTy, int *>, int> = 0> auto get_max_work_item_sizes() { if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>) return _max_work_item_sizes; else return _max_work_item_sizes_i; } bool get_host_unified_memory() const { return _host_unified_memory; } int get_major_version() const { return _major; } int get_minor_version() const { return _minor; } int get_integrated() const { return _integrated; } int get_max_clock_frequency() const { return _frequency; } int get_max_compute_units() const { return _max_compute_units; } int get_max_work_group_size() const { return _max_work_group_size; } int get_max_sub_group_size() const { return _max_sub_group_size; } int get_max_work_items_per_compute_unit() const { return _max_work_items_per_compute_unit; } int get_max_register_size_per_work_group() const { return _max_register_size_per_work_group; } template <typename NDRangeSizeTy = size_t *, std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> || std::is_same_v<NDRangeSizeTy, int *>, int> = 0> auto get_max_nd_range_size() const { if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>) return _max_nd_range_size; else return _max_nd_range_size_i; } template <typename NDRangeSizeTy = size_t *, std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> || std::is_same_v<NDRangeSizeTy, int *>, int> = 0> auto get_max_nd_range_size() { if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>) return _max_nd_range_size; else return _max_nd_range_size_i; } size_t get_global_mem_size() const { return _global_mem_size; } size_t get_local_mem_size() const { return 
_local_mem_size; } /// Returns the maximum clock rate of device's global memory in kHz. If /// compiler does not support this API then returns default value 3200000 kHz. unsigned int get_memory_clock_rate() const { return _memory_clock_rate; } /// Returns the maximum bus width between device and memory in bits. If /// compiler does not support this API then returns default value 64 bits. unsigned int get_memory_bus_width() const { return _memory_bus_width; } uint32_t get_device_id() const { return _device_id; } std::array<unsigned char, 16> get_uuid() const { return _uuid; } // set interface void set_name(const char* name) { size_t length = strlen(name); if (length < 256) { std::memcpy(_name, name, length + 1); } else { std::memcpy(_name, name, 255); _name[255] = '\0'; } } void set_max_work_item_sizes(const sycl::id<3> max_work_item_sizes) { _max_work_item_sizes = max_work_item_sizes; for (int i = 0; i < 3; ++i) _max_work_item_sizes_i[i] = max_work_item_sizes[i]; } void set_host_unified_memory(bool host_unified_memory) { _host_unified_memory = host_unified_memory; } void set_major_version(int major) { _major = major; } void set_minor_version(int minor) { _minor = minor; } void set_integrated(int integrated) { _integrated = integrated; } void set_max_clock_frequency(int frequency) { _frequency = frequency; } void set_max_compute_units(int max_compute_units) { _max_compute_units = max_compute_units; } void set_global_mem_size(size_t global_mem_size) { _global_mem_size = global_mem_size; } void set_local_mem_size(size_t local_mem_size) { _local_mem_size = local_mem_size; } void set_max_work_group_size(int max_work_group_size) { _max_work_group_size = max_work_group_size; } void set_max_sub_group_size(int max_sub_group_size) { _max_sub_group_size = max_sub_group_size; } void set_max_work_items_per_compute_unit(int max_work_items_per_compute_unit) { _max_work_items_per_compute_unit = max_work_items_per_compute_unit; } void set_max_nd_range_size(int max_nd_range_size[]) { for (int i = 0; i < 3; i++) { _max_nd_range_size[i] = max_nd_range_size[i]; _max_nd_range_size_i[i] = max_nd_range_size[i]; } } void set_memory_clock_rate(unsigned int memory_clock_rate) { _memory_clock_rate = memory_clock_rate; } void set_memory_bus_width(unsigned int memory_bus_width) { _memory_bus_width = memory_bus_width; } void set_max_register_size_per_work_group(int max_register_size_per_work_group) { _max_register_size_per_work_group = max_register_size_per_work_group; } void set_device_id(uint32_t device_id) { _device_id = device_id; } void set_uuid(std::array<unsigned char, 16> uuid) { _uuid = std::move(uuid); } private: char _name[256]; sycl::id<3> _max_work_item_sizes; int _max_work_item_sizes_i[3]; bool _host_unified_memory = false; int _major; int _minor; int _integrated = 0; int _frequency; // Set estimated value 3200000 kHz as default value. unsigned int _memory_clock_rate = 3200000; // Set estimated value 64 bits as default value. 
unsigned int _memory_bus_width = 64; int _max_compute_units; int _max_work_group_size; int _max_sub_group_size; int _max_work_items_per_compute_unit; int _max_register_size_per_work_group; size_t _global_mem_size; size_t _local_mem_size; size_t _max_nd_range_size[3]; int _max_nd_range_size_i[3]; uint32_t _device_id; std::array<unsigned char, 16> _uuid; }; /// dpct device extension class device_ext : public sycl::device { typedef std::mutex mutex_type; public: device_ext() : sycl::device(), _ctx(*this) {} ~device_ext() { std::lock_guard<mutex_type> lock(m_mutex); clear_queues(); } device_ext(const sycl::device &base) : sycl::device(base), _ctx(*this) { std::lock_guard<mutex_type> lock(m_mutex); init_queues(); } int is_native_atomic_supported() { return 0; } int get_major_version() const { int major, minor; get_version(major, minor); return major; } int get_minor_version() const { int major, minor; get_version(major, minor); return minor; } int get_max_compute_units() const { return get_device_info().get_max_compute_units(); } /// Return the maximum clock frequency of this device in KHz. int get_max_clock_frequency() const { return get_device_info().get_max_clock_frequency(); } int get_integrated() const { return get_device_info().get_integrated(); } int get_max_sub_group_size() const { return get_device_info().get_max_sub_group_size(); } int get_max_register_size_per_work_group() const { return get_device_info().get_max_register_size_per_work_group(); } int get_max_work_group_size() const { return get_device_info().get_max_work_group_size(); } int get_mem_base_addr_align() const { return get_info<sycl::info::device::mem_base_addr_align>(); } size_t get_global_mem_size() const { return get_device_info().get_global_mem_size(); } /// Get the number of bytes of free and total memory on the SYCL device. /// \param [out] free_memory The number of bytes of free memory on the SYCL device. /// \param [out] total_memory The number of bytes of total memory on the SYCL device. void get_memory_info(size_t &free_memory, size_t &total_memory) { #if (defined(__SYCL_COMPILER_VERSION) && __SYCL_COMPILER_VERSION >= 20221105) if (!has(sycl::aspect::ext_intel_free_memory)) { std::cerr << "get_memory_info: ext_intel_free_memory is not supported." << std::endl; free_memory = 0; } else { free_memory = get_info<sycl::ext::intel::info::device::free_memory>(); } #else std::cerr << "get_memory_info: ext_intel_free_memory is not supported." 
<< std::endl; free_memory = 0; #if defined(_MSC_VER) && !defined(__clang__) #pragma message("Querying the number of bytes of free memory is not supported") #else #warning "Querying the number of bytes of free memory is not supported" #endif #endif total_memory = get_device_info().get_global_mem_size(); } void get_device_info(device_info &out) const { device_info prop; prop.set_name(get_info<sycl::info::device::name>().c_str()); int major, minor; get_version(major, minor); prop.set_major_version(major); prop.set_minor_version(minor); prop.set_max_work_item_sizes( #if (__SYCL_COMPILER_VERSION && __SYCL_COMPILER_VERSION<20220902) // oneAPI DPC++ compiler older than 2022/09/02, where max_work_item_sizes is an enum class element get_info<sycl::info::device::max_work_item_sizes>()); #else // SYCL 2020-conformant code, max_work_item_sizes is a struct templated by an int get_info<sycl::info::device::max_work_item_sizes<3>>()); #endif prop.set_host_unified_memory( this->has(sycl::aspect::usm_host_allocations)); prop.set_max_clock_frequency( get_info<sycl::info::device::max_clock_frequency>() * 1000); prop.set_max_compute_units( get_info<sycl::info::device::max_compute_units>()); prop.set_max_work_group_size( get_info<sycl::info::device::max_work_group_size>()); prop.set_global_mem_size( get_info<sycl::info::device::global_mem_size>()); prop.set_local_mem_size(get_info<sycl::info::device::local_mem_size>()); #if (defined(SYCL_EXT_INTEL_DEVICE_INFO) && SYCL_EXT_INTEL_DEVICE_INFO >= 6) if (this->has(sycl::aspect::ext_intel_memory_clock_rate)) { unsigned int tmp = this->get_info<sycl::ext::intel::info::device::memory_clock_rate>(); if (tmp != 0) prop.set_memory_clock_rate(1000 * tmp); } if (this->has(sycl::aspect::ext_intel_memory_bus_width)) { prop.set_memory_bus_width( this->get_info<sycl::ext::intel::info::device::memory_bus_width>()); } if (this->has(sycl::aspect::ext_intel_device_id)) { prop.set_device_id( this->get_info<sycl::ext::intel::info::device::device_id>()); } if (this->has(sycl::aspect::ext_intel_device_info_uuid)) { prop.set_uuid( this->get_info<sycl::ext::intel::info::device::uuid>()); } #elif defined(_MSC_VER) && !defined(__clang__) #pragma message("get_device_info: querying memory_clock_rate and \ memory_bus_width are not supported by the compiler used. \ Use 3200000 kHz as memory_clock_rate default value. \ Use 64 bits as memory_bus_width default value.") #else #warning "get_device_info: querying memory_clock_rate and \ memory_bus_width are not supported by the compiler used. \ Use 3200000 kHz as memory_clock_rate default value. \ Use 64 bits as memory_bus_width default value." #endif size_t max_sub_group_size = 1; std::vector<size_t> sub_group_sizes = get_info<sycl::info::device::sub_group_sizes>(); for (const auto &sub_group_size : sub_group_sizes) { if (max_sub_group_size < sub_group_size) max_sub_group_size = sub_group_size; } prop.set_max_sub_group_size(max_sub_group_size); prop.set_max_work_items_per_compute_unit( get_info<sycl::info::device::max_work_group_size>()); int max_nd_range_size[] = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF}; prop.set_max_nd_range_size(max_nd_range_size); // Estimates max register size per work group, feel free to update the value // according to device properties. 
prop.set_max_register_size_per_work_group(65536); out = prop; } device_info get_device_info() const { device_info prop; get_device_info(prop); return prop; } void reset() { std::lock_guard<mutex_type> lock(m_mutex); clear_queues(); init_queues(); } sycl::queue &in_order_queue() { return *_q_in_order; } sycl::queue &out_of_order_queue() { return *_q_out_of_order; } sycl::queue &default_queue() { #ifdef DPCT_USM_LEVEL_NONE return out_of_order_queue(); #else return in_order_queue(); #endif // DPCT_USM_LEVEL_NONE } void queues_wait_and_throw() { std::unique_lock<mutex_type> lock(m_mutex); std::vector<std::shared_ptr<sycl::queue>> current_queues( _queues); lock.unlock(); for (const auto &q : current_queues) { q->wait_and_throw(); } // Guard the destruct of current_queues to make sure the ref count is safe. lock.lock(); } sycl::queue *create_queue(bool enable_exception_handler = false) { #ifdef DPCT_USM_LEVEL_NONE return create_out_of_order_queue(enable_exception_handler); #else return create_in_order_queue(enable_exception_handler); #endif // DPCT_USM_LEVEL_NONE } sycl::queue *create_in_order_queue(bool enable_exception_handler = false) { std::lock_guard<mutex_type> lock(m_mutex); return create_queue_impl(enable_exception_handler, sycl::property::queue::in_order()); } sycl::queue *create_out_of_order_queue(bool enable_exception_handler = false) { std::lock_guard<mutex_type> lock(m_mutex); return create_queue_impl(enable_exception_handler); } void destroy_queue(sycl::queue *&queue) { std::lock_guard<mutex_type> lock(m_mutex); _queues.erase(std::remove_if(_queues.begin(), _queues.end(), [=](const std::shared_ptr<sycl::queue> &q) -> bool { return q.get() == queue; }), _queues.end()); queue = nullptr; } void set_saved_queue(sycl::queue* q) { std::lock_guard<mutex_type> lock(m_mutex); _saved_queue = q; } sycl::queue *get_saved_queue() const { std::lock_guard<mutex_type> lock(m_mutex); return _saved_queue; } sycl::context get_context() const { return _ctx; } private: void clear_queues() { _queues.clear(); _q_in_order = _q_out_of_order = _saved_queue = nullptr; } void init_queues() { _q_in_order = create_queue_impl(true, sycl::property::queue::in_order()); _q_out_of_order = create_queue_impl(true); _saved_queue = &default_queue(); } /// Caller should acquire resource \p m_mutex before calling this function. template <class... Properties> sycl::queue *create_queue_impl(bool enable_exception_handler, Properties... properties) { sycl::async_handler eh = {}; if (enable_exception_handler) { eh = exception_handler; } _queues.push_back(std::make_shared<sycl::queue>( _ctx, *this, eh, sycl::property_list( #ifdef DPCT_PROFILING_ENABLED sycl::property::queue::enable_profiling(), #endif properties...))); return _queues.back().get(); } void get_version(int &major, int &minor) const { // Version string has the following format: // a. OpenCL<space><major.minor><space><vendor-specific-information> // b. 
<major.minor> std::string ver; ver = get_info<sycl::info::device::version>(); std::string::size_type i = 0; while (i < ver.size()) { if (isdigit(ver[i])) break; i++; } major = std::stoi(&(ver[i])); while (i < ver.size()) { if (ver[i] == '.') break; i++; } i++; minor = std::stoi(&(ver[i])); } sycl::queue *_q_in_order, *_q_out_of_order; sycl::queue *_saved_queue; sycl::context _ctx; std::vector<std::shared_ptr<sycl::queue>> _queues; mutable mutex_type m_mutex; }; static inline unsigned int get_tid() { #if defined(__linux__) return syscall(SYS_gettid); #elif defined(_WIN64) return GetCurrentThreadId(); #else #error "Only support Windows and Linux." #endif } /// device manager class dev_mgr { public: device_ext &current_device() { unsigned int dev_id=current_device_id(); check_id(dev_id); return *_devs[dev_id]; } device_ext &cpu_device() const { std::lock_guard<std::recursive_mutex> lock(m_mutex); if (_cpu_device == -1) { throw std::runtime_error("no valid cpu device"); } else { return *_devs[_cpu_device]; } } device_ext &get_device(unsigned int id) const { std::lock_guard<std::recursive_mutex> lock(m_mutex); check_id(id); return *_devs[id]; } unsigned int current_device_id() const { std::lock_guard<std::recursive_mutex> lock(m_mutex); auto it=_thread2dev_map.find(get_tid()); if(it != _thread2dev_map.end()) return it->second; return DEFAULT_DEVICE_ID; } /// Select device with a device ID. /// \param [in] id The id of the device which can /// be obtained through get_device_id(const sycl::device). void select_device(unsigned int id) { std::lock_guard<std::recursive_mutex> lock(m_mutex); check_id(id); _thread2dev_map[get_tid()]=id; } unsigned int device_count() { return _devs.size(); } unsigned int get_device_id(const sycl::device &dev) { unsigned int id = 0; for(auto dev_item : _devs) { if (*dev_item == dev) { break; } id++; } return id; } template <class DeviceSelector> std::enable_if_t< std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>> select_device(const DeviceSelector &selector = sycl::gpu_selector_v) { sycl::device selected_device = sycl::device(selector); unsigned int selected_device_id = get_device_id(selected_device); select_device(selected_device_id); } /// Returns the instance of device manager singleton. static dev_mgr &instance() { static dev_mgr d_m; return d_m; } dev_mgr(const dev_mgr &) = delete; dev_mgr &operator=(const dev_mgr &) = delete; dev_mgr(dev_mgr &&) = delete; dev_mgr &operator=(dev_mgr &&) = delete; private: mutable std::recursive_mutex m_mutex; dev_mgr() { sycl::device default_device = sycl::device(sycl::default_selector_v); _devs.push_back(std::make_shared<device_ext>(default_device)); std::vector<sycl::device> sycl_all_devs = sycl::device::get_devices(sycl::info::device_type::all); // Collect other devices except for the default device. if (default_device.is_cpu()) _cpu_device = 0; for (auto &dev : sycl_all_devs) { if (dev == default_device) { continue; } _devs.push_back(std::make_shared<device_ext>(dev)); if (_cpu_device == -1 && dev.is_cpu()) { _cpu_device = _devs.size() - 1; } } } void check_id(unsigned int id) const { if (id >= _devs.size()) { throw std::runtime_error("invalid device id"); } } std::vector<std::shared_ptr<device_ext>> _devs; /// DEFAULT_DEVICE_ID is used, if current_device_id() can not find current /// thread id in _thread2dev_map, which means default device should be used /// for the current thread. const unsigned int DEFAULT_DEVICE_ID = 0; /// thread-id to device-id map. 
std::map<unsigned int, unsigned int> _thread2dev_map; int _cpu_device = -1; }; /// Util function to get the default queue of current selected device depends on /// the USM config. Return the default out-of-ordered queue when USM-none is /// enabled, otherwise return the default in-ordered queue. static inline sycl::queue &get_default_queue() { return dev_mgr::instance().current_device().default_queue(); } /// Util function to get the default in-ordered queue of current device in /// dpct device manager. static inline sycl::queue &get_in_order_queue() { return dev_mgr::instance().current_device().in_order_queue(); } /// Util function to get the default out-of-ordered queue of current device in /// dpct device manager. static inline sycl::queue &get_out_of_order_queue() { return dev_mgr::instance().current_device().out_of_order_queue(); } /// Util function to get the id of current device in /// dpct device manager. static inline unsigned int get_current_device_id() { return dev_mgr::instance().current_device_id(); } /// Util function to get the current device. static inline device_ext &get_current_device() { return dev_mgr::instance().current_device(); } /// Util function to get a device by id. static inline device_ext &get_device(unsigned int id) { return dev_mgr::instance().get_device(id); } /// Util function to get the context of the default queue of current /// device in dpct device manager. static inline sycl::context get_default_context() { return dpct::get_current_device().get_context(); } /// Util function to get a CPU device. static inline device_ext &cpu_device() { return dev_mgr::instance().cpu_device(); } static inline unsigned int select_device(unsigned int id) { dev_mgr::instance().select_device(id); return id; } template <class DeviceSelector> static inline std::enable_if_t< std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>> select_device(const DeviceSelector &selector = sycl::gpu_selector_v) { dev_mgr::instance().select_device(selector); } static inline unsigned int get_device_id(const sycl::device &dev){ return dev_mgr::instance().get_device_id(dev); } /// Util function to check whether a device supports some kinds of sycl::aspect. inline void has_capability_or_fail(const sycl::device &dev, const std::initializer_list<sycl::aspect> &props) { for (const auto &it : props) { if (dev.has(it)) continue; switch (it) { case sycl::aspect::fp64: throw std::runtime_error("'double' is not supported in '" + dev.get_info<sycl::info::device::name>() + "' device"); break; case sycl::aspect::fp16: throw std::runtime_error("'half' is not supported in '" + dev.get_info<sycl::info::device::name>() + "' device"); break; default: #define __SYCL_ASPECT(ASPECT, ID) \ case sycl::aspect::ASPECT: \ return #ASPECT; #define __SYCL_ASPECT_DEPRECATED(ASPECT, ID, MESSAGE) __SYCL_ASPECT(ASPECT, ID) #define __SYCL_ASPECT_DEPRECATED_ALIAS(ASPECT, ID, MESSAGE) auto getAspectNameStr = [](sycl::aspect AspectNum) -> std::string { switch (AspectNum) { #include <sycl/info/aspects.def> #include <sycl/info/aspects_deprecated.def> default: return "unknown aspect"; } }; #undef __SYCL_ASPECT_DEPRECATED_ALIAS #undef __SYCL_ASPECT_DEPRECATED #undef __SYCL_ASPECT throw std::runtime_error( "'" + getAspectNameStr(it) + "' is not supported in '" + dev.get_info<sycl::info::device::name>() + "' device"); } break; } } } // namespace dpct #endif // __DPCT_DEVICE_HPP__
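// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header): a minimal host
// program showing how the device_info / device_ext / dev_mgr helpers defined
// above are typically consumed. It assumes this header is reachable as
// "dpct/device.hpp" and that a SYCL 2020 compiler (e.g. icpx -fsycl) is used.
#include <cstdio>
#include <stdexcept>
#include <sycl/sycl.hpp>
#include "dpct/device.hpp" // assumed include path

int main() {
  // Enumerate the devices discovered by the dpct device manager singleton.
  unsigned int count = dpct::dev_mgr::instance().device_count();
  std::printf("dpct sees %u device(s)\n", count);

  // Query the device bound to the calling thread (device 0 by default).
  dpct::device_ext &dev = dpct::get_current_device();
  dpct::device_info props;
  dev.get_device_info(props);
  std::printf("name      : %s\n",
              dev.get_info<sycl::info::device::name>().c_str());
  std::printf("CUs       : %d, max work-group size: %d\n",
              dev.get_max_compute_units(), dev.get_max_work_group_size());
  std::printf("mem clock : %u kHz, bus width : %u bit\n",
              props.get_memory_clock_rate(), props.get_memory_bus_width());

  // has_capability_or_fail throws when a required aspect is missing.
  try {
    dpct::has_capability_or_fail(dev, {sycl::aspect::fp64});
    std::puts("fp64 is supported");
  } catch (const std::runtime_error &e) {
    std::puts(e.what());
  }

  // The default queue is in-order unless DPCT_USM_LEVEL_NONE is defined.
  sycl::queue &q = dpct::get_default_queue();
  q.single_task([] {}).wait();
  return 0;
}
// ---------------------------------------------------------------------------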
==== oneAPI-samples :: data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/memory.hpp ====
//==---- memory.hpp -------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_MEMORY_HPP__ #define __DPCT_MEMORY_HPP__ #include "device.hpp" #include <sycl/sycl.hpp> #include <cassert> #include <cstdint> #include <cstring> #include <mutex> #include <unordered_map> #include <map> #include <utility> #include <thread> #include <type_traits> #if defined(__linux__) #include <sys/mman.h> #elif defined(_WIN64) #define NOMINMAX #include <windows.h> #else #error "Only support Windows and Linux." #endif namespace dpct { enum memcpy_direction { host_to_host, host_to_device, device_to_host, device_to_device, automatic }; enum memory_region { global = 0, // device global memory constant, // device constant memory local, // device local memory shared, // memory which can be accessed by host and device }; typedef uint8_t byte_t; /// Buffer type to be used in Memory Management runtime. typedef sycl::buffer<byte_t> buffer_t; /// Pitched 2D/3D memory data. class pitched_data { public: pitched_data() : pitched_data(nullptr, 0, 0, 0) {} pitched_data(void *data, size_t pitch, size_t x, size_t y) : _data(data), _pitch(pitch), _x(x), _y(y) {} void *get_data_ptr() { return _data; } void set_data_ptr(void *data) { _data = data; } size_t get_pitch() { return _pitch; } void set_pitch(size_t pitch) { _pitch = pitch; } size_t get_x() { return _x; } void set_x(size_t x) { _x = x; }; size_t get_y() { return _y; } void set_y(size_t y) { _y = y; } private: void *_data; size_t _pitch, _x, _y; }; namespace detail { class mem_mgr { mem_mgr() { // Reserved address space, no real memory allocation happens here. #if defined(__linux__) mapped_address_space = (byte_t *)mmap(nullptr, mapped_region_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); #elif defined(_WIN64) mapped_address_space = (byte_t *)VirtualAlloc( NULL, // NULL specified as the base address parameter mapped_region_size, // Size of allocation MEM_RESERVE, // Allocate reserved pages PAGE_NOACCESS); // Protection = no access #else #error "Only support Windows and Linux." #endif next_free = mapped_address_space; }; public: using buffer_id_t = int; struct allocation { buffer_t buffer; byte_t *alloc_ptr; size_t size; }; ~mem_mgr() { #if defined(__linux__) munmap(mapped_address_space, mapped_region_size); #elif defined(_WIN64) VirtualFree(mapped_address_space, 0, MEM_RELEASE); #else #error "Only support Windows and Linux." #endif }; mem_mgr(const mem_mgr &) = delete; mem_mgr &operator=(const mem_mgr &) = delete; mem_mgr(mem_mgr &&) = delete; mem_mgr &operator=(mem_mgr &&) = delete; /// Allocate void *mem_alloc(size_t size) { if (!size) return nullptr; std::lock_guard<std::mutex> lock(m_mutex); if (next_free + size > mapped_address_space + mapped_region_size) { throw std::runtime_error("dpct_malloc: out of memory for virtual memory pool"); } // Allocation sycl::range<1> r(size); buffer_t buf(r); allocation A{buf, next_free, size}; // Map allocation to device pointer void *result = next_free; m_map.emplace(next_free + size, A); // Update pointer to the next free space. 
next_free += (size + extra_padding + alignment - 1) & ~(alignment - 1); return result; } /// Deallocate void mem_free(const void *ptr) { if (!ptr) return; std::lock_guard<std::mutex> lock(m_mutex); auto it = get_map_iterator(ptr); m_map.erase(it); } /// map: device pointer -> allocation(buffer, alloc_ptr, size) allocation translate_ptr(const void *ptr) { std::lock_guard<std::mutex> lock(m_mutex); auto it = get_map_iterator(ptr); return it->second; } /// Check if the pointer represents device pointer or not. bool is_device_ptr(const void *ptr) const { std::lock_guard<std::mutex> lock(m_mutex); return (mapped_address_space <= ptr) && (ptr < mapped_address_space + mapped_region_size); } /// Returns the instance of memory manager singleton. static mem_mgr &instance() { static mem_mgr m; return m; } private: std::map<byte_t *, allocation> m_map; mutable std::mutex m_mutex; byte_t *mapped_address_space; byte_t *next_free; const size_t mapped_region_size = 128ull * 1024 * 1024 * 1024; const size_t alignment = 256; /// This padding may be defined to some positive value to debug /// out of bound accesses. const size_t extra_padding = 0; std::map<byte_t *, allocation>::iterator get_map_iterator(const void *ptr) { auto it = m_map.upper_bound((byte_t *)ptr); if (it == m_map.end()) { // Not a virtual pointer. throw std::runtime_error("can not get buffer from non-virtual pointer"); } const allocation &alloc = it->second; if (ptr < alloc.alloc_ptr) { // Out of bound. // This may happen if there's a gap between allocations due to alignment // or extra padding and pointer points to this gap. throw std::runtime_error("invalid virtual pointer"); } return it; } }; template <class T, memory_region Memory, size_t Dimension> class accessor; template <memory_region Memory, class T = byte_t> class memory_traits { public: static constexpr sycl::access::target target = sycl::access::target::device; static constexpr sycl::access_mode mode = (Memory == constant) ? sycl::access_mode::read : sycl::access_mode::read_write; static constexpr size_t type_size = sizeof(T); using element_t = typename std::conditional<Memory == constant, const T, T>::type; using value_t = typename std::remove_cv<T>::type; template <size_t Dimension = 1> using accessor_t = typename std::conditional< Memory == local, sycl::local_accessor<value_t, Dimension>, sycl::accessor<T, Dimension, mode, target>>::type; using pointer_t = T *; }; static inline void *dpct_malloc(size_t size, sycl::queue &q) { #ifdef DPCT_USM_LEVEL_NONE return mem_mgr::instance().mem_alloc(size * sizeof(byte_t)); #else return sycl::malloc_device(size, q.get_device(), q.get_context()); #endif // DPCT_USM_LEVEL_NONE } #define PITCH_DEFAULT_ALIGN(x) (((x) + 31) & ~(0x1F)) static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y, size_t z, sycl::queue &q) { pitch = PITCH_DEFAULT_ALIGN(x); return dpct_malloc(pitch * y * z, q); } /// Set \p value to the first \p size bytes starting from \p dev_ptr in \p q. /// /// \param q The queue in which the operation is done. /// \param dev_ptr Pointer to the device memory address. /// \param value Value to be set. /// \param size Number of bytes to be set to the value. /// \returns An event representing the memset operation. 
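/// Note: when DPCT_USM_LEVEL_NONE is defined the bytes are written through a
/// buffer accessor with sycl::handler::fill; otherwise the call forwards to
/// sycl::queue::memset.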
static inline sycl::event dpct_memset(sycl::queue &q, void *dev_ptr, int value, size_t size) { #ifdef DPCT_USM_LEVEL_NONE auto &mm = mem_mgr::instance(); assert(mm.is_device_ptr(dev_ptr)); auto alloc = mm.translate_ptr(dev_ptr); size_t offset = (byte_t *)dev_ptr - alloc.alloc_ptr; return q.submit([&](sycl::handler &cgh) { auto r = sycl::range<1>(size); auto o = sycl::id<1>(offset); sycl::accessor<byte_t, 1, sycl::access_mode::write, sycl::access::target::device> acc(alloc.buffer, cgh, r, o); cgh.fill(acc, (byte_t)value); }); #else return q.memset(dev_ptr, value, size); #endif // DPCT_USM_LEVEL_NONE } /// Set \p value to the 3D memory region pointed by \p data in \p q. \p size /// specifies the 3D memory size to set. /// /// \param q The queue in which the operation is done. /// \param data Pointer to the device memory region. /// \param value Value to be set. /// \param size Memory region size. /// \returns An event list representing the memset operations. static inline std::vector<sycl::event> dpct_memset(sycl::queue &q, pitched_data data, int value, sycl::range<3> size) { std::vector<sycl::event> event_list; size_t slice = data.get_pitch() * data.get_y(); unsigned char *data_surface = (unsigned char *)data.get_data_ptr(); for (size_t z = 0; z < size.get(2); ++z) { unsigned char *data_ptr = data_surface; for (size_t y = 0; y < size.get(1); ++y) { event_list.push_back(dpct_memset(q, data_ptr, value, size.get(0))); data_ptr += data.get_pitch(); } data_surface += slice; } return event_list; } /// memset 2D matrix with pitch. static inline std::vector<sycl::event> dpct_memset(sycl::queue &q, void *ptr, size_t pitch, int val, size_t x, size_t y) { return dpct_memset(q, pitched_data(ptr, pitch, x, 1), val, sycl::range<3>(x, y, 1)); } enum class pointer_access_attribute { host_only = 0, device_only, host_device, end }; static pointer_access_attribute get_pointer_attribute(sycl::queue &q, const void *ptr) { #ifdef DPCT_USM_LEVEL_NONE return mem_mgr::instance().is_device_ptr(ptr) ? 
pointer_access_attribute::device_only : pointer_access_attribute::host_only; #else switch (sycl::get_pointer_type(ptr, q.get_context())) { case sycl::usm::alloc::unknown: return pointer_access_attribute::host_only; case sycl::usm::alloc::device: return pointer_access_attribute::device_only; case sycl::usm::alloc::shared: case sycl::usm::alloc::host: return pointer_access_attribute::host_device; } #endif } static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr, const void *from_ptr, memcpy_direction dir) { switch (dir) { case memcpy_direction::host_to_host: case memcpy_direction::host_to_device: case memcpy_direction::device_to_host: case memcpy_direction::device_to_device: return dir; case memcpy_direction::automatic: { // table[to_attribute][from_attribute] static const memcpy_direction direction_table[static_cast<unsigned>(pointer_access_attribute::end)] [static_cast<unsigned>(pointer_access_attribute::end)] = {{memcpy_direction::host_to_host, memcpy_direction::device_to_host, memcpy_direction::host_to_host}, {memcpy_direction::host_to_device, memcpy_direction::device_to_device, memcpy_direction::device_to_device}, {memcpy_direction::host_to_host, memcpy_direction::device_to_device, memcpy_direction::device_to_device}}; return direction_table[static_cast<unsigned>(get_pointer_attribute( q, to_ptr))][static_cast<unsigned>(get_pointer_attribute(q, from_ptr))]; } default: throw std::runtime_error("dpct_memcpy: invalid direction value"); } } static sycl::event dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size, memcpy_direction direction, const std::vector<sycl::event> &dep_events = {}) { if (!size) return sycl::event{}; #ifdef DPCT_USM_LEVEL_NONE auto &mm = mem_mgr::instance(); auto real_direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction); switch (real_direction) { case host_to_host: return q.submit([&](sycl::handler &cgh) { cgh.depends_on(dep_events); cgh.host_task([=] { std::memcpy(to_ptr, from_ptr, size); }); }); case host_to_device: { auto alloc = mm.translate_ptr(to_ptr); size_t offset = (byte_t *)to_ptr - alloc.alloc_ptr; return q.submit([&](sycl::handler &cgh) { cgh.depends_on(dep_events); auto r = sycl::range<1>(size); auto o = sycl::id<1>(offset); sycl::accessor<byte_t, 1, sycl::access_mode::write, sycl::access::target::device> acc(alloc.buffer, cgh, r, o); cgh.copy(from_ptr, acc); }); } case device_to_host: { auto alloc = mm.translate_ptr(from_ptr); size_t offset = (byte_t *)from_ptr - alloc.alloc_ptr; return q.submit([&](sycl::handler &cgh) { cgh.depends_on(dep_events); auto r = sycl::range<1>(size); auto o = sycl::id<1>(offset); sycl::accessor<byte_t, 1, sycl::access_mode::read, sycl::access::target::device> acc(alloc.buffer, cgh, r, o); cgh.copy(acc, to_ptr); }); } case device_to_device: { auto to_alloc = mm.translate_ptr(to_ptr); auto from_alloc = mm.translate_ptr(from_ptr); size_t to_offset = (byte_t *)to_ptr - to_alloc.alloc_ptr; size_t from_offset = (byte_t *)from_ptr - from_alloc.alloc_ptr; return q.submit([&](sycl::handler &cgh) { cgh.depends_on(dep_events); auto r = sycl::range<1>(size); auto to_o = sycl::id<1>(to_offset); auto from_o = sycl::id<1>(from_offset); sycl::accessor<byte_t, 1, sycl::access_mode::write, sycl::access::target::device> to_acc(to_alloc.buffer, cgh, r, to_o); sycl::accessor<byte_t, 1, sycl::access_mode::read, sycl::access::target::device> from_acc(from_alloc.buffer, cgh, r, from_o); cgh.copy(from_acc, to_acc); }); } default: throw std::runtime_error("dpct_memcpy: invalid direction 
value"); } #else return q.memcpy(to_ptr, from_ptr, size, dep_events); #endif // DPCT_USM_LEVEL_NONE } // Get actual copy range and make sure it will not exceed range. static inline size_t get_copy_range(sycl::range<3> size, size_t slice, size_t pitch) { return slice * (size.get(2) - 1) + pitch * (size.get(1) - 1) + size.get(0); } static inline size_t get_offset(sycl::id<3> id, size_t slice, size_t pitch) { return slice * id.get(2) + pitch * id.get(1) + id.get(0); } /// copy 3D matrix specified by \p size from 3D matrix specified by \p from_ptr /// and \p from_range to another specified by \p to_ptr and \p to_range. static inline std::vector<sycl::event> dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, sycl::range<3> to_range, sycl::range<3> from_range, sycl::id<3> to_id, sycl::id<3> from_id, sycl::range<3> size, memcpy_direction direction, const std::vector<sycl::event> &dep_events = {}) { // RAII for host pointer class host_buffer { void *_buf; size_t _size; sycl::queue &_q; const std::vector<sycl::event> &_deps; // free operation depends public: host_buffer(size_t size, sycl::queue &q, const std::vector<sycl::event> &deps) : _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {} void *get_ptr() const { return _buf; } size_t get_size() const { return _size; } ~host_buffer() { if (_buf) { _q.submit([&](sycl::handler &cgh) { cgh.depends_on(_deps); cgh.host_task([buf = _buf] { std::free(buf); }); }); } } }; std::vector<sycl::event> event_list; size_t to_slice = to_range.get(1) * to_range.get(0), from_slice = from_range.get(1) * from_range.get(0); unsigned char *to_surface = (unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0)); const unsigned char *from_surface = (const unsigned char *)from_ptr + get_offset(from_id, from_slice, from_range.get(0)); if (to_slice == from_slice && to_slice == size.get(1) * size.get(0)) { return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2), direction, dep_events)}; } direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction); size_t size_slice = size.get(1) * size.get(0); switch (direction) { case host_to_host: for (size_t z = 0; z < size.get(2); ++z) { unsigned char *to_ptr = to_surface; const unsigned char *from_ptr = from_surface; if (to_range.get(0) == from_range.get(0) && to_range.get(0) == size.get(0)) { event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice, direction, dep_events)); } else { for (size_t y = 0; y < size.get(1); ++y) { event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0), direction, dep_events)); to_ptr += to_range.get(0); from_ptr += from_range.get(0); } } to_surface += to_slice; from_surface += from_slice; } break; case host_to_device: { host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q, event_list); std::vector<sycl::event> host_events; if (to_slice == size_slice) { // Copy host data to a temp host buffer with the shape of target. host_events = dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range, sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host, dep_events); } else { // Copy host data to a temp host buffer with the shape of target. host_events = dpct_memcpy( q, buf.get_ptr(), from_surface, to_range, from_range, sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host, // If has padding data, not sure whether it is useless. So fill temp // buffer with it. 
std::vector<sycl::event>{ dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(), device_to_host, dep_events)}); } // Copy from temp host buffer to device with only one submit. event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(), buf.get_size(), host_to_device, host_events)); break; } case device_to_host: { host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q, event_list); // Copy from host temp buffer to host target with reshaping. event_list = dpct_memcpy( q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host, // Copy from device to temp host buffer with only one submit. std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface, buf.get_size(), device_to_host, dep_events)}); break; } case device_to_device: #ifdef DPCT_USM_LEVEL_NONE { auto &mm = mem_mgr::instance(); auto to_alloc = mm.translate_ptr(to_surface); auto from_alloc = mm.translate_ptr(from_surface); size_t to_offset = (byte_t *)to_surface - to_alloc.alloc_ptr; size_t from_offset = (byte_t *)from_surface - from_alloc.alloc_ptr; event_list.push_back(q.submit([&](sycl::handler &cgh) { cgh.depends_on(dep_events); auto to_o = sycl::id<1>(to_offset); auto from_o = sycl::id<1>(from_offset); sycl::accessor<byte_t, 1, sycl::access_mode::write, sycl::access::target::device> to_acc(to_alloc.buffer, cgh, get_copy_range(size, to_slice, to_range.get(0)), to_o); sycl::accessor<byte_t, 1, sycl::access_mode::read, sycl::access::target::device> from_acc(from_alloc.buffer, cgh, get_copy_range(size, from_slice, from_range.get(0)), from_o); cgh.parallel_for<class dpct_memcpy_3d_detail_usmnone>( size, [=](sycl::id<3> id) { to_acc[get_offset(id, to_slice, to_range.get(0))] = from_acc[get_offset(id, from_slice, from_range.get(0))]; }); })); } #else event_list.push_back(q.submit([&](sycl::handler &cgh) { cgh.depends_on(dep_events); cgh.parallel_for<class dpct_memcpy_3d_detail>( size, [=](sycl::id<3> id) { to_surface[get_offset(id, to_slice, to_range.get(0))] = from_surface[get_offset(id, from_slice, from_range.get(0))]; }); })); #endif break; default: throw std::runtime_error("dpct_memcpy: invalid direction value"); } return event_list; } /// memcpy 2D/3D matrix specified by pitched_data. static inline std::vector<sycl::event> dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id, pitched_data from, sycl::id<3> from_id, sycl::range<3> size, memcpy_direction direction = automatic) { return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(), sycl::range<3>(to.get_pitch(), to.get_y(), 1), sycl::range<3>(from.get_pitch(), from.get_y(), 1), to_id, from_id, size, direction); } /// memcpy 2D matrix with pitch. 
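/// Implemented as a thin wrapper over the 3D overload above, with the depth
/// (z extent) fixed to 1.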
static inline std::vector<sycl::event> dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t to_pitch, size_t from_pitch, size_t x, size_t y, memcpy_direction direction = automatic) { return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1), sycl::range<3>(from_pitch, y, 1), sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), sycl::range<3>(x, y, 1), direction); } namespace deprecated { template <typename T, sycl::usm::alloc AllocKind> class usm_allocator { private: using Alloc = sycl::usm_allocator<T, AllocKind>; Alloc _impl; public: using value_type = typename std::allocator_traits<Alloc>::value_type; using pointer = typename std::allocator_traits<Alloc>::pointer; using const_pointer = typename std::allocator_traits<Alloc>::const_pointer; using void_pointer = typename std::allocator_traits<Alloc>::void_pointer; using const_void_pointer = typename std::allocator_traits<Alloc>::const_void_pointer; using reference = typename std::allocator_traits<Alloc>::value_type &; using const_reference = const typename std::allocator_traits<Alloc>::value_type &; using difference_type = typename std::allocator_traits<Alloc>::difference_type; using size_type = typename std::allocator_traits<Alloc>::size_type; using propagate_on_container_copy_assignment = typename std::allocator_traits< Alloc>::propagate_on_container_copy_assignment; using propagate_on_container_move_assignment = typename std::allocator_traits< Alloc>::propagate_on_container_move_assignment; using propagate_on_container_swap = typename std::allocator_traits<Alloc>::propagate_on_container_swap; using is_always_equal = typename std::allocator_traits<Alloc>::is_always_equal; template <typename U> struct rebind { typedef usm_allocator<U, AllocKind> other; }; usm_allocator() : _impl(dpct::get_default_queue()) {} ~usm_allocator() {} usm_allocator(const usm_allocator &other) : _impl(other._impl) {} usm_allocator(usm_allocator &&other) : _impl(std::move(other._impl)) {} pointer address(reference r) { return &r; } const_pointer address(const_reference r) { return &r; } pointer allocate(size_type cnt, const_void_pointer hint = nullptr) { return std::allocator_traits<Alloc>::allocate(_impl, cnt, hint); } void deallocate(pointer p, size_type cnt) { std::allocator_traits<Alloc>::deallocate(_impl, p, cnt); } size_type max_size() const { return std::allocator_traits<Alloc>::max_size(_impl); } bool operator==(const usm_allocator &other) const { return _impl == other._impl; } bool operator!=(const usm_allocator &other) const { return _impl != other._impl; } }; } // namespace deprecated inline void dpct_free(void *ptr, const sycl::queue &q) { if (ptr) { #ifdef DPCT_USM_LEVEL_NONE detail::mem_mgr::instance().mem_free(ptr); #else sycl::free(ptr, q.get_context()); #endif // DPCT_USM_LEVEL_NONE } } } // namespace detail #ifdef DPCT_USM_LEVEL_NONE /// Check if the pointer \p ptr represents device pointer or not. /// /// \param ptr The pointer to be checked. /// \returns true if \p ptr is a device pointer. template<class T> static inline bool is_device_ptr(T ptr) { if constexpr (std::is_pointer<T>::value) { return detail::mem_mgr::instance().is_device_ptr(ptr); } return false; } #endif /// Get the buffer and the offset of a piece of memory pointed to by \p ptr. /// /// \param ptr Pointer to a piece of memory. /// If NULL is passed as an argument, an exception will be thrown. /// \returns a pair containing both the buffer and the offset. 
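/// Note: this only succeeds for buffer-backed virtual pointers, i.e. pointers
/// obtained from dpct_malloc when DPCT_USM_LEVEL_NONE is defined; pointers the
/// memory manager does not know about cannot be translated.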
static std::pair<buffer_t, size_t> get_buffer_and_offset(const void *ptr) { if (ptr) { auto alloc = detail::mem_mgr::instance().translate_ptr(ptr); size_t offset = (byte_t *)ptr - alloc.alloc_ptr; return std::make_pair(alloc.buffer, offset); } else { throw std::runtime_error( "NULL pointer argument in get_buffer_and_offset function is invalid"); } } /// Get the data pointed from \p ptr as a 1D buffer reinterpreted as type T. template <typename T> static sycl::buffer<T> get_buffer(const void *ptr) { if (!ptr) return sycl::buffer<T>(sycl::range<1>(0)); auto alloc = detail::mem_mgr::instance().translate_ptr(ptr); return alloc.buffer.reinterpret<T>( sycl::range<1>(alloc.size / sizeof(T))); } /// Get the buffer of a piece of memory pointed to by \p ptr. /// /// \param ptr Pointer to a piece of memory. /// \returns the buffer. static buffer_t get_buffer(const void *ptr) { return detail::mem_mgr::instance().translate_ptr(ptr).buffer; } /// A wrapper class contains an accessor and an offset. template <typename dataT, sycl::access_mode accessMode = sycl::access_mode::read_write> class access_wrapper { sycl::accessor<byte_t, 1, accessMode> accessor; size_t offset; public: /// Construct the accessor wrapper for memory pointed by \p ptr. /// /// \param ptr Pointer to memory. /// \param cgh The command group handler. access_wrapper(const void *ptr, sycl::handler &cgh) : accessor(get_buffer(ptr).get_access<accessMode>(cgh)), offset(0) { auto alloc = detail::mem_mgr::instance().translate_ptr(ptr); offset = (byte_t *)ptr - alloc.alloc_ptr; } /// Get the device pointer. /// /// \returns a device pointer with offset. dataT get_raw_pointer() const { return (dataT)(&accessor[0] + offset); } }; /// Get the accessor for memory pointed by \p ptr. /// /// \param ptr Pointer to memory. /// If NULL is passed as an argument, an exception will be thrown. /// \param cgh The command group handler. /// \returns an accessor. template <sycl::access_mode accessMode = sycl::access_mode::read_write> static sycl::accessor<byte_t, 1, accessMode> get_access(const void *ptr, sycl::handler &cgh) { if (ptr) { auto alloc = detail::mem_mgr::instance().translate_ptr(ptr); return alloc.buffer.get_access<accessMode>(cgh); } else { throw std::runtime_error( "NULL pointer argument in get_access function is invalid"); } } /// Allocate memory block on the device. /// \param num_bytes Number of bytes to allocate. /// \param q Queue to execute the allocate task. /// \returns A pointer to the newly allocated memory. template <typename T> static inline void *dpct_malloc(T num_bytes, sycl::queue &q = get_default_queue()) { return detail::dpct_malloc(static_cast<size_t>(num_bytes), q); } /// Get the host pointer from a buffer that is mapped to virtual pointer ptr. /// \param ptr Virtual Pointer mapped to device buffer /// \returns A host pointer template <typename T> static inline T *get_host_ptr(const void *ptr) { auto BufferOffset = get_buffer_and_offset(ptr); auto host_ptr = BufferOffset.first.get_host_access() .get_pointer(); return (T *)(host_ptr + BufferOffset.second); } /// Allocate memory block for 3D array on the device. /// \param size Size of the memory block, in bytes. /// \param q Queue to execute the allocate task. /// \returns A pitched_data object which stores the memory info. 
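/// The pitch of the returned allocation is the x extent rounded up to the next
/// multiple of 32 bytes (see PITCH_DEFAULT_ALIGN).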
static inline pitched_data dpct_malloc(sycl::range<3> size, sycl::queue &q = get_default_queue()) { pitched_data pitch(nullptr, 0, size.get(0), size.get(1)); size_t pitch_size; pitch.set_data_ptr(detail::dpct_malloc(pitch_size, size.get(0), size.get(1), size.get(2), q)); pitch.set_pitch(pitch_size); return pitch; } /// Allocate memory block for 2D array on the device. /// \param [out] pitch Aligned size of x in bytes. /// \param x Range in dim x. /// \param y Range in dim y. /// \param q Queue to execute the allocate task. /// \returns A pointer to the newly allocated memory. static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y, sycl::queue &q = get_default_queue()) { return detail::dpct_malloc(pitch, x, y, 1, q); } /// free /// \param ptr Point to free. /// \param q Queue to execute the free task. /// \returns no return value. static inline void dpct_free(void *ptr, sycl::queue &q = get_default_queue()) { detail::dpct_free(ptr, q); } /// Free the device memory pointed by a batch of pointers in \p pointers which /// are related to \p q after \p events completed. /// /// \param pointers The pointers point to the device memory requested to be freed. /// \param events The events to be waited. /// \param q The sycl::queue the memory relates to. inline void async_dpct_free(const std::vector<void *> &pointers, const std::vector<sycl::event> &events, sycl::queue &q = get_default_queue()) { q.submit([&](sycl::handler &cgh) { cgh.depends_on(events); cgh.host_task([=] { for (auto p : pointers) if (p) { detail::dpct_free(p, q); } }); }); } /// Synchronously copies \p size bytes from the address specified by \p from_ptr /// to the address specified by \p to_ptr. The value of \p direction is used to /// set the copy direction, it can be \a host_to_host, \a host_to_device, /// \a device_to_host, \a device_to_device or \a automatic. The function will /// return after the copy is completed. /// /// \param to_ptr Pointer to destination memory address. /// \param from_ptr Pointer to source memory address. /// \param size Number of bytes to be copied. /// \param direction Direction of the copy. /// \param q Queue to execute the copy task. /// \returns no return value. static void dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size, memcpy_direction direction = automatic, sycl::queue &q = get_default_queue()) { detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction).wait(); } /// Asynchronously copies \p size bytes from the address specified by \p /// from_ptr to the address specified by \p to_ptr. The value of \p direction is /// used to set the copy direction, it can be \a host_to_host, \a /// host_to_device, \a device_to_host, \a device_to_device or \a automatic. The /// return of the function does NOT guarantee the copy is completed. /// /// \param to_ptr Pointer to destination memory address. /// \param from_ptr Pointer to source memory address. /// \param size Number of bytes to be copied. /// \param direction Direction of the copy. /// \param q Queue to execute the copy task. /// \returns no return value. 
static void async_dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size, memcpy_direction direction = automatic, sycl::queue &q = dpct::get_default_queue()) { detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction); } /// Synchronously copies 2D matrix specified by \p x and \p y from the address /// specified by \p from_ptr to the address specified by \p to_ptr, while \p /// from_pitch and \p to_pitch are the range of dim x in bytes of the matrix /// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to /// set the copy direction, it can be \a host_to_host, \a host_to_device, \a /// device_to_host, \a device_to_device or \a automatic. The function will /// return after the copy is completed. /// /// \param to_ptr Pointer to destination memory address. /// \param to_pitch Range of dim x in bytes of destination matrix. /// \param from_ptr Pointer to source memory address. /// \param from_pitch Range of dim x in bytes of source matrix. /// \param x Range of dim x of matrix to be copied. /// \param y Range of dim y of matrix to be copied. /// \param direction Direction of the copy. /// \param q Queue to execute the copy task. /// \returns no return value. static inline void dpct_memcpy(void *to_ptr, size_t to_pitch, const void *from_ptr, size_t from_pitch, size_t x, size_t y, memcpy_direction direction = automatic, sycl::queue &q = dpct::get_default_queue()) { sycl::event::wait(detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch, from_pitch, x, y, direction)); } /// Asynchronously copies 2D matrix specified by \p x and \p y from the address /// specified by \p from_ptr to the address specified by \p to_ptr, while \p /// \p from_pitch and \p to_pitch are the range of dim x in bytes of the matrix /// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to /// set the copy direction, it can be \a host_to_host, \a host_to_device, \a /// device_to_host, \a device_to_device or \a automatic. The return of the /// function does NOT guarantee the copy is completed. /// /// \param to_ptr Pointer to destination memory address. /// \param to_pitch Range of dim x in bytes of destination matrix. /// \param from_ptr Pointer to source memory address. /// \param from_pitch Range of dim x in bytes of source matrix. /// \param x Range of dim x of matrix to be copied. /// \param y Range of dim y of matrix to be copied. /// \param direction Direction of the copy. /// \param q Queue to execute the copy task. /// \returns no return value. static inline void async_dpct_memcpy(void *to_ptr, size_t to_pitch, const void *from_ptr, size_t from_pitch, size_t x, size_t y, memcpy_direction direction = automatic, sycl::queue &q = get_default_queue()) { detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch, from_pitch, x, y, direction); } /// Synchronously copies a subset of a 3D matrix specified by \p to to another /// 3D matrix specified by \p from. The from and to position info are specified /// by \p from_pos and \p to_pos The copied matrix size is specified by \p size. /// The value of \p direction is used to set the copy direction, it can be \a /// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or /// \a automatic. The function will return after the copy is completed. /// /// \param to Destination matrix info. /// \param to_pos Position of destination. /// \param from Source matrix info. /// \param from_pos Position of destination. /// \param size Range of the submatrix to be copied. /// \param direction Direction of the copy. 
/// \param q Queue to execute the copy task. /// \returns no return value. static inline void dpct_memcpy(pitched_data to, sycl::id<3> to_pos, pitched_data from, sycl::id<3> from_pos, sycl::range<3> size, memcpy_direction direction = automatic, sycl::queue &q = dpct::get_default_queue()) { sycl::event::wait( detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction)); } /// Asynchronously copies a subset of a 3D matrix specified by \p to to another /// 3D matrix specified by \p from. The from and to position info are specified /// by \p from_pos and \p to_pos The copied matrix size is specified by \p size. /// The value of \p direction is used to set the copy direction, it can be \a /// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or /// \a automatic. The return of the function does NOT guarantee the copy is /// completed. /// /// \param to Destination matrix info. /// \param to_pos Position of destination. /// \param from Source matrix info. /// \param from_pos Position of destination. /// \param size Range of the submatrix to be copied. /// \param direction Direction of the copy. /// \param q Queue to execute the copy task. /// \returns no return value. static inline void async_dpct_memcpy(pitched_data to, sycl::id<3> to_pos, pitched_data from, sycl::id<3> from_pos, sycl::range<3> size, memcpy_direction direction = automatic, sycl::queue &q = get_default_queue()) { detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction); } /// Synchronously sets \p value to the first \p size bytes starting from \p /// dev_ptr. The function will return after the memset operation is completed. /// /// \param dev_ptr Pointer to the device memory address. /// \param value Value to be set. /// \param size Number of bytes to be set to the value. /// \param q The queue in which the operation is done. /// \returns no return value. static void dpct_memset(void *dev_ptr, int value, size_t size, sycl::queue &q = get_default_queue()) { detail::dpct_memset(q, dev_ptr, value, size).wait(); } /// Asynchronously sets \p value to the first \p size bytes starting from \p /// dev_ptr. The return of the function does NOT guarantee the memset operation /// is completed. /// /// \param dev_ptr Pointer to the device memory address. /// \param value Value to be set. /// \param size Number of bytes to be set to the value. /// \returns no return value. static void async_dpct_memset(void *dev_ptr, int value, size_t size, sycl::queue &q = dpct::get_default_queue()) { detail::dpct_memset(q, dev_ptr, value, size); } /// Sets \p value to the 2D memory region pointed by \p ptr in \p q. \p x and /// \p y specify the setted 2D memory size. \p pitch is the bytes in linear /// dimension, including padding bytes. The function will return after the /// memset operation is completed. /// /// \param ptr Pointer to the device memory region. /// \param pitch Bytes in linear dimension, including padding bytes. /// \param value Value to be set. /// \param x The setted memory size in linear dimension. /// \param y The setted memory size in second dimension. /// \param q The queue in which the operation is done. /// \returns no return value. static inline void dpct_memset(void *ptr, size_t pitch, int val, size_t x, size_t y, sycl::queue &q = get_default_queue()) { sycl::event::wait(detail::dpct_memset(q, ptr, pitch, val, x, y)); } /// Sets \p value to the 2D memory region pointed by \p ptr in \p q. \p x and /// \p y specify the setted 2D memory size. 
\p pitch is the bytes in linear /// dimension, including padding bytes. The return of the function does NOT /// guarantee the memset operation is completed. /// /// \param ptr Pointer to the device memory region. /// \param pitch Bytes in linear dimension, including padding bytes. /// \param value Value to be set. /// \param x The setted memory size in linear dimension. /// \param y The setted memory size in second dimension. /// \param q The queue in which the operation is done. /// \returns no return value. static inline void async_dpct_memset(void *ptr, size_t pitch, int val, size_t x, size_t y, sycl::queue &q = get_default_queue()) { detail::dpct_memset(q, ptr, pitch, val, x, y); } /// Sets \p value to the 3D memory region specified by \p pitch in \p q. \p size /// specify the setted 3D memory size. The function will return after the /// memset operation is completed. /// /// \param pitch Specify the 3D memory region. /// \param value Value to be set. /// \param size The setted 3D memory size. /// \param q The queue in which the operation is done. /// \returns no return value. static inline void dpct_memset(pitched_data pitch, int val, sycl::range<3> size, sycl::queue &q = get_default_queue()) { sycl::event::wait(detail::dpct_memset(q, pitch, val, size)); } /// Sets \p value to the 3D memory region specified by \p pitch in \p q. \p size /// specify the setted 3D memory size. The return of the function does NOT /// guarantee the memset operation is completed. /// /// \param pitch Specify the 3D memory region. /// \param value Value to be set. /// \param size The setted 3D memory size. /// \param q The queue in which the operation is done. /// \returns no return value. static inline void async_dpct_memset(pitched_data pitch, int val, sycl::range<3> size, sycl::queue &q = get_default_queue()) { detail::dpct_memset(q, pitch, val, size); } /// dpct accessor used as device function parameter. 
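/// The 3-D specialization's operator[] strips the outermost dimension and
/// returns a 2-D accessor over the corresponding slice; the 2-D
/// specialization's operator[] returns a raw pointer to the selected row.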
template <class T, memory_region Memory, size_t Dimension> class accessor; template <class T, memory_region Memory> class accessor<T, Memory, 3> { public: using memory_t = detail::memory_traits<Memory, T>; using element_t = typename memory_t::element_t; using pointer_t = typename memory_t::pointer_t; using accessor_t = typename memory_t::template accessor_t<3>; accessor(pointer_t data, const sycl::range<3> &in_range) : _data(data), _range(in_range) {} template <memory_region M = Memory> accessor(typename std::enable_if<M != local, const accessor_t>::type &acc) : accessor(acc, acc.get_range()) {} accessor(const accessor_t &acc, const sycl::range<3> &in_range) : accessor(acc.get_pointer(), in_range) {} accessor<T, Memory, 2> operator[](size_t index) const { sycl::range<2> sub(_range.get(1), _range.get(2)); return accessor<T, Memory, 2>(_data + index * sub.size(), sub); } pointer_t get_ptr() const { return _data; } private: pointer_t _data; sycl::range<3> _range; }; template <class T, memory_region Memory> class accessor<T, Memory, 2> { public: using memory_t = detail::memory_traits<Memory, T>; using element_t = typename memory_t::element_t; using pointer_t = typename memory_t::pointer_t; using accessor_t = typename memory_t::template accessor_t<2>; accessor(pointer_t data, const sycl::range<2> &in_range) : _data(data), _range(in_range) {} template <memory_region M = Memory> accessor(typename std::enable_if<M != local, const accessor_t>::type &acc) : accessor(acc, acc.get_range()) {} accessor(const accessor_t &acc, const sycl::range<2> &in_range) : accessor(acc.get_pointer(), in_range) {} pointer_t operator[](size_t index) const { return _data + _range.get(1) * index; } pointer_t get_ptr() const { return _data; } private: pointer_t _data; sycl::range<2> _range; }; namespace detail { /// Device variable with address space of shared, global or constant. 
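/// Allocation is lazy: init() / get_ptr() allocate device (or shared) storage
/// on first use with the supplied queue and copy any host-side initializer
/// contents over before the pointer is handed out.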
template <class T, memory_region Memory, size_t Dimension> class device_memory { public: using accessor_t = typename detail::memory_traits<Memory, T>::template accessor_t<Dimension>; using value_t = typename detail::memory_traits<Memory, T>::value_t; using dpct_accessor_t = dpct::accessor<T, Memory, Dimension>; device_memory() : device_memory(sycl::range<Dimension>(1)) {} /// Constructor of 1-D array with initializer list device_memory( const sycl::range<Dimension> &in_range, std::initializer_list<value_t> &&init_list) : device_memory(in_range) { assert(init_list.size() <= in_range.size()); _host_ptr = (value_t *)std::malloc(_size); std::memset(_host_ptr, 0, _size); std::memcpy(_host_ptr, init_list.begin(), init_list.size() * sizeof(T)); } /// Constructor of 2-D array with initializer list template <size_t D = Dimension> device_memory( const typename std::enable_if<D == 2, sycl::range<2>>::type &in_range, std::initializer_list<std::initializer_list<value_t>> &&init_list) : device_memory(in_range) { assert(init_list.size() <= in_range[0]); _host_ptr = (value_t *)std::malloc(_size); std::memset(_host_ptr, 0, _size); auto tmp_data = _host_ptr; for (auto sub_list : init_list) { assert(sub_list.size() <= in_range[1]); std::memcpy(tmp_data, sub_list.begin(), sub_list.size() * sizeof(T)); tmp_data += in_range[1]; } } /// Constructor with range device_memory(const sycl::range<Dimension> &range_in) : _size(range_in.size() * sizeof(T)), _range(range_in), _reference(false), _host_ptr(nullptr), _device_ptr(nullptr) { static_assert( (Memory == global) || (Memory == constant) || (Memory == shared), "device memory region should be global, constant or shared"); // Make sure that singleton class mem_mgr and dev_mgr will destruct later // than this. detail::mem_mgr::instance(); dev_mgr::instance(); } /// Constructor with range template <class... Args> device_memory(Args... Arguments) : device_memory(sycl::range<Dimension>(Arguments...)) {} ~device_memory() { if (_device_ptr && !_reference) dpct::dpct_free(_device_ptr); if (_host_ptr) std::free(_host_ptr); } /// Allocate memory with default queue, and init memory if has initial value. void init() { init(dpct::get_default_queue()); } /// Allocate memory with specified queue, and init memory if has initial value. void init(sycl::queue &q) { if (_device_ptr) return; if (!_size) return; allocate_device(q); if (_host_ptr) detail::dpct_memcpy(q, _device_ptr, _host_ptr, _size, host_to_device); } /// The variable is assigned to a device pointer. void assign(value_t *src, size_t size) { this->~device_memory(); new (this) device_memory(src, size); } /// Get memory pointer of the memory object, which is virtual pointer when /// usm is not used, and device pointer when usm is used. value_t *get_ptr() { return get_ptr(get_default_queue()); } /// Get memory pointer of the memory object, which is virtual pointer when /// usm is not used, and device pointer when usm is used. value_t *get_ptr(sycl::queue &q) { init(q); return _device_ptr; } /// Get the device memory object size in bytes. size_t get_size() { return _size; } template <size_t D = Dimension> typename std::enable_if<D == 1, T>::type &operator[](size_t index) { init(); #ifdef DPCT_USM_LEVEL_NONE return dpct::get_buffer<typename std::enable_if<D == 1, T>::type>( _device_ptr) .template get_access<sycl::access_mode::read_write>()[index]; #else return _device_ptr[index]; #endif // DPCT_USM_LEVEL_NONE } #ifdef DPCT_USM_LEVEL_NONE /// Get sycl::accessor for the device memory object when usm is not used. 
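/// The underlying byte buffer is reinterpreted to the element type and range
/// of this object before the accessor is created.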
accessor_t get_access(sycl::handler &cgh) { return get_buffer(_device_ptr) .template reinterpret<T, Dimension>(_range) .template get_access<detail::memory_traits<Memory, T>::mode, detail::memory_traits<Memory, T>::target>(cgh); } #else /// Get dpct::accessor with dimension info for the device memory object /// when usm is used and dimension is greater than 1. template <size_t D = Dimension> typename std::enable_if<D != 1, dpct_accessor_t>::type get_access(sycl::handler &cgh) { return dpct_accessor_t((T *)_device_ptr, _range); } #endif // DPCT_USM_LEVEL_NONE private: device_memory(value_t *memory_ptr, size_t size) : _size(size), _range(size / sizeof(T)), _reference(true), _device_ptr(memory_ptr) {} void allocate_device(sycl::queue &q) { #ifndef DPCT_USM_LEVEL_NONE if (Memory == shared) { _device_ptr = (value_t *)sycl::malloc_shared( _size, q.get_device(), q.get_context()); return; } #endif _device_ptr = (value_t *)detail::dpct_malloc(_size, q); } size_t _size; sycl::range<Dimension> _range; bool _reference; value_t *_host_ptr; value_t *_device_ptr; }; template <class T, memory_region Memory> class device_memory<T, Memory, 0> : public device_memory<T, Memory, 1> { public: using base = device_memory<T, Memory, 1>; using value_t = typename base::value_t; using accessor_t = typename detail::memory_traits<Memory, T>::template accessor_t<0>; /// Constructor with initial value. device_memory(const value_t &val) : base(sycl::range<1>(1), {val}) {} /// Default constructor device_memory() : base(1) {} #ifdef DPCT_USM_LEVEL_NONE /// Get sycl::accessor for the device memory object when usm is not used. accessor_t get_access(sycl::handler &cgh) { auto buf = get_buffer(base::get_ptr()) .template reinterpret<T, 1>(sycl::range<1>(1)); return accessor_t(buf, cgh); } #endif // DPCT_USM_LEVEL_NONE }; } template <class T, size_t Dimension> using global_memory = detail::device_memory<T, global, Dimension>; template <class T, size_t Dimension> using constant_memory = detail::device_memory<T, constant, Dimension>; template <class T, size_t Dimension> using shared_memory = detail::device_memory<T, shared, Dimension>; // dpct::deprecated:: is for functionality that was introduced for compatibility // purpose, but relies on deprecated C++ features, which are either removed or // will be removed in the future standards. // Direct use of deprecated functionality in this namespace should be avoided. namespace deprecated { template <typename T> using usm_host_allocator = detail::deprecated::usm_allocator<T, sycl::usm::alloc::host>; template <typename T> using usm_device_allocator = detail::deprecated::usm_allocator<T, sycl::usm::alloc::shared>; } // namespace deprecated class pointer_attributes { public: void init(const void *ptr, sycl::queue &q = dpct::get_default_queue()) { #ifdef DPCT_USM_LEVEL_NONE throw std::runtime_error( "dpct::pointer_attributes: only works for USM pointer."); #else memory_type = sycl::get_pointer_type(ptr, q.get_context()); device_pointer = (memory_type != sycl::usm::alloc::unknown) ? ptr : nullptr; host_pointer = (memory_type != sycl::usm::alloc::unknown) && (memory_type != sycl::usm::alloc::device) ? 
ptr : nullptr; sycl::device device_obj = sycl::get_pointer_device(ptr, q.get_context()); device_id = dpct::dev_mgr::instance().get_device_id(device_obj); #endif } sycl::usm::alloc get_memory_type() { return memory_type; } const void *get_device_pointer() { return device_pointer; } const void *get_host_pointer() { return host_pointer; } bool is_memory_shared() { return memory_type == sycl::usm::alloc::shared; } unsigned int get_device_id() { return device_id; } private: sycl::usm::alloc memory_type = sycl::usm::alloc::unknown; const void *device_pointer = nullptr; const void *host_pointer = nullptr; unsigned int device_id = 0; }; } // namespace dpct #endif // __DPCT_MEMORY_HPP__
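// --- Illustrative usage sketch (added for this guide; not part of the original
// memory.hpp). It shows how the device_memory helpers and pointer_attributes
// declared above are typically driven from host code, assuming <sycl/sycl.hpp>
// and this header are already included. The names d_coeffs and
// inspect_allocation, and the element count, are hypothetical examples.
#if 0
static dpct::global_memory<float, 1> d_coeffs(64);

void inspect_allocation(sycl::queue &q) {
  d_coeffs.init(q);                  // allocate on the device; copies host init data if any
  float *ptr = d_coeffs.get_ptr(q);  // USM pointer, or a virtual pointer in buffer mode

  dpct::pointer_attributes attrs;
  attrs.init(ptr, q);                // note: throws when DPCT_USM_LEVEL_NONE is defined
  if (attrs.is_memory_shared()) {
    // ptr refers to shared USM, so the host can dereference it directly.
  }
}
#endif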
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/dpl_utils.hpp
//==---- dpl_utils.hpp ----------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_DPL_UTILS_HPP__ #define __DPCT_DPL_UTILS_HPP__ #define ONEDPL_USE_DPCPP_BACKEND 1 #define __USE_DPCT 1 #include <oneapi/dpl/execution> #include <oneapi/dpl/algorithm> #include <oneapi/dpl/numeric> #include "dpl_extras/memory.h" #include "dpl_extras/algorithm.h" #include "dpl_extras/numeric.h" #include "dpl_extras/iterators.h" #include "dpl_extras/vector.h" #include "dpl_extras/dpcpp_extensions.h" #endif // __DPCT_DPL_UTILS_HPP__
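// --- Illustrative usage sketch (added for this guide; not part of the original
// dpl_utils.hpp). With this umbrella header included, oneDPL algorithms can be
// dispatched to a SYCL queue through a device execution policy. The function
// name and the assumption that `data` is USM-accessible are illustrative only.
#if 0
void sort_on_device(sycl::queue &q, int *data, std::size_t n) {
  auto policy = oneapi::dpl::execution::make_device_policy(q);
  oneapi::dpl::sort(policy, data, data + n);  // runs on the device owned by q
}
#endif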
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/math.hpp
//==---- math.hpp ---------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_MATH_HPP__ #define __DPCT_MATH_HPP__ #include <sycl/sycl.hpp> namespace dpct { namespace detail { template <typename VecT, class BinaryOperation, class = void> class vectorized_binary { public: inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) { VecT v4; for (size_t i = 0; i < v4.size(); ++i) { v4[i] = binary_op(a[i], b[i]); } return v4; } }; template <typename VecT, class BinaryOperation> class vectorized_binary< VecT, BinaryOperation, std::void_t<std::invoke_result_t<BinaryOperation, VecT, VecT>>> { public: inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) { return binary_op(a, b).template as<VecT>(); } }; template <typename T> bool isnan(const T a) { return sycl::isnan(a); } // TODO: Need add more specialization such as bfloat16 version. } // namespace detail /// Compute fast_length for variable-length array /// \param [in] a The array /// \param [in] len Length of the array /// \returns The computed fast_length inline float fast_length(const float *a, int len) { switch (len) { case 1: return a[0]; case 2: return sycl::fast_length(sycl::float2(a[0], a[1])); case 3: return sycl::fast_length(sycl::float3(a[0], a[1], a[2])); case 4: return sycl::fast_length(sycl::float4(a[0], a[1], a[2], a[3])); case 0: return 0; default: float f = 0; for (int i = 0; i < len; ++i) f += a[i] * a[i]; return sycl::sqrt(f); } } /// Calculate the square root of the input array. /// \param [in] a The array pointer /// \param [in] len Length of the array /// \returns The square root template <typename T> inline T length(const T *a, const int len) { switch (len) { case 1: return a[0]; case 2: return sycl::length(sycl::vec<T, 2>(a[0], a[1])); case 3: return sycl::length(sycl::vec<T, 3>(a[0], a[1], a[2])); case 4: return sycl::length(sycl::vec<T, 4>(a[0], a[1], a[2], a[3])); default: T ret = 0; for (int i = 0; i < len; ++i) ret += a[i] * a[i]; return sycl::sqrt(ret); } } /// Performs comparison. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline std::enable_if_t< std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool> compare(const T a, const T b, const BinaryOperation binary_op) { return binary_op(a, b); } template <typename T> inline std::enable_if_t< std::is_same_v<std::invoke_result_t<std::not_equal_to<>, T, T>, bool>, bool> compare(const T a, const T b, const std::not_equal_to<> binary_op) { return !detail::isnan(a) && !detail::isnan(b) && binary_op(a, b); } /// Performs unordered comparison. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline std::enable_if_t< std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool> unordered_compare(const T a, const T b, const BinaryOperation binary_op) { return detail::isnan(a) || detail::isnan(b) || binary_op(a, b); } /// Performs 2 element comparison and return true if both results are true. 
/// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline std::enable_if_t<T::size() == 2, bool> compare_both(const T a, const T b, const BinaryOperation binary_op) { return compare(a[0], b[0], binary_op) && compare(a[1], b[1], binary_op); } /// Performs 2 element unordered comparison and return true if both results are /// true. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline std::enable_if_t<T::size() == 2, bool> unordered_compare_both(const T a, const T b, const BinaryOperation binary_op) { return unordered_compare(a[0], b[0], binary_op) && unordered_compare(a[1], b[1], binary_op); } /// Performs 2 element comparison. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline std::enable_if_t<T::size() == 2, T> compare(const T a, const T b, const BinaryOperation binary_op) { return {compare(a[0], b[0], binary_op), compare(a[1], b[1], binary_op)}; } /// Performs 2 elements comparison, compare result of each element is 0 (false) /// or 0xffff (true), returns an unsigned int by composing compare result of two /// elements. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline unsigned compare_mask(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b, const BinaryOperation binary_op) { return sycl::vec<short, 2>(-compare(a[0], b[0], binary_op), -compare(a[1], b[1], binary_op)) .as<sycl::vec<unsigned, 1>>(); } /// Performs 2 element unordered comparison. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline std::enable_if_t<T::size() == 2, T> unordered_compare(const T a, const T b, const BinaryOperation binary_op) { return {unordered_compare(a[0], b[0], binary_op), unordered_compare(a[1], b[1], binary_op)}; } /// Performs 2 elements unordered comparison, compare result of each element is /// 0 (false) or 0xffff (true), returns an unsigned int by composing compare /// result of two elements. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] binary_op functor that implements the binary operation /// \returns the comparison result template <typename T, class BinaryOperation> inline unsigned unordered_compare_mask(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b, const BinaryOperation binary_op) { return sycl::vec<short, 2>(-unordered_compare(a[0], b[0], binary_op), -unordered_compare(a[1], b[1], binary_op)) .as<sycl::vec<unsigned, 1>>(); } /// Determine whether 2 element value is NaN. /// \param [in] a The input value /// \returns the comparison result template <typename T> inline std::enable_if_t<T::size() == 2, T> isnan(const T a) { return {detail::isnan(a[0]), detail::isnan(a[1])}; } // min function overloads. // For floating-point types, `float` or `double` arguments are acceptable. 
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or // `std::int64_t` type arguments are acceptable. inline double min(const double a, const float b) { return sycl::fmin(a, static_cast<double>(b)); } inline double min(const float a, const double b) { return sycl::fmin(static_cast<double>(a), b); } inline float min(const float a, const float b) { return sycl::fmin(a, b); } inline double min(const double a, const double b) { return sycl::fmin(a, b); } inline std::uint32_t min(const std::uint32_t a, const std::int32_t b) { return sycl::min(a, static_cast<std::uint32_t>(b)); } inline std::uint32_t min(const std::int32_t a, const std::uint32_t b) { return sycl::min(static_cast<std::uint32_t>(a), b); } inline std::int32_t min(const std::int32_t a, const std::int32_t b) { return sycl::min(a, b); } inline std::uint32_t min(const std::uint32_t a, const std::uint32_t b) { return sycl::min(a, b); } inline std::uint64_t min(const std::uint64_t a, const std::int64_t b) { return sycl::min(a, static_cast<std::uint64_t>(b)); } inline std::uint64_t min(const std::int64_t a, const std::uint64_t b) { return sycl::min(static_cast<std::uint64_t>(a), b); } inline std::int64_t min(const std::int64_t a, const std::int64_t b) { return sycl::min(a, b); } inline std::uint64_t min(const std::uint64_t a, const std::uint64_t b) { return sycl::min(a, b); } inline std::uint64_t min(const std::uint64_t a, const std::int32_t b) { return sycl::min(a, static_cast<std::uint64_t>(b)); } inline std::uint64_t min(const std::int32_t a, const std::uint64_t b) { return sycl::min(static_cast<std::uint64_t>(a), b); } inline std::uint64_t min(const std::uint64_t a, const std::uint32_t b) { return sycl::min(a, static_cast<std::uint64_t>(b)); } inline std::uint64_t min(const std::uint32_t a, const std::uint64_t b) { return sycl::min(static_cast<std::uint64_t>(a), b); } // max function overloads. // For floating-point types, `float` or `double` arguments are acceptable. // For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or // `std::int64_t` type arguments are acceptable. 
inline double max(const double a, const float b) { return sycl::fmax(a, static_cast<double>(b)); } inline double max(const float a, const double b) { return sycl::fmax(static_cast<double>(a), b); } inline float max(const float a, const float b) { return sycl::fmax(a, b); } inline double max(const double a, const double b) { return sycl::fmax(a, b); } inline std::uint32_t max(const std::uint32_t a, const std::int32_t b) { return sycl::max(a, static_cast<std::uint32_t>(b)); } inline std::uint32_t max(const std::int32_t a, const std::uint32_t b) { return sycl::max(static_cast<std::uint32_t>(a), b); } inline std::int32_t max(const std::int32_t a, const std::int32_t b) { return sycl::max(a, b); } inline std::uint32_t max(const std::uint32_t a, const std::uint32_t b) { return sycl::max(a, b); } inline std::uint64_t max(const std::uint64_t a, const std::int64_t b) { return sycl::max(a, static_cast<std::uint64_t>(b)); } inline std::uint64_t max(const std::int64_t a, const std::uint64_t b) { return sycl::max(static_cast<std::uint64_t>(a), b); } inline std::int64_t max(const std::int64_t a, const std::int64_t b) { return sycl::max(a, b); } inline std::uint64_t max(const std::uint64_t a, const std::uint64_t b) { return sycl::max(a, b); } inline std::uint64_t max(const std::uint64_t a, const std::int32_t b) { return sycl::max(a, static_cast<std::uint64_t>(b)); } inline std::uint64_t max(const std::int32_t a, const std::uint64_t b) { return sycl::max(static_cast<std::uint64_t>(a), b); } inline std::uint64_t max(const std::uint64_t a, const std::uint32_t b) { return sycl::max(a, static_cast<std::uint64_t>(b)); } inline std::uint64_t max(const std::uint32_t a, const std::uint64_t b) { return sycl::max(static_cast<std::uint64_t>(a), b); } /// Performs relu saturation. /// \param [in] a The input value /// \returns the relu saturation result template <typename T> inline T relu(const T a) { if (!detail::isnan(a) && a < 0.f) return 0.f; return a; } template <class T> inline sycl::vec<T, 2> relu(const sycl::vec<T, 2> a) { return {relu(a[0]), relu(a[1])}; } /// Performs complex number multiply addition. /// \param [in] a The first value /// \param [in] b The second value /// \param [in] c The third value /// \returns the operation result template <typename T> inline sycl::vec<T, 2> complex_mul_add(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b, const sycl::vec<T, 2> c) { return sycl::vec<T, 2>{a[0] * b[0] - a[1] * b[1] + c[0], a[0] * b[1] + a[1] * b[0] + c[1]}; } /// Performs 2 elements comparison and returns the bigger one. If either of /// inputs is NaN, then return NaN. /// \param [in] a The first value /// \param [in] b The second value /// \returns the bigger value template <typename T> inline T fmax_nan(const T a, const T b) { if (detail::isnan(a) || detail::isnan(b)) return NAN; return sycl::fmax(a, b); } template <typename T> inline sycl::vec<T, 2> fmax_nan(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b) { return {fmax_nan(a[0], b[0]), fmax_nan(a[1], b[1])}; } /// Performs 2 elements comparison and returns the smaller one. If either of /// inputs is NaN, then return NaN. 
/// \param [in] a The first value /// \param [in] b The second value /// \returns the smaller value template <typename T> inline T fmin_nan(const T a, const T b) { if (detail::isnan(a) || detail::isnan(b)) return NAN; return sycl::fmin(a, b); } template <typename T> inline sycl::vec<T, 2> fmin_nan(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b) { return {fmin_nan(a[0], b[0]), fmin_nan(a[1], b[1])}; } /// A sycl::abs wrapper functors. struct abs { template <typename T> auto operator()(const T x) const { return sycl::abs(x); } }; /// A sycl::abs_diff wrapper functors. struct abs_diff { template <typename T> auto operator()(const T x, const T y) const { return sycl::abs_diff(x, y); } }; /// A sycl::add_sat wrapper functors. struct add_sat { template <typename T> auto operator()(const T x, const T y) const { return sycl::add_sat(x, y); } }; /// A sycl::rhadd wrapper functors. struct rhadd { template <typename T> auto operator()(const T x, const T y) const { return sycl::rhadd(x, y); } }; /// A sycl::hadd wrapper functors. struct hadd { template <typename T> auto operator()(const T x, const T y) const { return sycl::hadd(x, y); } }; /// A sycl::max wrapper functors. struct maximum { template <typename T> auto operator()(const T x, const T y) const { return sycl::max(x, y); } }; /// A sycl::min wrapper functors. struct minimum { template <typename T> auto operator()(const T x, const T y) const { return sycl::min(x, y); } }; /// A sycl::sub_sat wrapper functors. struct sub_sat { template <typename T> auto operator()(const T x, const T y) const { return sycl::sub_sat(x, y); } }; /// Compute vectorized binary operation value for two values, with each value /// treated as a vector type \p VecT. /// \tparam [in] VecT The type of the vector /// \tparam [in] BinaryOperation The binary operation class /// \param [in] a The first value /// \param [in] b The second value /// \returns The vectorized binary operation value of the two values template <typename VecT, class BinaryOperation> inline unsigned vectorized_binary(unsigned a, unsigned b, const BinaryOperation binary_op) { sycl::vec<unsigned, 1> v0{a}, v1{b}; auto v2 = v0.as<VecT>(); auto v3 = v1.as<VecT>(); auto v4 = detail::vectorized_binary<VecT, BinaryOperation>()(v2, v3, binary_op); v0 = v4.template as<sycl::vec<unsigned, 1>>(); return v0; } /// Compute vectorized isgreater for two values, with each value treated as a /// vector type \p S. /// \tparam [in] S The type of the vector /// \tparam [in] T The type of the original values /// \param [in] a The first value /// \param [in] b The second value /// \returns The vectorized greater than of the two values template <typename S, typename T> inline T vectorized_isgreater(T a, T b) { sycl::vec<T, 1> v0{a}, v1{b}; auto v2 = v0.template as<S>(); auto v3 = v1.template as<S>(); auto v4 = v2 > v3; v0 = v4.template as<sycl::vec<T, 1>>(); return v0; } /// Compute vectorized max for two values, with each value treated as a vector /// type \p S. /// \tparam [in] S The type of the vector /// \tparam [in] T The type of the original values /// \param [in] a The first value /// \param [in] b The second value /// \returns The vectorized max of the two values template <typename S, typename T> inline T vectorized_max(T a, T b) { sycl::vec<T, 1> v0{a}, v1{b}; auto v2 = v0.template as<S>(); auto v3 = v1.template as<S>(); auto v4 = sycl::max(v2, v3); v0 = v4.template as<sycl::vec<T, 1>>(); return v0; } /// Compute vectorized min for two values, with each value treated as a vector /// type \p S. 
/// \tparam [in] S The type of the vector /// \tparam [in] T The type of the original values /// \param [in] a The first value /// \param [in] b The second value /// \returns The vectorized min of the two values template <typename S, typename T> inline T vectorized_min(T a, T b) { sycl::vec<T, 1> v0{a}, v1{b}; auto v2 = v0.template as<S>(); auto v3 = v1.template as<S>(); auto v4 = sycl::min(v2, v3); v0 = v4.template as<sycl::vec<T, 1>>(); return v0; } /// Compute vectorized unary operation for a value, with the value treated as a /// vector type \p VecT. /// \tparam [in] VecT The type of the vector /// \tparam [in] UnaryOperation The unary operation class /// \param [in] a The input value /// \returns The vectorized unary operation value of the input value template <typename VecT, class UnaryOperation> inline unsigned vectorized_unary(unsigned a, const UnaryOperation unary_op) { sycl::vec<unsigned, 1> v0{a}; auto v1 = v0.as<VecT>(); auto v2 = unary_op(v1); v0 = v2.template as<sycl::vec<unsigned, 1>>(); return v0; } /// Compute vectorized absolute difference for two values without modulo /// overflow, with each value treated as a vector type \p VecT. /// \tparam [in] VecT The type of the vector /// \param [in] a The first value /// \param [in] b The second value /// \returns The vectorized absolute difference of the two values template <typename VecT> inline unsigned vectorized_sum_abs_diff(unsigned a, unsigned b) { sycl::vec<unsigned, 1> v0{a}, v1{b}; auto v2 = v0.as<VecT>(); auto v3 = v1.as<VecT>(); auto v4 = sycl::abs_diff(v2, v3); unsigned sum = 0; for (size_t i = 0; i < v4.size(); ++i) { sum += v4[i]; } return sum; } } // namespace dpct #endif // __DPCT_MATH_HPP__
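// --- Illustrative usage sketch (added for this guide; not part of the original
// math.hpp). dpct::vectorized_binary treats a 32-bit word as a short vector and
// applies the functor lane by lane, which mirrors the packed SIMD-in-a-word
// intrinsics this compatibility layer is meant to replace. The function names
// and inputs below are made up; only the dpct/sycl calls come from this header.
#if 0
unsigned packed_saturating_add(unsigned a, unsigned b) {
  // Reinterpret each unsigned as two 16-bit lanes and add with saturation.
  return dpct::vectorized_binary<sycl::short2>(a, b, dpct::add_sat());
}

unsigned packed_max(unsigned a, unsigned b) {
  // Per-lane maximum, treating each word as a ushort2.
  return dpct::vectorized_max<sycl::ushort2>(a, b);
}
#endif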
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/blas_utils.hpp
//==---- blas_utils.hpp----------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_BLAS_UTILS_HPP__ #define __DPCT_BLAS_UTILS_HPP__ #include "memory.hpp" #include "util.hpp" #include "lib_common_utils.hpp" #include <sycl/sycl.hpp> #include <oneapi/mkl.hpp> #include <utility> #include <vector> #include <thread> namespace dpct { /// Get the value of \p s. /// Copy the data to host synchronously, then return the data. /// \param [in] p The pointer points the data. /// \param [in] q The queue where the memory copy should be executed. template <typename T> inline auto get_value(const T *s, sycl::queue &q) { return detail::get_value(s, q); } namespace detail { inline void mem_free(sycl::queue *exec_queue, std::vector<void *> pointers_array, sycl::event e) { e.wait(); for (auto p : pointers_array) sycl::free(p, *exec_queue); } inline int stride_for(int num_elems, int mem_align_in_elems) { return ((num_elems - 1) / mem_align_in_elems + 1) * mem_align_in_elems; } #ifndef DPCT_USM_LEVEL_NONE template<typename T> class working_memory { T *_input_ptr; T *_temp_ptr; bool _is_sycl_malloced = false; bool _is_scalar_value = false; sycl::queue _q; sycl::event _e; public: working_memory(size_t size, sycl::queue q) : _q(q) { _is_scalar_value = false; _temp_ptr = (T *)sycl::malloc_device(size, q); } working_memory(T *result_ptr, sycl::queue q) : _input_ptr(result_ptr), _q(q) { _is_scalar_value = true; _is_sycl_malloced = sycl::get_pointer_type(_input_ptr, _q.get_context()) != sycl::usm::alloc::unknown; if (!_is_sycl_malloced) _temp_ptr = sycl::malloc_shared<T>(1, _q); } auto get_ptr() { if (_is_scalar_value && _is_sycl_malloced) return _input_ptr; return _temp_ptr; } void set_event(sycl::event e) { _e = e; } ~working_memory() { if (_is_scalar_value) { if (!_is_sycl_malloced) { _q.memcpy(_input_ptr, _temp_ptr, sizeof(T)).wait(); sycl::free(_temp_ptr, _q); } } else { std::vector<void *> ptrs{_temp_ptr}; dpct::async_dpct_free(ptrs, {_e}); } } }; #endif template <typename Tx, typename Tr> inline void nrm2_impl(sycl::queue &q, int n, const void *x, int incx, void *result) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else #ifdef DPCT_USM_LEVEL_NONE auto x_buffer = dpct::get_buffer<Tx>(x); auto r_buffer = sycl::buffer<Tr, 1>(reinterpret_cast<Tr *>(result), sycl::range<1>(1)); if (dpct::is_device_ptr(result)) r_buffer = dpct::get_buffer<Tr>(result); oneapi::mkl::blas::column_major::nrm2(q, n, x_buffer, incx, r_buffer); #else working_memory<Tr> res_mem(reinterpret_cast<Tr *>(result), q); oneapi::mkl::blas::column_major::nrm2(q, n, reinterpret_cast<const Tx *>(x), incx, res_mem.get_ptr()); #endif #endif } template <bool is_conjugate, class Txy, class Tr> inline void dotuc_impl(sycl::queue &q, int n, const Txy *x, int incx, const Txy *y, int incy, Tr *result) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else #ifdef DPCT_USM_LEVEL_NONE auto x_buffer = dpct::get_buffer<Txy>(x); auto y_buffer = dpct::get_buffer<Txy>(y); auto r_buffer = sycl::buffer<Tr, 1>((Tr *)result, sycl::range<1>(1)); if (dpct::is_device_ptr(result)) r_buffer = dpct::get_buffer<Tr>(result); if constexpr 
(std::is_same_v<Txy, std::complex<float>> || std::is_same_v<Txy, std::complex<double>>) { if constexpr (is_conjugate) oneapi::mkl::blas::column_major::dotc(q, n, x_buffer, incx, y_buffer, incy, r_buffer); else oneapi::mkl::blas::column_major::dotu(q, n, x_buffer, incx, y_buffer, incy, r_buffer); } else oneapi::mkl::blas::column_major::dot(q, n, x_buffer, incx, y_buffer, incy, r_buffer); #else working_memory<Tr> res_mem(result, q); if constexpr (std::is_same_v<Txy, std::complex<float>> || std::is_same_v<Txy, std::complex<double>>) { if constexpr (is_conjugate) oneapi::mkl::blas::column_major::dotc(q, n, x, incx, y, incy, res_mem.get_ptr()); else oneapi::mkl::blas::column_major::dotu(q, n, x, incx, y, incy, res_mem.get_ptr()); } else oneapi::mkl::blas::column_major::dot(q, n, x, incx, y, incy, res_mem.get_ptr()); #endif #endif } template <bool is_conjugate> inline void dotuc(sycl::queue &q, int n, const void *x, library_data_t x_type, int incx, const void *y, library_data_t y_type, int incy, void *result, library_data_t result_type) { std::uint64_t key = detail::get_type_combination_id(x_type, y_type, result_type); switch (key) { case detail::get_type_combination_id(library_data_t::real_float, library_data_t::real_float, library_data_t::real_float): { detail::dotuc_impl<is_conjugate>( q, n, reinterpret_cast<const float *>(x), incx, reinterpret_cast<const float *>(y), incy, reinterpret_cast<float *>(result)); break; } case detail::get_type_combination_id(library_data_t::real_double, library_data_t::real_double, library_data_t::real_double): { detail::dotuc_impl<is_conjugate>( q, n, reinterpret_cast<const double *>(x), incx, reinterpret_cast<const double *>(y), incy, reinterpret_cast<double *>(result)); break; } case detail::get_type_combination_id(library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float): { detail::dotuc_impl<is_conjugate>( q, n, reinterpret_cast<const std::complex<float> *>(x), incx, reinterpret_cast<const std::complex<float> *>(y), incy, reinterpret_cast<std::complex<float> *>(result)); break; } case detail::get_type_combination_id(library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double): { detail::dotuc_impl<is_conjugate>( q, n, reinterpret_cast<const std::complex<double> *>(x), incx, reinterpret_cast<const std::complex<double> *>(y), incy, reinterpret_cast<std::complex<double> *>(result)); break; } case detail::get_type_combination_id(library_data_t::real_half, library_data_t::real_half, library_data_t::real_half): { detail::dotuc_impl<is_conjugate>( q, n, reinterpret_cast<const sycl::half *>(x), incx, reinterpret_cast<const sycl::half *>(y), incy, reinterpret_cast<sycl::half *>(result)); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } template <class Tx, class Te> inline void scal_impl(sycl::queue &q, int n, const void *alpha, void *x, int incx) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q); auto data_x = get_memory(reinterpret_cast<Tx *>(x)); oneapi::mkl::blas::column_major::scal(q, n, alpha_val, data_x, incx); #endif } template <class Txy, class Te> inline void axpy_impl(sycl::queue &q, int n, const void *alpha, const void *x, int incx, void *y, int incy) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not 
support this API."); #else Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q); auto data_x = get_memory(reinterpret_cast<const Txy *>(x)); auto data_y = get_memory(reinterpret_cast<Txy *>(y)); oneapi::mkl::blas::column_major::axpy(q, n, alpha_val, data_x, incx, data_y, incy); #endif } template <class Txy, class Tc, class Ts> inline void rot_impl(sycl::queue &q, int n, void *x, int incx, void *y, int incy, const void *c, const void *s) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else Tc c_value = dpct::get_value(reinterpret_cast<const Tc *>(c), q); Ts s_value = dpct::get_value(reinterpret_cast<const Ts *>(s), q); auto data_x = get_memory(reinterpret_cast<Txy *>(x)); auto data_y = get_memory(reinterpret_cast<Txy *>(y)); oneapi::mkl::blas::column_major::rot(q, n, data_x, incx, data_y, incy, c_value, s_value); #endif } template <class Ta, class Tb, class Tc, class Ts> inline void gemm_impl(sycl::queue &q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m, int n, int k, const void *alpha, const void *a, int lda, const void *b, int ldb, const void *beta, void *c, int ldc) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q); Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q); auto data_a = get_memory(reinterpret_cast<const Ta *>(a)); auto data_b = get_memory(reinterpret_cast<const Tb *>(b)); auto data_c = get_memory(reinterpret_cast<Tc *>(c)); oneapi::mkl::blas::column_major::gemm( q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda, data_b, ldb, beta_value, data_c, ldc); #endif } template <class Ta, class Tb, class Tc, class Ts> inline void gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m, int n, int k, const void *alpha, const void **a, int lda, const void **b, int ldb, const void *beta, void **c, int ldc, int batch_size) { struct matrix_info_t { oneapi::mkl::transpose transpose_info[2]; Ts value_info[2]; std::int64_t size_info[3]; std::int64_t ld_info[3]; std::int64_t groupsize_info; }; Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q); Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q); matrix_info_t *matrix_info = (matrix_info_t *)std::malloc(sizeof(matrix_info_t)); matrix_info->transpose_info[0] = a_trans; matrix_info->transpose_info[1] = b_trans; matrix_info->value_info[0] = alpha_value; matrix_info->value_info[1] = beta_value; matrix_info->size_info[0] = m; matrix_info->size_info[1] = n; matrix_info->size_info[2] = k; matrix_info->ld_info[0] = lda; matrix_info->ld_info[1] = ldb; matrix_info->ld_info[2] = ldc; matrix_info->groupsize_info = batch_size; sycl::event e = oneapi::mkl::blas::column_major::gemm_batch( q, matrix_info->transpose_info, matrix_info->transpose_info + 1, matrix_info->size_info, matrix_info->size_info + 1, matrix_info->size_info + 2, matrix_info->value_info, reinterpret_cast<const Ta **>(a), matrix_info->ld_info, reinterpret_cast<const Tb **>(b), matrix_info->ld_info + 1, matrix_info->value_info + 1, reinterpret_cast<Tc **>(c), matrix_info->ld_info + 2, 1, &(matrix_info->groupsize_info)); q.submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { std::free(matrix_info); }); }); } template <class Ta, class Tb, class Tc, class Ts> inline 
void gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m, int n, int k, const void *alpha, const void *a, int lda, long long int stride_a, const void *b, int ldb, long long int stride_b, const void *beta, void *c, int ldc, long long int stride_c, int batch_size) { Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q); Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q); auto data_a = get_memory(reinterpret_cast<const Ta *>(a)); auto data_b = get_memory(reinterpret_cast<const Tb *>(b)); auto data_c = get_memory(reinterpret_cast<Tc *>(c)); oneapi::mkl::blas::column_major::gemm_batch( q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda, stride_a, data_b, ldb, stride_b, beta_value, data_c, ldc, stride_c, batch_size); } template <bool is_hermitian, class T, class Tbeta> inline void rk_impl(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::transpose trans, int n, int k, const T *alpha, const T *a, int lda, const T *b, int ldb, const Tbeta *beta, T *c, int ldc) { // For symmetric matrix, this function performs: C = alpha*OP(A)*(OP(B))^T + beta*C // For Hermitian matrix, this function performs: C = alpha*OP(A)*(OP(B))^H + beta*C // The gemmt() function performs: C = alpha*OPA(A)*OPB(B) + beta*C // So the OPB need be updated before we call gemmt(). using Ty = typename dpct::DataType<T>::T2; using Ts = typename dpct::DataType<Tbeta>::T2; Ty alpha_value = dpct::get_value(reinterpret_cast<const Ty *>(alpha), q); Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q); oneapi::mkl::transpose trans_A = trans, trans_B = trans; int origin_b_rows = trans == oneapi::mkl::transpose::nontrans ? n : k; int origin_b_cols = trans == oneapi::mkl::transpose::nontrans ? k : n; if ((is_hermitian && trans == oneapi::mkl::transpose::trans) || (!is_hermitian && !std::is_floating_point_v<Ty> && trans == oneapi::mkl::transpose::conjtrans)) { // In this case, OPB need be a conjugate operation, // but only notrans, conjtrans and trans are available. // So we need do a conjtrans operation first, then do a trans operation. trans_B = oneapi::mkl::transpose::trans; auto data_a = get_memory(reinterpret_cast<const Ty *>(a)); auto data_c = get_memory(reinterpret_cast<Ty *>(c)); #ifdef DPCT_USM_LEVEL_NONE auto new_B_buffer = sycl::buffer<Ty, 1>(sycl::range<1>(origin_b_rows * origin_b_cols)); auto from_buffer = dpct::get_buffer<Ty>(b); oneapi::mkl::blas::column_major::omatcopy_batch( q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols, Ts(1.0), from_buffer, ldb, origin_b_rows * ldb, new_B_buffer, origin_b_cols, origin_b_rows * origin_b_cols, 1); oneapi::mkl::blas::column_major::gemmt( q, uplo, trans_A, trans_B, n, k, alpha_value, data_a, lda, new_B_buffer, origin_b_cols, beta_value, data_c, ldc); #else working_memory<T> new_B(origin_b_rows * origin_b_cols * sizeof(T), q); oneapi::mkl::blas::column_major::omatcopy_batch( q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols, Ts(1.0), reinterpret_cast<const Ty *>(b), ldb, origin_b_rows * ldb, reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols, origin_b_rows * origin_b_cols, 1); sycl::event e = oneapi::mkl::blas::column_major::gemmt( q, uplo, trans_A, trans_B, n, k, alpha_value, data_a, lda, reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols, beta_value, data_c, ldc); new_B.set_event(e); #endif } else { if constexpr (is_hermitian) { trans_B = trans == oneapi::mkl::transpose::nontrans ? 
oneapi::mkl::transpose::conjtrans : oneapi::mkl::transpose::nontrans; } else { trans_B = trans == oneapi::mkl::transpose::nontrans ? oneapi::mkl::transpose::trans : oneapi::mkl::transpose::nontrans; } auto data_a = get_memory(reinterpret_cast<const Ty *>(a)); auto data_b = get_memory(reinterpret_cast<const Ty *>(b)); auto data_c = get_memory(reinterpret_cast<Ty *>(c)); oneapi::mkl::blas::column_major::gemmt( q, uplo, trans_A, trans_B, n, k, alpha_value, data_a, lda, data_b, ldb, beta_value, data_c, ldc); } } template <class Ta, class Tb, class Ts> inline void trsm_batch_impl(sycl::queue &q, oneapi::mkl::side left_right, oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans, oneapi::mkl::diag unit_diag, int m, int n, const void *alpha, const void **a, int lda, void **b, int ldb, int batch_size) { struct matrix_info_t { matrix_info_t(oneapi::mkl::side side_info, oneapi::mkl::uplo uplo_info, oneapi::mkl::transpose transpose_info, oneapi::mkl::diag diag_info, Ts value_info, std::int64_t m, std::int64_t n, std::int64_t lda, std::int64_t ldb, std::int64_t groupsize_info) : side_info(side_info), uplo_info(uplo_info), transpose_info(transpose_info), diag_info(diag_info), value_info(value_info), groupsize_info(groupsize_info) { size_info[0] = m; size_info[1] = n; ld_info[0] = lda; ld_info[1] = ldb; } oneapi::mkl::side side_info; oneapi::mkl::uplo uplo_info; oneapi::mkl::transpose transpose_info; oneapi::mkl::diag diag_info; Ts value_info; std::int64_t size_info[2]; std::int64_t ld_info[2]; std::int64_t groupsize_info; }; Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q); matrix_info_t *matrix_info = new matrix_info_t(left_right, upper_lower, trans, unit_diag, alpha_value, m, n, lda, ldb, batch_size); sycl::event e = oneapi::mkl::blas::column_major::trsm_batch( q, &(matrix_info->side_info), &(matrix_info->uplo_info), &(matrix_info->transpose_info), &(matrix_info->diag_info), matrix_info->size_info, matrix_info->size_info + 1, &(matrix_info->value_info), reinterpret_cast<const Ta **>(a), matrix_info->ld_info, reinterpret_cast<Tb **>(b), matrix_info->ld_info + 1, 1, &(matrix_info->groupsize_info)); q.submit([&](sycl::handler &cgh) { cgh.depends_on(e); cgh.host_task([=] { delete matrix_info; }); }); } template <typename T> inline void getrfnp_batch_wrapper(sycl::queue &exec_queue, int n, T *a[], int lda, int *info, int batch_size) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces " "Project does not support this API."); #else using Ty = typename DataType<T>::T2; // Set the info array value to 0 detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size); std::int64_t stride_a = n * lda; std::int64_t scratchpad_size = oneapi::mkl::lapack::getrfnp_batch_scratchpad_size<Ty>( exec_queue, n, n, lda, stride_a, batch_size); Ty *a_strided_mem = (Ty *)dpct::dpct_malloc(stride_a * batch_size * sizeof(Ty), exec_queue); T **host_a = (T **)malloc(batch_size * sizeof(T *)); dpct::dpct_memcpy(host_a, a, batch_size * sizeof(T *)); for (std::int64_t i = 0; i < batch_size; ++i) dpct::dpct_memcpy(a_strided_mem + i * stride_a, host_a[i], n * lda * sizeof(T)); #ifdef DPCT_USM_LEVEL_NONE { sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)}; auto a_buffer = get_buffer<Ty>(a_strided_mem); oneapi::mkl::lapack::getrfnp_batch(exec_queue, n, n, a_buffer, lda, stride_a, batch_size, scratchpad, scratchpad_size); } std::vector<sycl::event> events; for (std::int64_t i = 0; i < batch_size; ++i) 
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i], a_strided_mem + i * stride_a, n * lda * sizeof(T), automatic)); #else Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue); sycl::event e = oneapi::mkl::lapack::getrfnp_batch( exec_queue, n, n, a_strided_mem, lda, stride_a, batch_size, scratchpad, scratchpad_size); std::vector<sycl::event> events; for (std::int64_t i = 0; i < batch_size; ++i) events.push_back(detail::dpct_memcpy(exec_queue, host_a[i], a_strided_mem + i * stride_a, n * lda * sizeof(T), automatic, {e})); std::vector<void *> ptrs{scratchpad, a_strided_mem}; dpct::async_dpct_free(ptrs, events, exec_queue); #endif exec_queue.submit([&](sycl::handler &cgh) { cgh.depends_on(events); cgh.host_task([=] { free(host_a); }); }); #endif } } // namespace detail inline oneapi::mkl::transpose get_transpose(int t) { if (t == 0) { return oneapi::mkl::transpose::nontrans; } else if (t == 1) { return oneapi::mkl::transpose::trans; } else { return oneapi::mkl::transpose::conjtrans; } } /// Computes the LU factorizations of a batch of general matrices. /// \param [in] exec_queue The queue where the routine should be executed. /// \param [in] n The order of the matrices. /// \param [in, out] a Array of pointers to matrices. These matrices will be /// overwritten by lower triangulars with unit diagonal elements and upper /// triangulars. /// \param [in] lda The leading dimension of the matrices. /// \param [out] ipiv An array stores the pivot indices. If \p ipiv is nullptr, /// non-pivoting LU factorization is computed. /// \param [out] info An array stores the error information. /// \param [in] batch_size The size of the batch. template <typename T> inline void getrf_batch_wrapper(sycl::queue &exec_queue, int n, T *a[], int lda, int *ipiv, int *info, int batch_size) { if (ipiv == nullptr) { detail::getrfnp_batch_wrapper(exec_queue, n, a, lda, info, batch_size); return; } using Ty = typename DataType<T>::T2; // Set the info array value to 0 detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size); #ifdef DPCT_USM_LEVEL_NONE std::int64_t stride_a = n * lda; std::int64_t stride_ipiv = n; std::int64_t scratchpad_size = oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>( exec_queue, n, n, lda, stride_a, stride_ipiv, batch_size); T *a_buffer_ptr; a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T)); T **host_a = (T **)malloc(batch_size * sizeof(T *)); dpct_memcpy(host_a, a, batch_size * sizeof(T *)); for (std::int64_t i = 0; i < batch_size; ++i) dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T)); { sycl::buffer<std::int64_t, 1> ipiv_buf( sycl::range<1>(batch_size * stride_ipiv)); sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)}; auto a_buffer = get_buffer<Ty>(a_buffer_ptr); oneapi::mkl::lapack::getrf_batch(exec_queue, n, n, a_buffer, lda, stride_a, ipiv_buf, stride_ipiv, batch_size, scratchpad, scratchpad_size); auto to_buffer = get_buffer<int>(ipiv); exec_queue.submit([&](sycl::handler &cgh) { auto from_acc = ipiv_buf.get_access<sycl::access_mode::read>(cgh); auto to_acc = to_buffer.get_access<sycl::access_mode::write>(cgh); cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>( sycl::range<2>(batch_size, n), [=](sycl::id<2> id) { to_acc[id.get(0) * n + id.get(1)] = static_cast<int>(from_acc[id.get(0) * stride_ipiv + id.get(1)]); }); }); } // Copy back to the original buffers std::vector<sycl::event> events; for (std::int64_t i = 0; i < batch_size; ++i) 
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i], a_buffer_ptr + i * stride_a, n * lda * sizeof(T), automatic)); std::vector<void *> ptrs{host_a}; std::thread mem_free_thread( [=](std::vector<void *> pointers_array, std::vector<sycl::event> events_array) { sycl::event::wait(events_array); for (auto p : pointers_array) free(p); }, ptrs, events); mem_free_thread.detach(); #else std::int64_t m_int64 = n; std::int64_t n_int64 = n; std::int64_t lda_int64 = lda; std::int64_t group_sizes = batch_size; std::int64_t scratchpad_size = oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>( exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes); Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue); std::int64_t *ipiv_int64 = sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue); std::int64_t **ipiv_int64_ptr = sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue); T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue); exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *)).wait(); for (std::int64_t i = 0; i < batch_size; ++i) ipiv_int64_ptr[i] = ipiv_int64 + n * i; oneapi::mkl::lapack::getrf_batch(exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64, ipiv_int64_ptr, 1, &group_sizes, scratchpad, scratchpad_size); sycl::event e = exec_queue.submit([&](sycl::handler &cgh) { cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>( sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) { ipiv[idx] = static_cast<int>(ipiv_int64[idx]); }); }); std::vector<void *> ptrs{scratchpad, ipiv_int64, ipiv_int64_ptr, a_shared}; async_dpct_free(ptrs, {e}, exec_queue); #endif } /// Solves a system of linear equations with a batch of LU-factored square /// coefficient matrices, with multiple right-hand sides. /// \param [in] exec_queue The queue where the routine should be executed. /// \param [in] trans Indicates the form of the linear equations. /// \param [in] n The order of the matrices. /// \param [in] nrhs The number of right hand sides. /// \param [in] a Array of pointers to matrices. /// \param [in] lda The leading dimension of the matrices in \p a. /// \param [in] ipiv An array stores the pivots. /// \param [in, out] b Array of pointers to matrices, whose columns are /// the right-hand sides for the systems of equations. /// \param [in] ldb The leading dimension of the matrices in \p b. /// \param [out] info A value stores the error information. /// \param [in] batch_size The size of the batch. 
template <typename T> inline void getrs_batch_wrapper(sycl::queue &exec_queue, oneapi::mkl::transpose trans, int n, int nrhs, const T *a[], int lda, int *ipiv, T *b[], int ldb, int *info, int batch_size) { using Ty = typename DataType<T>::T2; // Set the info value to 0 *info = 0; #ifdef DPCT_USM_LEVEL_NONE std::int64_t stride_a = n * lda; std::int64_t stride_b = nrhs * ldb; std::int64_t stride_ipiv = n; std::int64_t scratchpad_size = oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>( exec_queue, trans, n, nrhs, lda, stride_a, stride_ipiv, ldb, stride_b, batch_size); T *a_buffer_ptr, *b_buffer_ptr; a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T)); b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T)); T **host_a = (T **)malloc(batch_size * sizeof(T *)); T **host_b = (T **)malloc(batch_size * sizeof(T *)); dpct_memcpy(host_a, a, batch_size * sizeof(T *)); dpct_memcpy(host_b, b, batch_size * sizeof(T *)); for (std::int64_t i = 0; i < batch_size; ++i) { dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T)); dpct_memcpy(b_buffer_ptr + i * stride_b, host_b[i], nrhs * ldb * sizeof(T)); } { auto a_buffer = get_buffer<Ty>(a_buffer_ptr); auto b_buffer = get_buffer<Ty>(b_buffer_ptr); sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)}; sycl::buffer<std::int64_t, 1> ipiv_buf( sycl::range<1>(batch_size * stride_ipiv)); auto from_buf = get_buffer<int>(ipiv); exec_queue.submit([&](sycl::handler &cgh) { auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh); auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh); cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>( sycl::range<2>(batch_size, n), [=](sycl::id<2> id) { to_acc[id.get(0) * stride_ipiv + id.get(1)] = static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]); }); }); oneapi::mkl::lapack::getrs_batch(exec_queue, trans, n, nrhs, a_buffer, lda, stride_a, ipiv_buf, stride_ipiv, b_buffer, ldb, stride_b, batch_size, scratchpad, scratchpad_size); } // Copy back to the original buffers std::vector<sycl::event> events; for (std::int64_t i = 0; i < batch_size; ++i) events.push_back(detail::dpct_memcpy(exec_queue, host_b[i], b_buffer_ptr + i * stride_b, nrhs * ldb * sizeof(T), automatic)); std::vector<void *> ptrs{host_a, host_b}; std::thread mem_free_thread( [=](std::vector<void *> pointers_array, std::vector<sycl::event> events_array) { sycl::event::wait(events_array); for (auto p : pointers_array) free(p); }, ptrs, events); mem_free_thread.detach(); #else std::int64_t n_int64 = n; std::int64_t nrhs_int64 = nrhs; std::int64_t lda_int64 = lda; std::int64_t ldb_int64 = ldb; std::int64_t group_sizes = batch_size; std::int64_t scratchpad_size = oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>( exec_queue, &trans, &n_int64, &nrhs_int64, &lda_int64, &ldb_int64, 1, &group_sizes); Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue); std::int64_t *ipiv_int64 = sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue); std::int64_t **ipiv_int64_ptr = sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue); T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue); T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue); exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *)); exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *)); exec_queue.submit([&](sycl::handler &cgh) { cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>( sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) { 
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]); }); }).wait(); for (std::int64_t i = 0; i < batch_size; ++i) ipiv_int64_ptr[i] = ipiv_int64 + n * i; sycl::event e = oneapi::mkl::lapack::getrs_batch( exec_queue, &trans, &n_int64, &nrhs_int64, (Ty **)a_shared, &lda_int64, ipiv_int64_ptr, (Ty **)b_shared, &ldb_int64, 1, &group_sizes, scratchpad, scratchpad_size); std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared, b_shared}; async_dpct_free(ptrs, {e}, exec_queue); #endif } /// Computes the inverses of a batch of LU-factored matrices. /// \param [in] exec_queue The queue where the routine should be executed. /// \param [in] n The order of the matrices. /// \param [in] a Array of pointers to matrices. /// \param [in] lda The leading dimension of the matrices in \p a. /// \param [in] ipiv An array stores the pivots. /// \param [out] b Array of pointers to inverse matrices. /// \param [in] ldb The leading dimension of the matrices in \p b. /// \param [out] info An array stores the error information. /// \param [in] batch_size The size of the batch. template <typename T> inline void getri_batch_wrapper(sycl::queue &exec_queue, int n, const T *a[], int lda, int *ipiv, T *b[], int ldb, int *info, int batch_size) { using Ty = typename DataType<T>::T2; // Set the info array value to 0 detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size); #ifdef DPCT_USM_LEVEL_NONE std::int64_t stride_b = n * ldb; std::int64_t stride_ipiv = n; std::int64_t scratchpad_size = oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>( exec_queue, n, ldb, stride_b, stride_ipiv, batch_size); T *b_buffer_ptr; b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T)); T **host_a = (T **)malloc(batch_size * sizeof(T *)); T **host_b = (T **)malloc(batch_size * sizeof(T *)); dpct_memcpy(host_a, a, batch_size * sizeof(T *)); dpct_memcpy(host_b, b, batch_size * sizeof(T *)); for (std::int64_t i = 0; i < batch_size; ++i) { // Need to create a copy of input matrices "a" to keep them unchanged. // Matrices "b" (copy of matrices "a") will be used as input and output // parameter in oneapi::mkl::lapack::getri_batch call. 
matrix_mem_copy(b_buffer_ptr + i * stride_b, host_a[i], ldb, lda, n, n, dpct::device_to_device, exec_queue); } { auto b_buffer = get_buffer<Ty>(b_buffer_ptr); sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)}; sycl::buffer<std::int64_t, 1> ipiv_buf( sycl::range<1>(batch_size * stride_ipiv)); auto from_buf = get_buffer<int>(ipiv); exec_queue.submit([&](sycl::handler &cgh) { auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh); auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh); cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>( sycl::range<2>(batch_size, n), [=](sycl::id<2> id) { to_acc[id.get(0) * stride_ipiv + id.get(1)] = static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]); }); }); oneapi::mkl::lapack::getri_batch(exec_queue, n, b_buffer, ldb, stride_b, ipiv_buf, stride_ipiv, batch_size, scratchpad, scratchpad_size); } // Copy back to the original buffers std::vector<sycl::event> events; for (std::int64_t i = 0; i < batch_size; ++i) events.push_back(detail::dpct_memcpy(exec_queue, host_b[i], b_buffer_ptr + i * stride_b, n * ldb * sizeof(T), automatic)); std::vector<void *> ptrs{host_a, host_b}; std::thread mem_free_thread( [=](std::vector<void *> pointers_array, std::vector<sycl::event> events_array) { sycl::event::wait(events_array); for (auto p : pointers_array) free(p); }, ptrs, events); mem_free_thread.detach(); #else std::int64_t n_int64 = n; std::int64_t ldb_int64 = ldb; std::int64_t group_sizes = batch_size; std::int64_t scratchpad_size = oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>( exec_queue, &n_int64, &ldb_int64, 1, &group_sizes); Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue); std::int64_t *ipiv_int64 = sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue); std::int64_t **ipiv_int64_ptr = sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue); exec_queue.submit([&](sycl::handler &cgh) { cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>( sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) { ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]); }); }); T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue); T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue); exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *)); exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *)).wait(); for (std::int64_t i = 0; i < batch_size; ++i) { ipiv_int64_ptr[i] = ipiv_int64 + n * i; // Need to create a copy of input matrices "a" to keep them unchanged. // Matrices "b" (copy of matrices "a") will be used as input and output // parameter in oneapi::mkl::lapack::getri_batch call. matrix_mem_copy(b_shared[i], a_shared[i], ldb, lda, n, n, dpct::device_to_device, exec_queue); } sycl::event e = oneapi::mkl::lapack::getri_batch( exec_queue, &n_int64, (Ty **)b_shared, &ldb_int64, ipiv_int64_ptr, 1, &group_sizes, scratchpad, scratchpad_size); std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared, b_shared}; async_dpct_free(ptrs, {e}, exec_queue); #endif } /// Computes the QR factorizations of a batch of general matrices. /// \param [in] exec_queue The queue where the routine should be executed. /// \param [in] m The number of rows in the matrices. /// \param [in] n The number of columns in the matrices. /// \param [in, out] a Array of pointers to matrices. These /// matrices will be overwritten by the factorization data. /// \param [in] lda The leading dimension of the matrices in \p a. 
/// \param [out] tau An array stores the scalars. /// \param [out] info A value stores the error information. /// \param [in] batch_size The size of the batch. template <typename T> inline void geqrf_batch_wrapper(sycl::queue exec_queue, int m, int n, T *a[], int lda, T *tau[], int *info, int batch_size) { using Ty = typename DataType<T>::T2; // Set the info value to 0 *info = 0; #ifdef DPCT_USM_LEVEL_NONE std::int64_t stride_a = n * lda; std::int64_t stride_tau = std::max(1, std::min(m, n)); std::int64_t scratchpad_size = oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>( exec_queue, m, n, lda, stride_a, stride_tau, batch_size); T *a_buffer_ptr, *tau_buffer_ptr; a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T)); tau_buffer_ptr = (T *)dpct_malloc(stride_tau * batch_size * sizeof(T)); T **host_a = (T **)malloc(batch_size * sizeof(T *)); T **host_tau = (T **)malloc(batch_size * sizeof(T *)); dpct_memcpy(host_a, a, batch_size * sizeof(T *)); dpct_memcpy(host_tau, tau, batch_size * sizeof(T *)); for (std::int64_t i = 0; i < batch_size; ++i) dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T)); { auto a_buffer = get_buffer<Ty>(a_buffer_ptr); auto tau_buffer = get_buffer<Ty>(tau_buffer_ptr); sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)}; oneapi::mkl::lapack::geqrf_batch(exec_queue, m, n, a_buffer, lda, stride_a, tau_buffer, stride_tau, batch_size, scratchpad, scratchpad_size); } // Copy back to the original buffers std::vector<sycl::event> events_a; std::vector<sycl::event> events_tau; for (std::int64_t i = 0; i < batch_size; ++i) { events_a.push_back(detail::dpct_memcpy(exec_queue, host_a[i], a_buffer_ptr + i * stride_a, n * lda * sizeof(T), automatic)); events_tau.push_back(detail::dpct_memcpy( exec_queue, host_tau[i], tau_buffer_ptr + i * stride_tau, std::max(1, std::min(m, n)) * sizeof(T), automatic)); } std::vector<void *> ptr_a{host_a}; std::vector<void *> ptr_tau{host_tau}; std::thread mem_free_thread_a( [=](std::vector<void *> pointers_array, std::vector<sycl::event> events_array) { sycl::event::wait(events_array); for (auto p : pointers_array) free(p); }, ptr_a, events_a); std::thread mem_free_thread_tau( [=](std::vector<void *> pointers_array, std::vector<sycl::event> events_array) { sycl::event::wait(events_array); for (auto p : pointers_array) free(p); }, ptr_tau, events_tau); mem_free_thread_a.detach(); mem_free_thread_tau.detach(); #else std::int64_t m_int64 = n; std::int64_t n_int64 = n; std::int64_t lda_int64 = lda; std::int64_t group_sizes = batch_size; std::int64_t scratchpad_size = oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>( exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes); Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue); T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue); T **tau_shared = sycl::malloc_shared<T *>(batch_size, exec_queue); exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *)); exec_queue.memcpy(tau_shared, tau, batch_size * sizeof(T *)).wait(); sycl::event e = oneapi::mkl::lapack::geqrf_batch( exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64, (Ty **)tau_shared, 1, &group_sizes, scratchpad, scratchpad_size); std::vector<void *> ptrs{scratchpad, a_shared, tau_shared}; async_dpct_free(ptrs, {e}, exec_queue); #endif } /// Computes the Euclidean norm of a vector. /// \param [in] q The queue where the routine should be executed. /// \param [in] n Number of elements in vector x. /// \param [in] x Input vector x. 
/// \param [in] x_type Data type of the vector x. /// \param [in] incx Stride of vector x. /// \param [out] result The result scalar. /// \param [in] result_type Data type of the result. inline void nrm2(sycl::queue &q, int n, const void *x, library_data_t x_type, int incx, void *result, library_data_t result_type) { std::uint64_t key = detail::get_type_combination_id(x_type, result_type); switch (key) { case detail::get_type_combination_id(library_data_t::real_float, library_data_t::real_float): { detail::nrm2_impl<float, float>(q, n, x, incx, result); break; } case detail::get_type_combination_id(library_data_t::real_double, library_data_t::real_double): { detail::nrm2_impl<double, double>(q, n, x, incx, result); break; } case detail::get_type_combination_id(library_data_t::complex_float, library_data_t::real_float): { detail::nrm2_impl<std::complex<float>, float>( q, n, x, incx, result); break; } case detail::get_type_combination_id(library_data_t::complex_double, library_data_t::real_double): { detail::nrm2_impl<std::complex<double>, double>( q, n, x, incx, result); break; } case detail::get_type_combination_id(library_data_t::real_half, library_data_t::real_half): { detail::nrm2_impl<sycl::half, sycl::half>( q, n, x, incx, result); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } /// Computes the dot product of two vectors. /// \param [in] q The queue where the routine should be executed. /// \param [in] n Number of elements in vector x. /// \param [in] x Input vector x. /// \param [in] x_type Data type of the vector x. /// \param [in] incx Stride of vector x. /// \param [in] y Input vector y. /// \param [in] y_type Data type of the vector y. /// \param [in] incy Stride of vector y. /// \param [out] result The result scalar. /// \param [in] result_type Data type of the result. inline void dot(sycl::queue &q, int n, const void *x, library_data_t x_type, int incx, const void *y, library_data_t y_type, int incy, void *result, library_data_t result_type) { detail::dotuc<false>(q, n, x, x_type, incx, y, y_type, incy, result, result_type); } /// Computes the dot product of two vectors, conjugating the first vector. /// \param [in] q The queue where the routine should be executed. /// \param [in] n Number of elements in vector x. /// \param [in] x Input vector x. /// \param [in] x_type Data type of the vector x. /// \param [in] incx Stride of vector x. /// \param [in] y Input vector y. /// \param [in] y_type Data type of the vector y. /// \param [in] incy Stride of vector y. /// \param [out] result The result scalar. /// \param [in] result_type Data type of the result. inline void dotc(sycl::queue &q, int n, const void *x, library_data_t x_type, int incx, const void *y, library_data_t y_type, int incy, void *result, library_data_t result_type) { detail::dotuc<true>(q, n, x, x_type, incx, y, y_type, incy, result, result_type); } /// Computes the product of a vector by a scalar. /// \param [in] q The queue where the routine should be executed. /// \param [in] n Number of elements in vector x. /// \param [in] alpha The scale factor alpha. /// \param [in] alpha_type The data type of alpha. /// \param [in, out] x Input/Output vector x. /// \param [in] x_type Data type of the vector x. /// \param [in] incx Stride of vector x. 
inline void scal(sycl::queue &q, int n, const void *alpha, library_data_t alpha_type, void *x, library_data_t x_type, int incx) { std::uint64_t key = detail::get_type_combination_id(x_type); switch (key) { case detail::get_type_combination_id(library_data_t::real_float): { detail::scal_impl<float, float>(q, n, alpha, x, incx); break; } case detail::get_type_combination_id(library_data_t::real_double): { detail::scal_impl<double, double>(q, n, alpha, x, incx); break; } case detail::get_type_combination_id(library_data_t::complex_float): { detail::scal_impl<std::complex<float>, std::complex<float>>(q, n, alpha, x, incx); break; } case detail::get_type_combination_id(library_data_t::complex_double): { detail::scal_impl<std::complex<double>, std::complex<double>>( q, n, alpha, x, incx); break; } case detail::get_type_combination_id(library_data_t::real_half): { float alpha_value = dpct::get_value(reinterpret_cast<const float *>(alpha), q); sycl::half alaph_half(alpha_value); detail::scal_impl<sycl::half, sycl::half>(q, n, &alaph_half, x, incx); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } /// Computes a vector-scalar product and adds the result to a vector. /// \param [in] q The queue where the routine should be executed. /// \param [in] n Number of elements in vector x. /// \param [in] alpha The scale factor alpha. /// \param [in] alpha_type The data type of alpha. /// \param [in] x Input vector x. /// \param [in] x_type Data type of the vector x. /// \param [in] incx Stride of vector x. /// \param [in, out] y Input/Output vector y. /// \param [in] y_type Data type of the vector y. /// \param [in] incy Stride of vector y. inline void axpy(sycl::queue &q, int n, const void *alpha, library_data_t alpha_type, const void *x, library_data_t x_type, int incx, void *y, library_data_t y_type, int incy) { std::uint64_t key = detail::get_type_combination_id(x_type, alpha_type); switch (key) { case detail::get_type_combination_id(library_data_t::real_float, library_data_t::real_float): { detail::axpy_impl<float, float>(q, n, alpha, x, incx, y, incy); break; } case detail::get_type_combination_id(library_data_t::real_double, library_data_t::real_double): { detail::axpy_impl<double, double>(q, n, alpha, x, incx, y, incy); break; } case detail::get_type_combination_id(library_data_t::complex_float, library_data_t::complex_float): { detail::axpy_impl<std::complex<float>, std::complex<float>>( q, n, alpha, x, incx, y, incy); break; } case detail::get_type_combination_id(library_data_t::complex_double, library_data_t::complex_double): { detail::axpy_impl<std::complex<double>, std::complex<double>>( q, n, alpha, x, incx, y, incy); break; } case detail::get_type_combination_id(library_data_t::real_half, library_data_t::real_float): { float alpha_value = dpct::get_value(reinterpret_cast<const float *>(alpha), q); sycl::half alaph_half(alpha_value); detail::axpy_impl<sycl::half, sycl::half>(q, n, &alaph_half, x, incx, y, incy); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } /// Performs rotation of points in the plane. /// \param [in] q The queue where the routine should be executed. /// \param [in] n Number of elements in vector x. /// \param [in, out] x Input/Output vector x. /// \param [in] x_type Data type of the vector x. /// \param [in] incx Stride of vector x. /// \param [in, out] y Input/Output vector y. /// \param [in] y_type Data type of the vector y. /// \param [in] incy Stride of vector y. 
/// \param [in] c Scaling factor. /// \param [in] s Scaling factor. /// \param [in] cs_type Data type of the scaling factors. inline void rot(sycl::queue &q, int n, void *x, library_data_t x_type, int incx, void *y, library_data_t y_type, int incy, const void *c, const void *s, library_data_t cs_type) { std::uint64_t key = detail::get_type_combination_id(x_type, cs_type); switch (key) { case detail::get_type_combination_id(library_data_t::real_float, library_data_t::real_float): { detail::rot_impl<float, float, float>(q, n, x, incx, y, incy, c, s); break; } case detail::get_type_combination_id(library_data_t::real_double, library_data_t::real_double): { detail::rot_impl<double, double, double>(q, n, x, incx, y, incy, c, s); break; } case detail::get_type_combination_id(library_data_t::complex_float, library_data_t::real_float): { detail::rot_impl<std::complex<float>, float, float>(q, n, x, incx, y, incy, c, s); break; } case detail::get_type_combination_id(library_data_t::complex_double, library_data_t::real_double): { detail::rot_impl<std::complex<double>, double, double>(q, n, x, incx, y, incy, c, s); break; } case detail::get_type_combination_id(library_data_t::complex_float, library_data_t::complex_float): { detail::rot_impl<std::complex<float>, float, std::complex<float>>(q, n, x, incx, y, incy, c, s); break; } case detail::get_type_combination_id(library_data_t::complex_double, library_data_t::complex_double): { detail::rot_impl<std::complex<double>, double, std::complex<double>>(q, n, x, incx, y, incy, c, s); break; } case detail::get_type_combination_id(library_data_t::real_half, library_data_t::real_half): { detail::rot_impl<sycl::half, sycl::half, sycl::half>(q, n, x, incx, y, incy, c, s); break; } case detail::get_type_combination_id(library_data_t::real_bfloat16, library_data_t::real_bfloat16): { detail::rot_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16>(q, n, x, incx, y, incy, c, s); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } /// Computes matrix-matrix product with general matrices. /// \param [in] q The queue where the routine should be executed. /// \param [in] a_trans Specifies the operation applied to A. /// \param [in] b_trans Specifies the operation applied to B. /// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C. /// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C. /// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B). /// \param [in] alpha Scaling factor for the matrix-matrix product. /// \param [in] a Input matrix A. /// \param [in] a_type Data type of the matrix A. /// \param [in] lda Leading dimension of A. /// \param [in] b Input matrix B. /// \param [in] b_type Data type of the matrix B. /// \param [in] ldb Leading dimension of B. /// \param [in] beta Scaling factor for matrix C. /// \param [in, out] c Input/Output matrix C. /// \param [in] c_type Data type of the matrix C. /// \param [in] ldc Leading dimension of C. /// \param [in] scaling_type Data type of the scaling factors. 
inline void gemm(sycl::queue &q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m, int n, int k, const void *alpha, const void *a, library_data_t a_type, int lda, const void *b, library_data_t b_type, int ldb, const void *beta, void *c, library_data_t c_type, int ldc, library_data_t scaling_type) { bool matched = false; if (scaling_type == library_data_t::real_float && c_type == library_data_t::complex_float) { scaling_type = library_data_t::complex_float; } else if (scaling_type == library_data_t::real_double && c_type == library_data_t::complex_double) { scaling_type = library_data_t::complex_double; } std::uint64_t key = detail::get_type_combination_id(a_type, b_type, c_type, scaling_type); switch (key) { case detail::get_type_combination_id( library_data_t::real_float, library_data_t::real_float, library_data_t::real_float, library_data_t::real_float): { detail::gemm_impl<float, float, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_double, library_data_t::real_double, library_data_t::real_double, library_data_t::real_double): { detail::gemm_impl<double, double, double, double>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float): { detail::gemm_impl<std::complex<float>, std::complex<float>, std::complex<float>, std::complex<float>>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double): { detail::gemm_impl<std::complex<double>, std::complex<double>, std::complex<double>, std::complex<double>>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_half): { detail::gemm_impl<sycl::half, sycl::half, sycl::half, sycl::half>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float, library_data_t::real_float): { detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float, float>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_float, library_data_t::real_float): { detail::gemm_impl<sycl::half, sycl::half, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_float): { float alpha_value = dpct::get_value(reinterpret_cast<const float *>(alpha), q); float beta_value = dpct::get_value(reinterpret_cast<const float *>(beta), q); sycl::half alpha_half(alpha_value); sycl::half beta_half(beta_value); detail::gemm_impl<sycl::half, sycl::half, sycl::half, sycl::half>(q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb, &beta_half, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_float, 
library_data_t::real_float): { detail::gemm_impl<std::int8_t, std::int8_t, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float): { detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_int32, library_data_t::real_int32): { float alpha_float = dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q); float beta_float = dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q); detail::gemm_impl<std::int8_t, std::int8_t, std::int32_t, float>( q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb, &beta_float, c, ldc); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } /// Computes a batch of matrix-matrix product with general matrices. /// \param [in] q The queue where the routine should be executed. /// \param [in] a_trans Specifies the operation applied to A. /// \param [in] b_trans Specifies the operation applied to B. /// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C. /// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C. /// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B). /// \param [in] alpha Scaling factor for the matrix-matrix product. /// \param [in] a Input matrix A. /// \param [in] a_type Data type of the matrix A. /// \param [in] lda Leading dimension of A. /// \param [in] b Input matrix B. /// \param [in] b_type Data type of the matrix B. /// \param [in] ldb Leading dimension of B. /// \param [in] beta Scaling factor for matrix C. /// \param [in, out] c Input/Output matrix C. /// \param [in] c_type Data type of the matrix C. /// \param [in] ldc Leading dimension of C. /// \param [in] batch_size Specifies the number of matrix multiply operations to perform. /// \param [in] scaling_type Data type of the scaling factors. 
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m, int n, int k, const void *alpha, const void *a[], library_data_t a_type, int lda, const void *b[], library_data_t b_type, int ldb, const void *beta, void *c[], library_data_t c_type, int ldc, int batch_size, library_data_t scaling_type) { #ifdef DPCT_USM_LEVEL_NONE throw std::runtime_error("this API is unsupported when USM level is none"); #else bool matched = false; if (scaling_type == library_data_t::real_float && c_type == library_data_t::complex_float) { scaling_type = library_data_t::complex_float; } else if (scaling_type == library_data_t::real_double && c_type == library_data_t::complex_double) { scaling_type = library_data_t::complex_double; } std::uint64_t key = detail::get_type_combination_id(a_type, b_type, c_type, scaling_type); switch (key) { case detail::get_type_combination_id( library_data_t::real_float, library_data_t::real_float, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<float, float, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_double, library_data_t::real_double, library_data_t::real_double, library_data_t::real_double): { detail::gemm_batch_impl<double, double, double, double>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float): { detail::gemm_batch_impl<std::complex<float>, std::complex<float>, std::complex<float>, std::complex<float>>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double): { detail::gemm_batch_impl<std::complex<double>, std::complex<double>, std::complex<double>, std::complex<double>>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_half): { detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } #ifdef __INTEL_MKL__ case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float): { detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float, float>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_int32, library_data_t::real_int32): { float alpha_float = dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q); float beta_float = dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q); 
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t, float>(q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb, &beta_float, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<sycl::half, sycl::half, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size); break; } #endif case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_float): { float alpha_value = dpct::get_value(reinterpret_cast<const float *>(alpha), q); float beta_value = dpct::get_value(reinterpret_cast<const float *>(beta), q); sycl::half alpha_half(alpha_value); sycl::half beta_half(beta_value); detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>( q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb, &beta_half, c, ldc, batch_size); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } #endif } /// Computes a batch of matrix-matrix product with general matrices. /// \param [in] q The queue where the routine should be executed. /// \param [in] a_trans Specifies the operation applied to A. /// \param [in] b_trans Specifies the operation applied to B. /// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C. /// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C. /// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B). /// \param [in] alpha Scaling factor for the matrix-matrix product. /// \param [in] a Input matrix A. /// \param [in] a_type Data type of the matrix A. /// \param [in] lda Leading dimension of A. /// \param [in] stride_a Stride between the different A matrices. /// \param [in] b Input matrix B. /// \param [in] b_type Data type of the matrix B. /// \param [in] ldb Leading dimension of B. /// \param [in] stride_b Stride between the different B matrices. /// \param [in] beta Scaling factor for matrix C. /// \param [in, out] c Input/Output matrix C. /// \param [in] c_type Data type of the matrix C. /// \param [in] ldc Leading dimension of C. /// \param [in] stride_c Stride between the different C matrices. /// \param [in] batch_size Specifies the number of matrix multiply operations to perform. /// \param [in] scaling_type Data type of the scaling factors. 
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m, int n, int k, const void *alpha, const void *a, library_data_t a_type, int lda, long long int stride_a, const void *b, library_data_t b_type, int ldb, long long int stride_b, const void *beta, void *c, library_data_t c_type, int ldc, long long int stride_c, int batch_size, library_data_t scaling_type) { bool matched = false; if (scaling_type == library_data_t::real_float && c_type == library_data_t::complex_float) { scaling_type = library_data_t::complex_float; } else if (scaling_type == library_data_t::real_double && c_type == library_data_t::complex_double) { scaling_type = library_data_t::complex_double; } std::uint64_t key = detail::get_type_combination_id(a_type, b_type, c_type, scaling_type); switch (key) { case detail::get_type_combination_id( library_data_t::real_float, library_data_t::real_float, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<float, float, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_double, library_data_t::real_double, library_data_t::real_double, library_data_t::real_double): { detail::gemm_batch_impl<double, double, double, double>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float): { detail::gemm_batch_impl<std::complex<float>, std::complex<float>, std::complex<float>, std::complex<float>>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double): { detail::gemm_batch_impl<std::complex<double>, std::complex<double>, std::complex<double>, std::complex<double>>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_half): { detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } #ifdef __INTEL_MKL__ case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float): { detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_bfloat16, library_data_t::real_bfloat16, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float, float>(q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_int32, library_data_t::real_int32): { 
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t, std::int32_t>(q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_int8, library_data_t::real_int8, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_float, library_data_t::real_float): { detail::gemm_batch_impl<sycl::half, sycl::half, float, float>( q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c, batch_size); break; } #endif case detail::get_type_combination_id( library_data_t::real_half, library_data_t::real_half, library_data_t::real_half, library_data_t::real_float): { float alpha_value = dpct::get_value(reinterpret_cast<const float *>(alpha), q); float beta_value = dpct::get_value(reinterpret_cast<const float *>(beta), q); sycl::half alpha_half(alpha_value); sycl::half beta_half(beta_value); detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>( q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, stride_a, b, ldb, stride_b, &beta_half, c, ldc, stride_c, batch_size); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } } /// This routines perform a special rank-k update of a symmetric matrix C by /// general matrices A and B. /// \param [in] q The queue where the routine should be executed. /// \param [in] uplo Specifies whether C's data is stored in its upper or lower triangle. /// \param [in] trans Specifies the operation to apply. /// \param [in] n The number of rows and columns in C. /// \param [in] k The inner dimension of matrix multiplications. /// \param [in] alpha Scaling factor for the rank-k update. /// \param [in] a Input matrix A. /// \param [in] lda Leading dimension of A. /// \param [in] b Input matrix B. /// \param [in] ldb Leading dimension of B. /// \param [in] beta Scaling factor for the rank-k update. /// \param [in, out] c Input/Output matrix C. /// \param [in] ldc Leading dimension of C. template <class T> inline void syrk(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::transpose trans, int n, int k, const T *alpha, const T *a, int lda, const T *b, int ldb, const T *beta, T *c, int ldc) { detail::rk_impl<false, T, T>(q, uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } /// This routines perform a special rank-k update of a Hermitian matrix C by /// general matrices A and B. /// \param [in] q The queue where the routine should be executed. /// \param [in] uplo Specifies whether C's data is stored in its upper or lower triangle. /// \param [in] trans Specifies the operation to apply. /// \param [in] n The number of rows and columns in C. /// \param [in] k The inner dimension of matrix multiplications. /// \param [in] alpha Scaling factor for the rank-k update. /// \param [in] a Input matrix A. /// \param [in] lda Leading dimension of A. /// \param [in] b Input matrix B. /// \param [in] ldb Leading dimension of B. /// \param [in] beta Scaling factor for the rank-k update. /// \param [in, out] c Input/Output matrix C. /// \param [in] ldc Leading dimension of C. 
template <class T, class Tbeta> inline void herk(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::transpose trans, int n, int k, const T *alpha, const T *a, int lda, const T *b, int ldb, const Tbeta *beta, T *c, int ldc) { detail::rk_impl<true, T, Tbeta>(q, uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } /// This routine performs a group of trsm operations. Each trsm solves an /// equation of the form op(A) * X = alpha * B or X * op(A) = alpha * B. /// \param [in] q The queue where the routine should be executed. /// \param [in] left_right Specifies A multiplies X on the left or on the right. /// \param [in] upper_lower Specifies A is upper or lower triangular. /// \param [in] trans Specifies the operation applied to A. /// \param [in] unit_diag Specifies whether A is unit triangular. /// \param [in] m Number of rows of the B matrices. /// \param [in] n Number of columns of the B matrices. /// \param [in] alpha Scaling factor for the solutions. /// \param [in] a Input matrices A. /// \param [in] a_type Data type of the matrices A. /// \param [in] lda Leading dimension of the matrices A. /// \param [in, out] b Input and output matrices B. /// \param [in] b_type Data type of the matrices B. /// \param [in] ldb Leading dimension of the matrices B. /// \param [in] batch_size Specifies the number of trsm operations to perform. /// \param [in] scaling_type Data type of the scaling factors. inline void trsm_batch(sycl::queue &q, oneapi::mkl::side left_right, oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans, oneapi::mkl::diag unit_diag, int m, int n, const void *alpha, const void **a, library_data_t a_type, int lda, void **b, library_data_t b_type, int ldb, int batch_size, library_data_t scaling_type) { #ifdef DPCT_USM_LEVEL_NONE throw std::runtime_error("this API is unsupported when USM level is none"); #else std::uint64_t key = detail::get_type_combination_id(a_type, b_type, scaling_type); switch (key) { case detail::get_type_combination_id(library_data_t::real_float, library_data_t::real_float, library_data_t::real_float): { detail::trsm_batch_impl<float, float, float>(q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b, ldb, batch_size); break; } case detail::get_type_combination_id(library_data_t::real_double, library_data_t::real_double, library_data_t::real_double): { detail::trsm_batch_impl<double, double, double>( q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b, ldb, batch_size); break; } case detail::get_type_combination_id(library_data_t::complex_float, library_data_t::complex_float, library_data_t::complex_float): { detail::trsm_batch_impl<std::complex<float>, std::complex<float>, std::complex<float>>(q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b, ldb, batch_size); break; } case detail::get_type_combination_id(library_data_t::complex_double, library_data_t::complex_double, library_data_t::complex_double): { detail::trsm_batch_impl<std::complex<double>, std::complex<double>, std::complex<double>>(q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b, ldb, batch_size); break; } default: throw std::runtime_error("the combination of data type is unsupported"); } #endif } /// Computes a triangular matrix-general matrix product. /// \param [in] q The queue where the routine should be executed. /// \param [in] left_right Specifies A is on the left or right side of the /// multiplication. /// \param [in] upper_lower Specifies A is upper or lower triangular. 
/// \param [in] trans Specifies the operation applied to A. /// \param [in] unit_diag Specifies whether A is unit triangular. /// \param [in] m Number of rows of B. /// \param [in] n Number of columns of B. /// \param [in] alpha Scaling factor for the matrix-matrix product. /// \param [in] a Input matrix A. /// \param [in] lda Leading dimension of the matrix A. /// \param [in] b Input matrix B. /// \param [in] ldb Leading dimension of the matrix B. /// \param [out] c Output matrix C. /// \param [in] ldc Leading dimension of the matrix C. template <class T> inline void trmm(sycl::queue &q, oneapi::mkl::side left_right, oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans, oneapi::mkl::diag unit_diag, int m, int n, const T *alpha, const T *a, int lda, const T *b, int ldb, T *c, int ldc) { using Ty = typename DataType<T>::T2; auto alpha_val = dpct::get_value(alpha, q); if (b != c) { dpct::matrix_mem_copy(c, b, ldc, ldb, m, n, dpct::device_to_device, q); } auto data_a = detail::get_memory(reinterpret_cast<const Ty *>(a)); auto data_c = detail::get_memory(reinterpret_cast<Ty *>(c)); oneapi::mkl::blas::column_major::trmm(q, left_right, upper_lower, trans, unit_diag, m, n, alpha_val, data_a, lda, data_c, ldc); } } // namespace dpct #endif // __DPCT_BLAS_UTILS_HPP__
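A minimal usage sketch for the vector wrappers above (axpy, nrm2, dot). It is not part of the migrated sample: the in-order queue, the shared-USM buffers, and the sizes are assumptions, and it presumes a USM-enabled build (not DPCT_USM_LEVEL_NONE) with the usual <dpct/dpct.hpp> include layout.

#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <dpct/blas_utils.hpp>

int main() {
  // In-order queue so the asynchronous BLAS submissions run in program order.
  sycl::queue q{sycl::property::queue::in_order()};
  constexpr int n = 1024;
  float *x = sycl::malloc_shared<float>(n, q);
  float *y = sycl::malloc_shared<float>(n, q);
  float *nrm = sycl::malloc_shared<float>(1, q);
  float *xdoty = sycl::malloc_shared<float>(1, q);
  for (int i = 0; i < n; ++i) { x[i] = 1.0f; y[i] = 2.0f; }

  float alpha = 3.0f;
  // y = alpha * x + y
  dpct::axpy(q, n, &alpha, dpct::library_data_t::real_float, x,
             dpct::library_data_t::real_float, 1, y,
             dpct::library_data_t::real_float, 1);
  // nrm = ||x||_2, xdoty = x . y
  dpct::nrm2(q, n, x, dpct::library_data_t::real_float, 1, nrm,
             dpct::library_data_t::real_float);
  dpct::dot(q, n, x, dpct::library_data_t::real_float, 1, y,
            dpct::library_data_t::real_float, 1, xdoty,
            dpct::library_data_t::real_float);
  q.wait();
  // Expected: *nrm == 32 (sqrt(1024)) and *xdoty == 1024 * 5 == 5120.
  sycl::free(x, q); sycl::free(y, q); sycl::free(nrm, q); sycl::free(xdoty, q);
  return 0;
}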
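A second sketch, again hypothetical, exercising the mixed-precision dispatch of the gemm wrapper: FP16 A and B accumulated into an FP32 C through the real_half/real_half/real_float/real_float case. Storage is column-major, as with oneMKL and cuBLAS.

#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <dpct/blas_utils.hpp>

int main() {
  sycl::queue q{sycl::property::queue::in_order()};
  constexpr int n = 256;  // square matrices, column-major, lda = ldb = ldc = n
  sycl::half *a = sycl::malloc_shared<sycl::half>(n * n, q);
  sycl::half *b = sycl::malloc_shared<sycl::half>(n * n, q);
  float *c = sycl::malloc_shared<float>(n * n, q);
  for (int i = 0; i < n * n; ++i) {
    a[i] = sycl::half(1.0f);
    b[i] = sycl::half(1.0f);
    c[i] = 0.0f;
  }

  float alpha = 1.0f, beta = 0.0f;
  // C(fp32) = alpha * A(fp16) * B(fp16) + beta * C
  dpct::gemm(q, oneapi::mkl::transpose::nontrans,
             oneapi::mkl::transpose::nontrans, n, n, n, &alpha,
             a, dpct::library_data_t::real_half, n,
             b, dpct::library_data_t::real_half, n, &beta,
             c, dpct::library_data_t::real_float, n,
             dpct::library_data_t::real_float);
  q.wait();
  // Every entry of C should equal n: a row of ones dotted with a column of ones.
  sycl::free(a, q); sycl::free(b, q); sycl::free(c, q);
  return 0;
}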
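The strided gemm_batch overload is the closest analogue of a cuBLAS strided-batched GEMM call; the sketch below runs one uniform FP32 batch, with the matrix size, batch count, and strides chosen purely for illustration.

#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <dpct/blas_utils.hpp>

int main() {
  sycl::queue q{sycl::property::queue::in_order()};
  constexpr int n = 64, batch = 8;
  constexpr long long stride = static_cast<long long>(n) * n;  // one matrix per batch entry
  float *a = sycl::malloc_shared<float>(stride * batch, q);
  float *b = sycl::malloc_shared<float>(stride * batch, q);
  float *c = sycl::malloc_shared<float>(stride * batch, q);
  for (long long i = 0; i < stride * batch; ++i) {
    a[i] = 1.0f;
    b[i] = 1.0f;
    c[i] = 0.0f;
  }

  float alpha = 1.0f, beta = 0.0f;
  dpct::gemm_batch(q, oneapi::mkl::transpose::nontrans,
                   oneapi::mkl::transpose::nontrans, n, n, n, &alpha,
                   a, dpct::library_data_t::real_float, n, stride,
                   b, dpct::library_data_t::real_float, n, stride, &beta,
                   c, dpct::library_data_t::real_float, n, stride,
                   batch, dpct::library_data_t::real_float);
  q.wait();
  // Each of the 8 result matrices should be filled with the value n (= 64).
  sycl::free(a, q); sycl::free(b, q); sycl::free(c, q);
  return 0;
}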
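Finally, a small sketch of the templated trmm wrapper, which copies B into C when the two pointers differ and then multiplies in place. The diagonal A used here simply scales B by 2, which keeps the expected result easy to check; all names and dimensions are illustrative.

#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <dpct/blas_utils.hpp>

int main() {
  sycl::queue q{sycl::property::queue::in_order()};
  constexpr int m = 128, n = 32;
  float *a = sycl::malloc_shared<float>(m * m, q);  // m x m triangular factor (side = left)
  float *b = sycl::malloc_shared<float>(m * n, q);
  float *c = sycl::malloc_shared<float>(m * n, q);
  for (int i = 0; i < m * m; ++i) a[i] = 0.0f;
  for (int i = 0; i < m; ++i) a[i * m + i] = 2.0f;  // column-major diagonal => C = 2 * B
  for (int i = 0; i < m * n; ++i) b[i] = 1.0f;

  float alpha = 1.0f;
  // C = alpha * op(A) * B with A upper triangular, non-unit diagonal.
  dpct::trmm(q, oneapi::mkl::side::left, oneapi::mkl::uplo::upper,
             oneapi::mkl::transpose::nontrans, oneapi::mkl::diag::nonunit,
             m, n, &alpha, a, m, b, m, c, m);
  q.wait();
  // Expected: every entry of C equals 2.
  sycl::free(a, q); sycl::free(b, q); sycl::free(c, q);
  return 0;
}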
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/atomic.hpp
//==---- atomic.hpp -------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_ATOMIC_HPP__ #define __DPCT_ATOMIC_HPP__ #include <sycl/sycl.hpp> namespace dpct { /// Atomically add the value operand to the value at the addr and assign the /// result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to add to the value at \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_fetch_add(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_add(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_fetch_add(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_add(operand); } /// Atomically add the value operand to the value at the addr and assign the /// result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to add to the value at \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_fetch_add(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_add<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_add<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_add<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_fetch_add(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_fetch_add<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically subtract the value operand from the value at the addr and assign /// the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to subtract from the value at \p addr /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. 
template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_fetch_sub(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_sub(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_fetch_sub(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_sub(operand); } /// Atomically subtract the value operand from the value at the addr and assign /// the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to subtract from the value at \p addr /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_fetch_sub(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_sub<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_sub<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_sub<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_fetch_sub(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_fetch_sub<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically perform a bitwise AND between the value operand and the value at the addr /// and assign the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to use in bitwise AND operation with the value at the \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. 
template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_fetch_and(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_and(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_fetch_and(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_and(operand); } /// Atomically perform a bitwise AND between the value operand and the value at the addr /// and assign the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to use in bitwise AND operation with the value at the \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_fetch_and(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_and<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_and<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_and<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_fetch_and(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_fetch_and<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically or the value at the addr with the value operand, and assign /// the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to use in bitwise OR operation with the value at the \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. 
template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_fetch_or(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_or(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_fetch_or(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_or(operand); } /// Atomically or the value at the addr with the value operand, and assign /// the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to use in bitwise OR operation with the value at the \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_fetch_or(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_or<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_or<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_or<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_fetch_or(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_fetch_or<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically xor the value at the addr with the value operand, and assign /// the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand The value to use in bitwise XOR operation with the value at the \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_fetch_xor(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_xor(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_fetch_xor(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_xor(operand); } /// Atomically xor the value at the addr with the value operand, and assign /// the result to the value at addr. 
/// \param [in, out] addr The pointer to the data. /// \param operand The value to use in bitwise XOR operation with the value at the \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_fetch_xor(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_xor<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_xor<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_xor<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_fetch_xor(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_fetch_xor<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically calculate the minimum of the value at addr and the value operand /// and assign the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_fetch_min(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_min(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_fetch_min(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_min(operand); } /// Atomically calculate the minimum of the value at addr and the value operand /// and assign the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_fetch_min(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_min<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_min<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_min<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. 
Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_fetch_min(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_fetch_min<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically calculate the maximum of the value at addr and the value operand /// and assign the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_fetch_max(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_max(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_fetch_max(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.fetch_max(operand); } /// Atomically calculate the maximum of the value at addr and the value operand /// and assign the result to the value at addr. /// \param [in, out] addr The pointer to the data. /// \param operand. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_fetch_max(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_max<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_max<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_max<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_fetch_max(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_fetch_max<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically set \p operand to the value stored in \p addr, if old value stored in /// \p addr is equal to zero or greater than \p operand, else decrease the value stored /// in \p addr. /// \param [in, out] addr The pointer to the data. /// \param operand The threshold value. /// \param memoryOrder The memory ordering used. /// \returns The old value stored in \p addr. 
template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline unsigned int atomic_fetch_compare_dec(unsigned int *addr, unsigned int operand) { auto atm = sycl::atomic_ref<unsigned int, memoryOrder, memoryScope, addressSpace>(addr[0]); unsigned int old; while (true) { old = atm.load(); if (old == 0 || old > operand) { if (atm.compare_exchange_strong(old, operand)) break; } else if (atm.compare_exchange_strong(old, old - 1)) break; } return old; } /// Atomically increment the value stored in \p addr if old value stored in \p /// addr is less than \p operand, else set 0 to the value stored in \p addr. /// \param [in, out] addr The pointer to the data. /// \param operand The threshold value. /// \param memoryOrder The memory ordering used. /// \returns The old value stored in \p addr. template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline unsigned int atomic_fetch_compare_inc(unsigned int *addr, unsigned int operand) { auto atm = sycl::atomic_ref<unsigned int, memoryOrder, memoryScope, addressSpace>(addr[0]); unsigned int old; while (true) { old = atm.load(); if (old >= operand) { if (atm.compare_exchange_strong(old, 0)) break; } else if (atm.compare_exchange_strong(old, old + 1)) break; } return old; } /// Atomically increment the value stored in \p addr if old value stored in \p /// addr is less than \p operand, else set 0 to the value stored in \p addr. /// \param [in, out] addr The pointer to the data. /// \param operand The threshold value. /// \param memoryOrder The memory ordering used. /// \returns The old value stored in \p addr. template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline unsigned int atomic_fetch_compare_inc(unsigned int *addr, unsigned int operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } /// Atomically exchange the value at the address addr with the value operand. /// \param [in, out] addr The pointer to the data. /// \param operand The value to be exchanged with the value pointed by \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. 
template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> inline T atomic_exchange(T *addr, T operand) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.exchange(operand); } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2> inline T1 atomic_exchange(T1 *addr, T2 operand) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); return atm.exchange(operand); } /// Atomically exchange the value at the address addr with the value operand. /// \param [in, out] addr The pointer to the data. /// \param operand The value to be exchanged with the value pointed by \p addr. /// \param memoryOrder The memory ordering used. /// \returns The value at the \p addr before the call. template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space> inline T atomic_exchange(T *addr, T operand, sycl::memory_order memoryOrder) { switch (memoryOrder) { case sycl::memory_order::relaxed: return atomic_exchange<T, addressSpace, sycl::memory_order::relaxed, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::acq_rel: return atomic_exchange<T, addressSpace, sycl::memory_order::acq_rel, sycl::memory_scope::device>(addr, operand); case sycl::memory_order::seq_cst: return atomic_exchange<T, addressSpace, sycl::memory_order::seq_cst, sycl::memory_scope::device>(addr, operand); default: assert(false && "Invalid memory_order for atomics. Valid memory_order for " "atomics are: sycl::memory_order::relaxed, " "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!"); } } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, typename T1, typename T2> inline T1 atomic_exchange(T1 *addr, T2 operand, sycl::memory_order memoryOrder) { atomic_exchange<T1, addressSpace>(addr, operand, memoryOrder); } /// Atomically compare the value at \p addr to the value expected and exchange /// with the value desired if the value at \p addr is equal to the value expected. /// Returns the value at the \p addr before the call. /// \param [in, out] addr Multi_ptr. /// \param expected The value to compare against the value at \p addr. /// \param desired The value to assign to \p addr if the value at \p addr is expected. /// \param success The memory ordering used when comparison succeeds. /// \param fail The memory ordering used when comparison fails. /// \returns The value at the \p addr before the call. 
template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> T atomic_compare_exchange_strong( sycl::multi_ptr<T, addressSpace> addr, T expected, T desired, sycl::memory_order success = sycl::memory_order::relaxed, sycl::memory_order fail = sycl::memory_order::relaxed) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(*addr); atm.compare_exchange_strong(expected, desired, success, fail); return expected; } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2, typename T3> T1 atomic_compare_exchange_strong( sycl::multi_ptr<T1, addressSpace> addr, T2 expected, T3 desired, sycl::memory_order success = sycl::memory_order::relaxed, sycl::memory_order fail = sycl::memory_order::relaxed) { auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(*addr); T1 expected_value = expected; atm.compare_exchange_strong(expected_value, desired, success, fail); return expected_value; } /// Atomically compare the value at \p addr to the value expected and exchange /// with the value desired if the value at \p addr is equal to the value expected. /// Returns the value at the \p addr before the call. /// \param [in] addr The pointer to the data. /// \param expected The value to compare against the value at \p addr. /// \param desired The value to assign to \p addr if the value at \p addr is expected. /// \param success The memory ordering used when comparison succeeds. /// \param fail The memory ordering used when comparison fails. /// \returns The value at the \p addr before the call. 
template <typename T, sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device> T atomic_compare_exchange_strong( T *addr, T expected, T desired, sycl::memory_order success = sycl::memory_order::relaxed, sycl::memory_order fail = sycl::memory_order::relaxed) { auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]); atm.compare_exchange_strong(expected, desired, success, fail); return expected; } template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space, sycl::memory_order memoryOrder = sycl::memory_order::relaxed, sycl::memory_scope memoryScope = sycl::memory_scope::device, typename T1, typename T2, typename T3> T1 atomic_compare_exchange_strong( T1 *addr, T2 expected, T3 desired, sycl::memory_order success = sycl::memory_order::relaxed, sycl::memory_order fail = sycl::memory_order::relaxed) { T1 expected_value = expected; auto atm = sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]); atm.compare_exchange_strong(expected_value, desired, success, fail); return expected_value; } /// Atomic extension to implement standard APIs in std::atomic namespace detail{ template <typename T> struct IsValidAtomicType { static constexpr bool value = (std::is_same<T, int>::value || std::is_same<T, unsigned int>::value || std::is_same<T, long>::value || std::is_same<T, unsigned long>::value || std::is_same<T, long long>::value || std::is_same<T, unsigned long long>::value || std::is_same<T, float>::value || std::is_same<T, double>::value || std::is_pointer<T>::value); }; } // namespace detail template <typename T, sycl::memory_scope DefaultScope = sycl::memory_scope::system, sycl::memory_order DefaultOrder = sycl::memory_order::seq_cst, sycl::access::address_space Space = sycl::access::address_space::generic_space> class atomic{ static_assert( detail::IsValidAtomicType<T>::value, "Invalid atomic type. Valid types are int, unsigned int, long, " "unsigned long, long long, unsigned long long, float, double " "and pointer types"); T __d; public: /// default memory synchronization order static constexpr sycl::memory_order default_read_order = sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space>::default_read_order; static constexpr sycl::memory_order default_write_order = sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space>::default_write_order; static constexpr sycl::memory_scope default_scope = DefaultScope; static constexpr sycl::memory_order default_read_modify_write_order = DefaultOrder; /// Default constructor. constexpr atomic() noexcept = default; /// Constructor with initialize value. constexpr atomic(T d) noexcept : __d(d){}; /// atomically replaces the value of the referenced object with a non-atomic argument /// \param operand The value to replace the pointed value. /// \param memoryOrder The memory ordering used. /// \param memoryScope The memory scope used. void store(T operand, sycl::memory_order memoryOrder = default_write_order, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); atm.store(operand, memoryOrder, memoryScope); } /// atomically obtains the value of the referenced object /// \param memoryOrder The memory ordering used. /// \param memoryScope The memory scope used. 
/// \returns The value of the referenced object T load(sycl::memory_order memoryOrder = default_read_order, sycl::memory_scope memoryScope = default_scope) const noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm( const_cast<T &>(__d)); return atm.load(memoryOrder, memoryScope); } /// atomically replaces the value of the referenced object and obtains the value held previously /// \param operand The value to replace the pointed value. /// \param memoryOrder The memory ordering used. /// \param memoryScope The memory scope used. /// \returns The value of the referenced object before the call. T exchange(T operand, sycl::memory_order memoryOrder = default_read_modify_write_order, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); return atm.exchange(operand, memoryOrder, memoryScope); } /// atomically compares the value of the referenced object with non-atomic argument /// and performs atomic exchange if equal or atomic load if not /// \param expected The value expected to be found in the object referenced by the atomic_ref object /// \param desired The value to store in the referenced object if it is as expected /// \param success The memory models for the read-modify-write /// \param failure The memory models for load operations /// \param memoryScope The memory scope used. /// \returns true if the referenced object was successfully changed, false otherwise. bool compare_exchange_weak( T &expected, T desired, sycl::memory_order success, sycl::memory_order failure, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); return atm.compare_exchange_weak(expected, desired, success, failure, memoryScope); } /// \param expected The value expected to be found in the object referenced by the atomic_ref object /// \param desired The value to store in the referenced object if it is as expected /// \param memoryOrder The memory synchronization ordering for operations /// \param memoryScope The memory scope used. /// \returns true if the referenced object was successfully changed, false otherwise. bool compare_exchange_weak(T &expected, T desired, sycl::memory_order memoryOrder = default_read_modify_write_order, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); return atm.compare_exchange_weak(expected, desired, memoryOrder, memoryScope); } /// atomically compares the value of the referenced object with non-atomic argument /// and performs atomic exchange if equal or atomic load if not /// \param expected The value expected to be found in the object referenced by the atomic_ref object /// \param desired The value to store in the referenced object if it is as expected /// \param success The memory models for the read-modify-write /// \param failure The memory models for load operations /// \param memoryScope The memory scope used. /// \returns true if the referenced object was successfully changed, false otherwise. 
bool compare_exchange_strong( T &expected, T desired, sycl::memory_order success, sycl::memory_order failure, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); return atm.compare_exchange_strong(expected, desired, success, failure, memoryScope); } /// \param expected The value expected to be found in the object referenced by the atomic_ref object /// \param desired The value to store in the referenced object if it is as expected /// \param memoryOrder The memory synchronization ordering for operations /// \param memoryScope The memory scope used. /// \returns true if the referenced object was successfully changed, false otherwise. bool compare_exchange_strong(T &expected, T desired, sycl::memory_order memoryOrder = default_read_modify_write_order, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); return atm.compare_exchange_strong(expected, desired, memoryOrder, memoryScope); } /// atomically adds the argument to the value stored in the atomic object and obtains the value held previously /// \param operand The other argument of arithmetic addition /// \param memoryOrder The memory ordering used. /// \param memoryScope The memory scope used. /// \returns The value of the referenced object before the call. T fetch_add(T operand, sycl::memory_order memoryOrder = default_read_modify_write_order, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); return atm.fetch_add(operand, memoryOrder, memoryScope); } /// atomically subtracts the argument from the value stored in the atomic object and obtains the value held previously /// \param operand The other argument of arithmetic subtraction /// \param memoryOrder The memory ordering used. /// \param memoryScope The memory scope used. /// \returns The value of the referenced object before the call. T fetch_sub(T operand, sycl::memory_order memoryOrder = default_read_modify_write_order, sycl::memory_scope memoryScope = default_scope) noexcept { sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d); return atm.fetch_sub(operand, memoryOrder, memoryScope); } }; } // namespace dpct #endif // __DPCT_ATOMIC_HPP__
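// Usage sketch: a minimal call pattern for the atomic helpers defined above.
// The queue `q`, the USM allocation `flag`, and the kernel shape below are
// hypothetical and only illustrate how these functions are typically invoked
// from a SYCL kernel:
//
//   #include <sycl/sycl.hpp>
//
//   void claim_once(sycl::queue &q) {
//     int *flag = sycl::malloc_shared<int>(1, q);
//     *flag = 0;
//     q.parallel_for(sycl::range<1>(64), [=](sycl::id<1>) {
//       // Exactly one work-item observes the old value 0; all others see 1.
//       int prev = dpct::atomic_compare_exchange_strong(flag, 0, 1);
//       (void)prev;
//     }).wait();
//     sycl::free(flag, q);
//   }
//
// dpct::atomic<T> exposes the same operations behind a std::atomic-style
// interface (load, store, exchange, fetch_add, fetch_sub, ...).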
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/rng_utils.hpp
//==---- rng_utils.hpp ----------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_RNG_UTILS_HPP__ #define __DPCT_RNG_UTILS_HPP__ #include <sycl/sycl.hpp> #include <oneapi/mkl.hpp> #ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this. #include <oneapi/mkl/rng/device.hpp> #endif #include "device.hpp" #include "lib_common_utils.hpp" namespace dpct { namespace rng { #ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this. namespace device { /// The random number generator on device. /// \tparam engine_t The device random number generator engine. It can only be /// oneapi::mkl::rng::device::mrg32k3a<1> or /// oneapi::mkl::rng::device::mrg32k3a<4> or /// oneapi::mkl::rng::device::philox4x32x10<1> or /// oneapi::mkl::rng::device::philox4x32x10<4>. template <typename engine_t> class rng_generator { static_assert( std::disjunction_v< std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>, std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<4>>, std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>, std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<4>>, std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>, "engine_t can only be oneapi::mkl::rng::device::mrg32k3a<1> or " "oneapi::mkl::rng::device::mrg32k3a<4> or " "oneapi::mkl::rng::device::philox4x32x10<1> or " "oneapi::mkl::rng::device::philox4x32x10<4> or " "oneapi::mkl::rng::device::mcg59<1>."); static constexpr bool _is_engine_vec_size_one = std::disjunction_v< std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>, std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>, std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>; static constexpr std::uint64_t default_seed = 0; oneapi::mkl::rng::device::bits<std::uint32_t> _distr_bits; oneapi::mkl::rng::device::uniform_bits<std::uint32_t> _distr_uniform_bits; oneapi::mkl::rng::device::gaussian<float> _distr_gaussian_float; oneapi::mkl::rng::device::gaussian<double> _distr_gaussian_double; oneapi::mkl::rng::device::lognormal<float> _distr_lognormal_float; oneapi::mkl::rng::device::lognormal<double> _distr_lognormal_double; oneapi::mkl::rng::device::poisson<std::uint32_t> _distr_poisson; oneapi::mkl::rng::device::uniform<float> _distr_uniform_float; oneapi::mkl::rng::device::uniform<double> _distr_uniform_double; engine_t _engine; public: /// Default constructor of rng_generator rng_generator() { _engine = engine_t(default_seed); } /// Constructor of rng_generator if engine type is not mcg59 /// \param [in] seed The seed to initialize the engine state. /// \param [in] num_to_skip Set the number of elements need to be skipped. /// The number is calculated as: num_to_skip[0] + num_to_skip[1] * 2^64 + /// num_to_skip[2] * 2^128 + ... + num_to_skip[n-1] * 2^(64*(n-1)) template <typename T = engine_t, typename std::enable_if<!std::is_same_v< T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr> rng_generator(std::uint64_t seed, std::initializer_list<std::uint64_t> num_to_skip) { _engine = engine_t(seed, num_to_skip); } /// Constructor of rng_generator if engine type is mcg59 /// \param [in] seed The seed to initialize the engine state. /// \param [in] num_to_skip Set the number of elements need to be skipped. 
template <typename T = engine_t, typename std::enable_if<std::is_same_v< T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr> rng_generator(std::uint64_t seed, std::uint64_t num_to_skip) { _engine = engine_t(seed, num_to_skip); } /// Generate random number(s) obeys distribution \tparam distr_t. /// \tparam T The distribution of the random number. It can only be /// oneapi::mkl::rng::device::bits<std::uint32_t>, /// oneapi::mkl::rng::device::uniform_bits<std::uint32_t>, /// oneapi::mkl::rng::device::gaussian<float>, /// oneapi::mkl::rng::device::gaussian<double>, /// oneapi::mkl::rng::device::lognormal<float>, /// oneapi::mkl::rng::device::lognormal<double>, /// oneapi::mkl::rng::device::poisson<std::uint32_t>, /// oneapi::mkl::rng::device::uniform<float> or /// oneapi::mkl::rng::device::uniform<double> /// \tparam vec_size The length of the return vector. It can only be 1, 2 /// or 4. /// \param distr_params The parameter(s) for lognormal or poisson /// distribution. /// \return The vector of the random number(s). template <typename distr_t, int vec_size, class... distr_params_t> auto generate(distr_params_t... distr_params) { static_assert(vec_size == 1 || vec_size == 2 || vec_size == 4, "vec_size is not supported."); static_assert( std::disjunction_v< std::is_same<distr_t, oneapi::mkl::rng::device::bits<std::uint32_t>>, std::is_same<distr_t, oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>, std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<float>>, std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<double>>, std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<float>>, std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<double>>, std::is_same<distr_t, oneapi::mkl::rng::device::poisson<std::uint32_t>>, std::is_same<distr_t, oneapi::mkl::rng::device::uniform<float>>, std::is_same<distr_t, oneapi::mkl::rng::device::uniform<double>>>, "distribution is not supported."); if constexpr (std::is_same_v< distr_t, oneapi::mkl::rng::device::bits<std::uint32_t>>) { return generate_vec<vec_size>(_distr_bits); } if constexpr (std::is_same_v< distr_t, oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>) { return generate_vec<vec_size>(_distr_uniform_bits); } if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::gaussian<float>>) { return generate_vec<vec_size>(_distr_gaussian_float); } if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::gaussian<double>>) { return generate_vec<vec_size>(_distr_gaussian_double); } if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::lognormal<float>>) { return generate_vec<vec_size>(_distr_lognormal_float, distr_params..., 0.0f, 1.0f); } if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::lognormal<double>>) { return generate_vec<vec_size>(_distr_lognormal_double, distr_params..., 0.0, 1.0); } if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::poisson< std::uint32_t>>) { return generate_vec<vec_size>(_distr_poisson, distr_params...); } if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::uniform<float>>) { return generate_vec<vec_size>(_distr_uniform_float); } if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::uniform<double>>) { return generate_vec<vec_size>(_distr_uniform_double); } } /// Get the random number generator engine. /// \return The reference of the internal random number generator engine. engine_t &get_engine() { return _engine; } private: template <int vec_size, typename distr_t, class... 
distr_params_t> auto generate_vec(distr_t &distr, distr_params_t... distr_params) { if constexpr (sizeof...(distr_params_t)) { typename distr_t::param_type pt(distr_params...); distr.param(pt); } if constexpr (vec_size == 4) { if constexpr (_is_engine_vec_size_one) { sycl::vec<typename distr_t::result_type, 4> res; res.x() = oneapi::mkl::rng::device::generate(distr, _engine); res.y() = oneapi::mkl::rng::device::generate(distr, _engine); res.z() = oneapi::mkl::rng::device::generate(distr, _engine); res.w() = oneapi::mkl::rng::device::generate(distr, _engine); return res; } else { return oneapi::mkl::rng::device::generate(distr, _engine); } } else if constexpr (vec_size == 1) { if constexpr (_is_engine_vec_size_one) { return oneapi::mkl::rng::device::generate(distr, _engine); } else { return oneapi::mkl::rng::device::generate_single(distr, _engine); } } else if constexpr (vec_size == 2) { if constexpr (_is_engine_vec_size_one) { sycl::vec<typename distr_t::result_type, 2> res; res.x() = oneapi::mkl::rng::device::generate(distr, _engine); res.y() = oneapi::mkl::rng::device::generate(distr, _engine); return res; } else { sycl::vec<typename distr_t::result_type, 2> res; res.x() = oneapi::mkl::rng::device::generate_single(distr, _engine); res.y() = oneapi::mkl::rng::device::generate_single(distr, _engine); return res; } } } }; } // namespace device #endif namespace host { namespace detail { class rng_generator_base { public: /// Set the seed of host rng_generator. /// \param seed The engine seed. virtual void set_seed(const std::uint64_t seed) = 0; /// Set the dimensions of host rng_generator. /// \param dimensions The engine dimensions. virtual void set_dimensions(const std::uint32_t dimensions) = 0; /// Set the queue of host rng_generator. /// \param queue The engine queue. virtual void set_queue(sycl::queue *queue) = 0; /// Generate unsigned int random number(s) with 'uniform_bits' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. virtual inline void generate_uniform_bits(unsigned int *output, std::int64_t n) = 0; /// Generate unsigned long long random number(s) with 'uniform_bits' /// distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. virtual inline void generate_uniform_bits(unsigned long long *output, std::int64_t n) = 0; /// Generate float random number(s) with 'lognormal' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param m Mean of associated normal distribution /// \param s Standard deviation of associated normal distribution. virtual inline void generate_lognormal(float *output, std::int64_t n, float m, float s) = 0; /// Generate double random number(s) with 'lognormal' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param m Mean of associated normal distribution /// \param s Standard deviation of associated normal distribution. virtual inline void generate_lognormal(double *output, std::int64_t n, double m, double s) = 0; /// Generate float random number(s) with 'gaussian' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param mean Mean of normal distribution /// \param stddev Standard deviation of normal distribution. 
virtual inline void generate_gaussian(float *output, std::int64_t n, float mean, float stddev) = 0; /// Generate double random number(s) with 'gaussian' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param mean Mean of normal distribution /// \param stddev Standard deviation of normal distribution. virtual inline void generate_gaussian(double *output, std::int64_t n, double mean, double stddev) = 0; /// Generate unsigned int random number(s) with 'poisson' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param lambda Lambda for the Poisson distribution. virtual inline void generate_poisson(unsigned int *output, std::int64_t n, double lambda) = 0; /// Generate float random number(s) with 'uniform' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. virtual inline void generate_uniform(float *output, std::int64_t n) = 0; /// Generate double random number(s) with 'uniform' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. virtual inline void generate_uniform(double *output, std::int64_t n) = 0; /// Skip ahead several random number(s). /// \param num_to_skip The number of random numbers to be skipped. virtual void skip_ahead(const std::uint64_t num_to_skip) = 0; protected: sycl::queue *_queue{&dpct::get_default_queue()}; std::uint64_t _seed{0}; std::uint32_t _dimensions{1}; }; /// The random number generator on host. template <typename engine_t = oneapi::mkl::rng::philox4x32x10> class rng_generator : public rng_generator_base { public: /// Constructor of rng_generator. rng_generator() : _engine(create_engine(_queue, _seed, _dimensions)) {} /// Set the seed of host rng_generator. /// \param seed The engine seed. void set_seed(const std::uint64_t seed) { if (seed == _seed) { return; } _seed = seed; _engine = create_engine(_queue, _seed, _dimensions); } /// Set the dimensions of host rng_generator. /// \param dimensions The engine dimensions. void set_dimensions(const std::uint32_t dimensions) { if (dimensions == _dimensions) { return; } _dimensions = dimensions; _engine = create_engine(_queue, _seed, _dimensions); } /// Set the queue of host rng_generator. /// \param queue The engine queue. void set_queue(sycl::queue *queue) { if (queue == _queue) { return; } _queue = queue; _engine = create_engine(_queue, _seed, _dimensions); } /// Generate unsigned int random number(s) with 'uniform_bits' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. inline void generate_uniform_bits(unsigned int *output, std::int64_t n) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) " "Interfaces Project does not support this API."); #else static_assert(sizeof(unsigned int) == sizeof(std::uint32_t)); generate<oneapi::mkl::rng::uniform_bits<std::uint32_t>>( (std::uint32_t *)output, n); #endif } /// Generate unsigned long long random number(s) with 'uniform_bits' /// distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. 
inline void generate_uniform_bits(unsigned long long *output, std::int64_t n) { #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) " "Interfaces Project does not support this API."); #else static_assert(sizeof(unsigned long long) == sizeof(std::uint64_t)); generate<oneapi::mkl::rng::uniform_bits<std::uint64_t>>( (std::uint64_t *)output, n); #endif } /// Generate float random number(s) with 'lognormal' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param m Mean of associated normal distribution /// \param s Standard deviation of associated normal distribution. inline void generate_lognormal(float *output, std::int64_t n, float m, float s) { generate<oneapi::mkl::rng::lognormal<float>>(output, n, m, s); } /// Generate double random number(s) with 'lognormal' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param m Mean of associated normal distribution /// \param s Standard deviation of associated normal distribution. inline void generate_lognormal(double *output, std::int64_t n, double m, double s) { generate<oneapi::mkl::rng::lognormal<double>>(output, n, m, s); } /// Generate float random number(s) with 'gaussian' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param mean Mean of normal distribution /// \param stddev Standard deviation of normal distribution. inline void generate_gaussian(float *output, std::int64_t n, float mean, float stddev) { generate<oneapi::mkl::rng::gaussian<float>>(output, n, mean, stddev); } /// Generate double random number(s) with 'gaussian' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param mean Mean of normal distribution /// \param stddev Standard deviation of normal distribution. inline void generate_gaussian(double *output, std::int64_t n, double mean, double stddev) { generate<oneapi::mkl::rng::gaussian<double>>(output, n, mean, stddev); } /// Generate unsigned int random number(s) with 'poisson' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. /// \param lambda Lambda for the Poisson distribution. inline void generate_poisson(unsigned int *output, std::int64_t n, double lambda) { generate<oneapi::mkl::rng::poisson<unsigned int>>(output, n, lambda); } /// Generate float random number(s) with 'uniform' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. inline void generate_uniform(float *output, std::int64_t n) { generate<oneapi::mkl::rng::uniform<float>>(output, n); } /// Generate double random number(s) with 'uniform' distribution. /// \param output The pointer of the first random number. /// \param n The number of random numbers. inline void generate_uniform(double *output, std::int64_t n) { generate<oneapi::mkl::rng::uniform<double>>(output, n); } /// Skip ahead several random number(s). /// \param num_to_skip The number of random numbers to be skipped. 
void skip_ahead(const std::uint64_t num_to_skip) { #ifndef __INTEL_MKL__ oneapi::mkl::rng::skip_ahead(_engine, num_to_skip); #else if constexpr (std::is_same_v<engine_t, oneapi::mkl::rng::mt2203>) throw std::runtime_error("no skip_ahead method of mt2203 engine."); else oneapi::mkl::rng::skip_ahead(_engine, num_to_skip); #endif } private: static inline engine_t create_engine(sycl::queue *queue, const std::uint64_t seed, const std::uint32_t dimensions) { #ifdef __INTEL_MKL__ return std::is_same_v<engine_t, oneapi::mkl::rng::sobol> ? engine_t(*queue, dimensions) : engine_t(*queue, seed); #else return engine_t(*queue, seed); #endif } template <typename distr_t, typename buffer_t, class... distr_params_t> void generate(buffer_t *output, const std::int64_t n, const distr_params_t... distr_params) { auto output_buf = dpct::detail::get_memory(output); oneapi::mkl::rng::generate(distr_t(distr_params...), _engine, n, output_buf); } engine_t _engine{}; }; } // namespace detail } // namespace host enum class random_engine_type { philox4x32x10, mrg32k3a, mt2203, mt19937, sobol, mcg59 }; typedef std::shared_ptr<rng::host::detail::rng_generator_base> host_rng_ptr; /// Create a host random number generator. /// \param type The random engine type. /// \return The pointer of random number generator. inline host_rng_ptr create_host_rng(const random_engine_type type) { switch (type) { case random_engine_type::philox4x32x10: return std::make_shared< rng::host::detail::rng_generator<oneapi::mkl::rng::philox4x32x10>>(); case random_engine_type::mrg32k3a: return std::make_shared< rng::host::detail::rng_generator<oneapi::mkl::rng::mrg32k3a>>(); #ifndef __INTEL_MKL__ throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) " "Interfaces Project does not support this API."); #else case random_engine_type::mt2203: return std::make_shared< rng::host::detail::rng_generator<oneapi::mkl::rng::mt2203>>(); case random_engine_type::mt19937: return std::make_shared< rng::host::detail::rng_generator<oneapi::mkl::rng::mt19937>>(); case random_engine_type::sobol: return std::make_shared< rng::host::detail::rng_generator<oneapi::mkl::rng::sobol>>(); case random_engine_type::mcg59: return std::make_shared< rng::host::detail::rng_generator<oneapi::mkl::rng::mcg59>>(); #endif } } } // namespace rng } // namespace dpct #endif // __DPCT_RNG_UTILS_HPP__
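// Usage sketch: a minimal host-side call pattern for the generators above.
// The queue `q`, the USM buffer `data`, and the element count are
// hypothetical; only the dpct::rng calls themselves come from this header.
//
//   #include <sycl/sycl.hpp>
//
//   void fill_uniform(sycl::queue &q, float *data, std::int64_t n) {
//     // `data` is assumed to be a USM allocation accessible to `q`.
//     dpct::rng::host_rng_ptr rng = dpct::rng::create_host_rng(
//         dpct::rng::random_engine_type::philox4x32x10);
//     rng->set_queue(&q);
//     rng->set_seed(1234ULL);
//     rng->generate_uniform(data, n);  // uniform floats in [0, 1)
//     q.wait();                        // generation is asynchronous
//   }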
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/dpl_extras/numeric.h
//==---- numeric.h --------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_NUMERIC_H__ #define __DPCT_NUMERIC_H__ namespace dpct { template <typename Policy, typename InputIt1, typename InputIt2, typename T> T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1, InputIt2 first2, T init) { return std::transform_reduce(std::forward<Policy>(policy), first1, last1, first2, init); } template <typename Policy, typename InputIt1, typename InputIt2, typename T, typename BinaryOperation1, typename BinaryOperation2> T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1, InputIt2 first2, T init, BinaryOperation1 op1, BinaryOperation2 op2) { return std::transform_reduce(std::forward<Policy>(policy), first1, last1, first2, init, op1, op2); } } // end namespace dpct #endif
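// Usage sketch: dpct::inner_product simply forwards to std::transform_reduce,
// so it can be driven by a oneDPL execution policy. The queue `q` and the
// pointers `a`/`b` below are hypothetical and assume device-accessible (USM)
// memory:
//
//   #include <oneapi/dpl/execution>
//   #include <oneapi/dpl/numeric>
//
//   float dot(sycl::queue &q, const float *a, const float *b, int n) {
//     return dpct::inner_product(
//         oneapi::dpl::execution::make_device_policy(q), a, a + n, b, 0.0f);
//   }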
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/dpl_extras/iterators.h
//==---- iterators.h ------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_ITERATORS_H__ #define __DPCT_ITERATORS_H__ #include <oneapi/dpl/iterator> #include "functional.h" namespace dpct { namespace internal { // Wrapper class returned from a dereferenced transform_iterator which was // created using // make_transform_output_iterator(). Used to apply the supplied transform // function when writing into an object of this class. // // Example: // int a[] = {0, 1, 2, 3, 4}; // int* p = a; // auto f = [](auto v) {return v*v;}; // auto tr_out = dpct::make_transform_output_iterator(p+1, f); // auto wrap = *tr_out; // wrap is a transform_output_ref_wrapper // std::cout<<*(p+1)<<std::endl; // '1' // wrap = 2; // apply function, store 2*2=4 // std::cout<<*(p+1)<<std::endl; // '4' template <typename T, typename _UnaryFunc> class transform_output_ref_wrapper { private: T __my_reference_; _UnaryFunc __my_unary_func_; public: template <typename U> transform_output_ref_wrapper(U &&__reference, _UnaryFunc __unary_func) : __my_reference_(std::forward<U>(__reference)), __my_unary_func_(__unary_func) {} // When writing to an object of this type, apply the supplied unary function, // then write to the wrapped reference template <typename UnaryInputType> transform_output_ref_wrapper &operator=(const UnaryInputType &e) { __my_reference_ = __my_unary_func_(e); return *this; } }; // Unary functor to create a transform_output_reference_wrapper when a // transform_iterator is dereferenced, so that a // the supplied unary function may be applied on write, resulting in a // transform_output_iterator template <typename _UnaryFunc> struct _Unary_Out { _Unary_Out(_UnaryFunc __f_) : __f(__f_) {} _UnaryFunc __f; template <typename T> auto operator()(T &&val) const { return transform_output_ref_wrapper<T, _UnaryFunc>(std::forward<T>(val), __f); } }; } // end namespace internal using std::advance; using std::distance; template <typename T> oneapi::dpl::counting_iterator<T> make_counting_iterator(const T &input) { return oneapi::dpl::counting_iterator<T>(input); } template <typename _Tp> class constant_iterator { public: typedef std::false_type is_hetero; typedef std::true_type is_passed_directly; typedef std::ptrdiff_t difference_type; typedef _Tp value_type; typedef _Tp *pointer; // There is no storage behind the iterator, so we return a value instead of // reference. typedef const _Tp reference; typedef const _Tp const_reference; typedef std::random_access_iterator_tag iterator_category; explicit constant_iterator(_Tp __init) : __my_value_(__init), __my_counter_(0) {} private: // used to construct iterator instances with different counter values required // by arithmetic operators constant_iterator(const _Tp &__value, const difference_type &__offset) : __my_value_(__value), __my_counter_(__offset) {} public: // non-const variants of access operators are not provided so unintended // writes are caught at compile time. 
const_reference operator*() const { return __my_value_; } const_reference operator[](difference_type) const { return __my_value_; } difference_type operator-(const constant_iterator &__it) const { return __my_counter_ - __it.__my_counter_; } constant_iterator &operator+=(difference_type __forward) { __my_counter_ += __forward; return *this; } constant_iterator &operator-=(difference_type __backward) { return *this += -__backward; } constant_iterator &operator++() { return *this += 1; } constant_iterator &operator--() { return *this -= 1; } constant_iterator operator++(int) { constant_iterator __it(*this); ++(*this); return __it; } constant_iterator operator--(int) { constant_iterator __it(*this); --(*this); return __it; } constant_iterator operator-(difference_type __backward) const { return constant_iterator(__my_value_, __my_counter_ - __backward); } constant_iterator operator+(difference_type __forward) const { return constant_iterator(__my_value_, __my_counter_ + __forward); } friend constant_iterator operator+(difference_type __forward, const constant_iterator __it) { return __it + __forward; } bool operator==(const constant_iterator &__it) const { return __my_value_ == __it.__my_value_ && this->__my_counter_ == __it.__my_counter_; } bool operator!=(const constant_iterator &__it) const { return !(*this == __it); } bool operator<(const constant_iterator &__it) const { return *this - __it < 0; } bool operator>(const constant_iterator &__it) const { return __it < *this; } bool operator<=(const constant_iterator &__it) const { return !(*this > __it); } bool operator>=(const constant_iterator &__it) const { return !(*this < __it); } private: _Tp __my_value_; uint64_t __my_counter_; }; template <typename _Tp> constant_iterator<_Tp> make_constant_iterator(_Tp __value) { return constant_iterator<_Tp>(__value); } // key_value_pair class to represent a key and value, specifically a // dereferenced arg_index_input_iterator template <typename _KeyTp, typename _ValueTp> class key_value_pair { public: key_value_pair() = default; key_value_pair(const _KeyTp &_key, const _ValueTp &_value) : key(_key), value(_value) {} bool operator==(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const { return (key == _kvp.key) && (value == _kvp.value); } bool operator!=(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const { return (key != _kvp.key) || (value != _kvp.value); } _KeyTp key; _ValueTp value; }; namespace detail { template <typename KeyTp, typename _ValueTp> struct make_key_value_pair { template <typename ValRefTp> key_value_pair<KeyTp, _ValueTp> operator()(const oneapi::dpl::__internal::tuple<KeyTp, ValRefTp> &tup) const { return ::dpct::key_value_pair<KeyTp, _ValueTp>(::std::get<0>(tup), ::std::get<1>(tup)); } }; template <class T> struct __zip_iterator_impl; template <class... Ts> struct __zip_iterator_impl<std::tuple<Ts...>> { using type = oneapi::dpl::zip_iterator<Ts...>; }; } // end namespace detail // dpct::zip_iterator can only accept std::tuple type as template argument for // compatibility purpose. Please use oneapi::dpl::zip_iterator if you want to // pass iterator's types directly. template <typename... Ts> using zip_iterator = typename detail::__zip_iterator_impl<Ts...>::type; // arg_index_input_iterator is an iterator over a input iterator, with a index. 
// When dereferenced, it returns a key_value_pair, which can be interrogated for // the index key or the value from the input iterator template <typename InputIteratorT, typename OffsetT = ptrdiff_t, typename OutputValueT = typename ::std::iterator_traits<InputIteratorT>::value_type> class arg_index_input_iterator : public oneapi::dpl::transform_iterator< oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>, InputIteratorT>, detail::make_key_value_pair<OffsetT, OutputValueT>> { using arg_index_input_iterator_wrap = oneapi::dpl::transform_iterator< oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>, InputIteratorT>, detail::make_key_value_pair<OffsetT, OutputValueT>>; public: typedef OffsetT difference_type; // signal to __get_sycl_range that this iterator is as a direct pass iterator using is_zip = ::std::true_type; arg_index_input_iterator(const arg_index_input_iterator_wrap &__arg_wrap) : arg_index_input_iterator_wrap(__arg_wrap) {} arg_index_input_iterator(InputIteratorT __iter) : arg_index_input_iterator_wrap( oneapi::dpl::make_zip_iterator( oneapi::dpl::counting_iterator(OffsetT(0)), __iter), detail::make_key_value_pair<OffsetT, OutputValueT>()) {} arg_index_input_iterator &operator=(const arg_index_input_iterator &__input) { arg_index_input_iterator_wrap::operator=(__input); return *this; } arg_index_input_iterator &operator++() { arg_index_input_iterator_wrap::operator++(); return *this; } arg_index_input_iterator &operator--() { arg_index_input_iterator_wrap::operator--(); return *this; } arg_index_input_iterator operator++(int) { arg_index_input_iterator __it(*this); ++(*this); return __it; } arg_index_input_iterator operator--(int) { arg_index_input_iterator __it(*this); --(*this); return __it; } arg_index_input_iterator operator+(difference_type __forward) const { return arg_index_input_iterator( arg_index_input_iterator_wrap::operator+(__forward)); } arg_index_input_iterator operator-(difference_type __backward) const { return arg_index_input_iterator( arg_index_input_iterator_wrap::operator-(__backward)); } arg_index_input_iterator &operator+=(difference_type __forward) { arg_index_input_iterator_wrap::operator+=(__forward); return *this; } arg_index_input_iterator &operator-=(difference_type __backward) { arg_index_input_iterator_wrap::operator-=(__backward); return *this; } friend arg_index_input_iterator operator+(difference_type __forward, const arg_index_input_iterator &__it) { return __it + __forward; } difference_type operator-(const arg_index_input_iterator &__it) const { return arg_index_input_iterator_wrap::operator-(__it); } bool operator==(const arg_index_input_iterator &__it) const { return arg_index_input_iterator_wrap::operator==(__it); } bool operator!=(const arg_index_input_iterator &__it) const { return !(*this == __it); } bool operator<(const arg_index_input_iterator &__it) const { return *this - __it < 0; } bool operator>(const arg_index_input_iterator &__it) const { return __it < *this; } bool operator<=(const arg_index_input_iterator &__it) const { return !(*this > __it); } bool operator>=(const arg_index_input_iterator &__it) const { return !(*this < __it); } // returns an arg_index_input_iterator with the same iter position, but a // count reset to 0 arg_index_input_iterator create_normalized() { return arg_index_input_iterator( ::std::get<1>(arg_index_input_iterator_wrap::base().base())); } }; template <typename IterT> struct io_iterator_pair { inline io_iterator_pair() : selector(false) {} inline io_iterator_pair(const 
IterT &first, const IterT &second) : selector(false) { iter[0] = first; iter[1] = second; } inline IterT first() const { return selector ? iter[1] : iter[0]; } inline IterT second() const { return selector ? iter[0] : iter[1]; } inline void swap() { selector = !selector; } bool selector; IterT iter[2]; }; template <typename _Iter, typename _UnaryFunc> auto make_transform_output_iterator(_Iter __it, _UnaryFunc __unary_func) { return oneapi::dpl::transform_iterator( __it, internal::_Unary_Out<_UnaryFunc>(__unary_func)); } } // end namespace dpct #endif
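// Usage sketches for two of the adaptors above; the values involved are
// hypothetical:
//
//   // constant_iterator: every position yields the same value.
//   auto c = dpct::make_constant_iterator(42);
//   int x = c[100];     // 42
//   int y = *(c + 7);   // 42
//
//   // io_iterator_pair: ping-pong between two buffers across passes.
//   int *d_in = ..., *d_tmp = ...;   // device allocations (elided)
//   dpct::io_iterator_pair<int *> bufs(d_in, d_tmp);
//   // read from bufs.first(), write to bufs.second(), then flip the roles:
//   bufs.swap();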
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/dpl_extras/algorithm.h
//==---- algorithm.h ------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_ALGORITHM_H__ #define __DPCT_ALGORITHM_H__ #include <oneapi/dpl/execution> #include <oneapi/dpl/algorithm> #include <oneapi/dpl/numeric> #include "functional.h" #include "iterators.h" #include "vector.h" namespace dpct { template <typename Policy, typename Iter1, typename Iter2, typename Pred, typename T> void replace_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p, const T &new_value) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); std::transform(std::forward<Policy>(policy), first, last, mask, first, internal::replace_if_fun<typename std::iterator_traits<Iter1>::value_type, Pred>(p, new_value)); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3, typename Pred, typename T> Iter3 replace_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Iter3 result, Pred p, const T &new_value) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); return std::transform(std::forward<Policy>(policy), first, last, mask, result, internal::replace_if_fun<typename std::iterator_traits<Iter3>::value_type, Pred>(p, new_value)); } template <typename Policy, typename Iter1, typename Iter2, typename Pred> internal::enable_if_hetero_execution_policy<Policy, Iter1> remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using oneapi::dpl::make_zip_iterator; using policy_type = typename std::decay<Policy>::type; using internal::__buffer; using ValueType = typename std::iterator_traits<Iter1>::value_type; __buffer<ValueType> _tmp(std::distance(first, last)); auto end = std::copy_if( std::forward<Policy>(policy), make_zip_iterator(first, mask), make_zip_iterator(last, mask + std::distance(first, last)), make_zip_iterator(_tmp.get(), oneapi::dpl::discard_iterator()), internal::negate_predicate_key_fun<Pred>(p)); return std::copy(std::forward<Policy>(policy), _tmp.get(), std::get<0>(end.base()), first); } template <typename Policy, typename Iter1, typename Iter2, typename Pred> typename std::enable_if<!internal::is_hetero_execution_policy< typename std::decay<Policy>::type>::value, Iter1>::type remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && 
std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using oneapi::dpl::make_zip_iterator; using policy_type = typename std::decay<Policy>::type; using ValueType = typename std::iterator_traits<Iter1>::value_type; std::vector<ValueType> _tmp(std::distance(first, last)); auto end = std::copy_if( policy, make_zip_iterator(first, mask), make_zip_iterator(last, mask + std::distance(first, last)), make_zip_iterator(_tmp.begin(), oneapi::dpl::discard_iterator()), internal::negate_predicate_key_fun<Pred>(p)); return std::copy(policy, _tmp.begin(), std::get<0>(end.base()), first); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3, typename Pred> Iter3 remove_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Iter3 result, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using oneapi::dpl::make_zip_iterator; auto ret_val = std::remove_copy_if( std::forward<Policy>(policy), make_zip_iterator(first, mask), make_zip_iterator(last, mask + std::distance(first, last)), make_zip_iterator(result, oneapi::dpl::discard_iterator()), internal::predicate_key_fun<Pred>(p)); return std::get<0>(ret_val.base()); } template <class Policy, class Iter1, class Iter2, class BinaryPred> std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first, BinaryPred binary_pred) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::unique( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first, values_first), oneapi::dpl::make_zip_iterator( keys_last, values_first + std::distance(keys_first, keys_last)), internal::compare_key_fun<BinaryPred>(binary_pred)); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_first, values_first), ret_val); return std::make_pair(keys_first + n1, values_first + n1); } template <class Policy, class Iter1, class Iter2> std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using T = typename std::iterator_traits<Iter1>::value_type; return unique(std::forward<Policy>(policy), keys_first, keys_last, values_first, std::equal_to<T>()); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class BinaryPred> std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first, Iter3 keys_result, Iter4 values_result, BinaryPred binary_pred) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, 
std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::unique_copy( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first, values_first), oneapi::dpl::make_zip_iterator( keys_last, values_first + std::distance(keys_first, keys_last)), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::unique_fun<BinaryPred>(binary_pred)); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4> std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first, Iter3 keys_result, Iter4 values_result) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using T = typename std::iterator_traits<Iter1>::value_type; auto comp = std::equal_to<T>(); return unique_copy(std::forward<Policy>(policy), keys_first, keys_last, values_first, keys_result, values_result, comp); } template <typename Policy, typename Iter, typename Pred> Iter partition_point(Policy &&policy, Iter first, Iter last, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); if (std::is_partitioned(std::forward<Policy>(policy), first, last, p)) return std::find_if_not(std::forward<Policy>(policy), first, last, p); else return first; } template <typename Policy, typename Iter1, typename Iter2, typename Iter3, typename Pred> Iter3 copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Iter3 result, Pred pred) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::copy_if( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask), oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)), oneapi::dpl::make_zip_iterator(result, oneapi::dpl::discard_iterator()), internal::predicate_key_fun<Pred>(pred)); return std::get<0>(ret_val.base()); } template <class Policy, class Iter1, class Iter2, class UnaryOperation, class Pred> Iter2 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 result, UnaryOperation unary_op, Pred pred) { static_assert( 
std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using T = typename std::iterator_traits<Iter1>::value_type; const auto n = std::distance(first, last); std::for_each( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, result), oneapi::dpl::make_zip_iterator(first, result) + n, internal::transform_if_fun<T, Pred, UnaryOperation>(pred, unary_op)); return result + n; } template <class Policy, class Iter1, class Iter2, class Iter3, class UnaryOperation, class Pred> Iter3 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Iter3 result, UnaryOperation unary_op, Pred pred) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using T = typename std::iterator_traits<Iter1>::value_type; using Ref1 = typename std::iterator_traits<Iter1>::reference; using Ref2 = typename std::iterator_traits<Iter2>::reference; const auto n = std::distance(first, last); std::for_each( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask, result), oneapi::dpl::make_zip_iterator(first, mask, result) + n, internal::transform_if_unary_zip_mask_fun<T, Pred, UnaryOperation>( pred, unary_op)); return result + n; } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class BinaryOperation, class Pred> Iter4 transform_if(Policy &&policy, Iter1 first1, Iter1 last1, Iter2 first2, Iter3 mask, Iter4 result, BinaryOperation binary_op, Pred pred) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); const auto n = std::distance(first1, last1); using ZipIterator = typename oneapi::dpl::zip_iterator<Iter1, Iter2, Iter3, Iter4>; using T = typename std::iterator_traits<ZipIterator>::value_type; std::for_each( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first1, first2, mask, result), oneapi::dpl::make_zip_iterator(last1, first2 + n, mask + n, result + n), internal::transform_if_zip_mask_fun<T, Pred, BinaryOperation>(pred, binary_op)); return result + n; } template <typename Policy, typename InputIter1, typename InputIter2, typename OutputIter> void scatter(Policy &&policy, InputIter1 first, InputIter1 last, InputIter2 map, OutputIter result) { static_assert( std::is_same<typename std::iterator_traits<InputIter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<InputIter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<OutputIter>::iterator_category, 
std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); oneapi::dpl::copy(policy, first, last, oneapi::dpl::make_permutation_iterator(result, map)); } template <typename Policy, typename InputIter1, typename InputIter2, typename OutputIter> OutputIter gather(Policy &&policy, InputIter1 map_first, InputIter1 map_last, InputIter2 input_first, OutputIter result) { static_assert( std::is_same<typename std::iterator_traits<InputIter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<InputIter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<OutputIter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto perm_begin = oneapi::dpl::make_permutation_iterator(input_first, map_first); const int n = ::std::distance(map_first, map_last); return oneapi::dpl::copy(policy, perm_begin, perm_begin + n, result); } template <typename Policy, typename InputIter1, typename InputIter2, typename InputIter3, typename OutputIter, typename Predicate> void scatter_if(Policy &&policy, InputIter1 first, InputIter1 last, InputIter2 map, InputIter3 mask, OutputIter result, Predicate pred) { static_assert( std::is_same<typename std::iterator_traits<InputIter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<InputIter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<InputIter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<OutputIter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); transform_if(policy, first, last, mask, oneapi::dpl::make_permutation_iterator(result, map), [=](auto &&v) { return v; }, [=](auto &&m) { return pred(m); }); } template <typename Policy, typename InputIter1, typename InputIter2, typename InputIter3, typename OutputIter, typename Predicate> OutputIter gather_if(Policy &&policy, InputIter1 map_first, InputIter1 map_last, InputIter2 mask, InputIter3 input_first, OutputIter result, Predicate pred) { static_assert( std::is_same<typename std::iterator_traits<InputIter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<InputIter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<InputIter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same< typename std::iterator_traits<OutputIter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto perm_begin = oneapi::dpl::make_permutation_iterator(input_first, map_first); const int n = std::distance(map_first, map_last); return transform_if(policy, perm_begin, perm_begin + n, mask, result, [=](auto &&v) { return v; }, [=](auto &&m) { return pred(m); }); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3, typename Iter4, typename Iter5, typename Iter6> std::pair<Iter5, Iter6> merge(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result) { static_assert( std::is_same<typename 
std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto n1 = std::distance(keys_first1, keys_last1); auto n2 = std::distance(keys_first2, keys_last2); std::merge(std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<>()); return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3, typename Iter4, typename Iter5, typename Iter6, typename Comp> std::pair<Iter5, Iter6> merge(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result, Comp comp) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto n1 = std::distance(keys_first1, keys_last1); auto n2 = std::distance(keys_first2, keys_last2); std::merge(std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<Comp>(comp)); return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2); } template <class Policy, class Iter, class T> void iota(Policy &&policy, Iter first, Iter last, T init, T step) { static_assert( std::is_same<typename std::iterator_traits<Iter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using DiffSize = typename std::iterator_traits<Iter>::difference_type; std::transform( std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0), oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)), first, internal::sequence_fun<T>(init, step)); } template <class 
Policy, class Iter, class T> void iota(Policy &&policy, Iter first, Iter last, T init) { static_assert( std::is_same<typename std::iterator_traits<Iter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); iota(std::forward<Policy>(policy), first, last, init, T(1)); } template <class Policy, class Iter> void iota(Policy &&policy, Iter first, Iter last) { static_assert( std::is_same<typename std::iterator_traits<Iter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); using DiffSize = typename std::iterator_traits<Iter>::difference_type; iota(std::forward<Policy>(policy), first, last, DiffSize(0), DiffSize(1)); } template <class Policy, class Iter1, class Iter2, class Comp> void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first, Comp comp) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto first = oneapi::dpl::make_zip_iterator(keys_first, values_first); auto last = first + std::distance(keys_first, keys_last); std::sort(std::forward<Policy>(policy), first, last, internal::compare_key_fun<Comp>(comp)); } template <class Policy, class Iter1, class Iter2> void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); sort(std::forward<Policy>(policy), keys_first, keys_last, values_first, internal::__less()); } template <class Policy, class Iter1, class Iter2, class Comp> void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first, Comp comp) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); std::stable_sort( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first, values_first), oneapi::dpl::make_zip_iterator( keys_last, values_first + std::distance(keys_first, keys_last)), internal::compare_key_fun<Comp>(comp)); } template <class Policy, class Iter1, class Iter2> void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last, Iter2 values_first) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); stable_sort(std::forward<Policy>(policy), keys_first, keys_last, values_first, internal::__less()); } template <class Policy, class Iter, class Operator> void for_each_index(Policy &&policy, Iter first, Iter last, Operator unary_op) { static_assert( std::is_same<typename std::iterator_traits<Iter>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms 
must be random-access iterators."); using DiffSize = typename std::iterator_traits<Iter>::difference_type; std::transform( std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0), oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)), first, unary_op); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5> std::pair<Iter4, Iter5> set_intersection(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 keys_result, Iter5 values_result) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_intersection( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, oneapi::dpl::discard_iterator()), oneapi::dpl::make_zip_iterator(keys_last2, oneapi::dpl::discard_iterator()), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<>()); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5, class Comp> std::pair<Iter4, Iter5> set_intersection(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 keys_result, Iter5 values_result, Comp comp) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_intersection( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, oneapi::dpl::discard_iterator()), oneapi::dpl::make_zip_iterator(keys_last2, oneapi::dpl::discard_iterator()), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<Comp>(comp)); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5, class Iter6> 
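// The pair-based set operations below all follow the same pattern: keys and
// their associated values are zipped together, the corresponding std:: set
// algorithm runs with a key-only comparator (internal::compare_key_fun), and
// the distance from the returned zip iterator gives how many (key, value)
// pairs were written to the result ranges.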
std::pair<Iter5, Iter6> set_symmetric_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_symmetric_difference( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator( keys_last2, values_first2 + std::distance(keys_first2, keys_last2)), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<>()); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5, class Iter6, class Comp> std::pair<Iter5, Iter6> set_symmetric_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result, Comp comp) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_symmetric_difference( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator( keys_last2, values_first2 + std::distance(keys_first2, keys_last2)), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<Comp>(comp)); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5, class Iter6> std::pair<Iter5, Iter6> set_difference(Policy &&policy, 
Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_difference( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator( keys_last2, values_first2 + std::distance(keys_first2, keys_last2)), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<>()); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5, class Iter6, class Comp> std::pair<Iter5, Iter6> set_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result, Comp comp) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_difference( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator( keys_last2, values_first2 + std::distance(keys_first2, keys_last2)), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<Comp>(comp)); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5, class Iter6> internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>> set_union(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, 
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_union( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator( keys_last2, values_first2 + std::distance(keys_first2, keys_last2)), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<>()); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4, class Iter5, class Iter6, class Comp> internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>> set_union(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2, Iter5 keys_result, Iter6 values_result, Comp comp) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter5>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter6>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::set_union( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(keys_first1, values_first1), oneapi::dpl::make_zip_iterator( keys_last1, values_first1 + std::distance(keys_first1, keys_last1)), oneapi::dpl::make_zip_iterator(keys_first2, values_first2), oneapi::dpl::make_zip_iterator( keys_last2, values_first2 + std::distance(keys_first2, keys_last2)), oneapi::dpl::make_zip_iterator(keys_result, values_result), internal::compare_key_fun<Comp>(comp)); auto n1 = std::distance( oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val); return std::make_pair(keys_result + n1, values_result + n1); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3, typename Iter4, typename Pred> internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>> stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Iter3 out_true, Iter4 
out_false, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); auto ret_val = std::partition_copy( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask), oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)), oneapi::dpl::make_zip_iterator(out_true, oneapi::dpl::discard_iterator()), oneapi::dpl::make_zip_iterator(out_false, oneapi::dpl::discard_iterator()), internal::predicate_key_fun<Pred>(p)); return std::make_pair(std::get<0>(ret_val.first.base()), std::get<0>(ret_val.second.base())); } template <typename Policy, typename Iter1, typename Iter3, typename Iter4, typename Pred> internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>> stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter3 out_true, Iter4 out_false, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); return std::partition_copy(std::forward<Policy>(policy), first, last, out_true, out_false, p); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3, typename Iter4, typename Pred> internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>> partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Iter3 out_true, Iter4 out_false, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter3>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter4>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); return stable_partition_copy(std::forward<Policy>(policy), first, last, mask, out_true, out_false, p); } template <typename Policy, typename Iter1, typename Iter2, typename Pred> internal::enable_if_hetero_execution_policy<Policy, Iter1> stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); typedef typename std::decay<Policy>::type policy_type; internal::__buffer<typename std::iterator_traits<Iter1>::value_type> _tmp( std::distance(first, last)); std::copy(std::forward<Policy>(policy), mask, mask + std::distance(first, last), _tmp.get()); auto ret_val = 
std::stable_partition(std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, _tmp.get()), oneapi::dpl::make_zip_iterator( last, _tmp.get() + std::distance(first, last)), internal::predicate_key_fun<Pred>(p)); return std::get<0>(ret_val.base()); } template <typename Policy, typename Iter1, typename Iter2, typename Pred> typename std::enable_if<!internal::is_hetero_execution_policy< typename std::decay<Policy>::type>::value, Iter1>::type stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); typedef typename std::decay<Policy>::type policy_type; std::vector<typename std::iterator_traits<Iter1>::value_type> _tmp( std::distance(first, last)); std::copy(std::forward<Policy>(policy), mask, mask + std::distance(first, last), _tmp.begin()); auto ret_val = std::stable_partition( std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, _tmp.begin()), oneapi::dpl::make_zip_iterator(last, _tmp.begin() + std::distance(first, last)), internal::predicate_key_fun<Pred>(p)); return std::get<0>(ret_val.base()); } template <typename Policy, typename Iter1, typename Iter2, typename Pred> internal::enable_if_execution_policy<Policy, Iter1> partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) { static_assert( std::is_same<typename std::iterator_traits<Iter1>::iterator_category, std::random_access_iterator_tag>::value && std::is_same<typename std::iterator_traits<Iter2>::iterator_category, std::random_access_iterator_tag>::value, "Iterators passed to algorithms must be random-access iterators."); return stable_partition(std::forward<Policy>(policy), first, last, mask, p); } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t> inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value && dpct::internal::is_iterator<key_out_t>::value && dpct::internal::is_iterator<value_t>::value && dpct::internal::is_iterator<value_out_t>::value> sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t values_in, value_out_t values_out, int64_t n, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8); template <typename _ExecutionPolicy, typename key_t, typename key_out_t> inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value && dpct::internal::is_iterator<key_out_t>::value> sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8); namespace internal { // Transforms key to a specific bit range and sorts the transformed key template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename transformed_key_t> inline void transform_and_sort(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, bool descending, int begin_bit, int end_bit) { using key_t_value_t = typename std::iterator_traits<key_t>::value_type; auto trans_key = translate_key<key_t_value_t, transformed_key_t>(begin_bit, end_bit); // Use of the comparison operator that is not simply std::greater() or // std::less() will result in // not using radix 
sort which will cost some performance. However, this is // necessary to provide the transformation of the key to the bitrange // desired. auto partial_sort_with_comp = [&](const auto &comp) { return oneapi::dpl::partial_sort_copy( std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out, keys_out + n, [=](const auto a, const auto b) { return comp(trans_key(a), trans_key(b)); }); }; if (descending) partial_sort_with_comp(::std::greater<transformed_key_t>()); else partial_sort_with_comp(::std::less<transformed_key_t>()); } template <typename _ExecutionPolicy, typename key_t, typename key_out_t> inline void sort_only(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, bool descending) { using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type; if constexpr (::std::is_floating_point<key_t_value_t>::value) { if (descending) { // Comparison operator that is not std::greater() ensures stability of // -0.0 and 0.0 // at the cost of some performance because radix sort will not be used. auto comp_descending = [=](const auto a, const auto b) { return a > b; }; oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out, keys_out + n, comp_descending); } else { // Comparison operator that is not std::less() ensures stability of -0.0 // and 0.0 // at the cost of some performance because radix sort will not be used. auto comp_ascending = [=](const auto a, const auto b) { return a < b; }; oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out, keys_out + n, comp_ascending); } } else { if (descending) { oneapi::dpl::partial_sort_copy( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out, keys_out + n, ::std::greater<key_t_value_t>()); } else { oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out, keys_out + n); } } } // Transforms key from a pair to a specific bit range and sorts the pairs by the // transformed key template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename transform_key_t, typename value_t, typename value_out_t> inline void transform_and_sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t values_in, value_out_t values_out, int64_t n, bool descending, int begin_bit, int end_bit) { using key_t_value_t = typename std::iterator_traits<key_t>::value_type; auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in); auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out); auto trans_key = translate_key<key_t_value_t, transform_key_t>(begin_bit, end_bit); // Use of the comparison operator that is not simply std::greater() or // std::less() will result in // not using radix sort which will cost some performance. However, this is // necessary to provide the transformation of the key to the bitrange desired // and also to select the key from the zipped pair. 
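// Illustrative example (hypothetical values): with begin_bit = 0 and
// end_bit = 8, trans_key reduces each key to its low byte, so the comparator
// built below orders zipped (key, value) pairs as
//   comp(trans_key(std::get<0>(a)), trans_key(std::get<0>(b)))
// i.e. by the selected bit range of the key only.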
auto load_val = [=](const auto a) { return trans_key(std::get<0>(a)); }; auto partial_sort_with_comp = [&](const auto &comp) { return oneapi::dpl::partial_sort_copy( std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n, zip_output, zip_output + n, [=](const auto a, const auto b) { return comp(load_val(a), load_val(b)); }); }; if (descending) partial_sort_with_comp(::std::greater<key_t_value_t>()); else partial_sort_with_comp(::std::less<key_t_value_t>()); } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t> inline void sort_only_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t values_in, value_out_t values_out, int64_t n, bool descending) { using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type; auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in); auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out); // Use of the comparison operator that is not simply std::greater() or // std::less() will result in // not using radix sort which will cost some performance. However, this is // necessary to select the key from the zipped pair. auto load_val = [=](const auto a) { return std::get<0>(a); }; auto partial_sort_with_comp = [&](const auto &comp) { return oneapi::dpl::partial_sort_copy( std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n, zip_output, zip_output + n, [=](const auto a, const auto b) { return comp(load_val(a), load_val(b)); }); }; if (descending) partial_sort_with_comp(::std::greater<key_t_value_t>()); else partial_sort_with_comp(::std::less<key_t_value_t>()); } // overload for key_out_t != std::nullptr_t template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t> typename ::std::enable_if<!::std::is_null_pointer<key_out_t>::value>::type sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t values_in, value_out_t values_out, int64_t n, bool descending, int begin_bit, int end_bit) { using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type; int clipped_begin_bit = ::std::max(begin_bit, 0); int clipped_end_bit = ::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8); int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1; auto transform_and_sort_pairs_f = [&](auto x) { using T = typename ::std::decay_t<decltype(x)>; internal::transform_and_sort_pairs<decltype(policy), key_t, key_out_t, T, value_t, value_out_t>( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in, values_out, n, descending, clipped_begin_bit, clipped_end_bit); }; if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) { internal::sort_only_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in, values_out, n, descending); } else if (num_bytes == 1) { transform_and_sort_pairs_f.template operator()<uint8_t>(0); } else if (num_bytes == 2) { transform_and_sort_pairs_f.template operator()<uint16_t>(0); } else if (num_bytes <= 4) { transform_and_sort_pairs_f.template operator()<uint32_t>(0); } else // if (num_bytes <= 8) { transform_and_sort_pairs_f.template operator()<uint64_t>(0); } } // overload for key_out_t == std::nullptr_t template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t> typename ::std::enable_if<::std::is_null_pointer<key_out_t>::value>::type sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t 
values_in, value_out_t values_out, int64_t n, bool descending, int begin_bit, int end_bit) { // create temporary keys_out to discard, memory footprint could be improved by // a specialized iterator with a single // unchanging dummy key_t element using key_t_value_t = typename std::iterator_traits<key_t>::value_type; sycl::buffer<key_t_value_t, 1> temp_keys_out{sycl::range<1>(n)}; internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in, oneapi::dpl::begin(temp_keys_out), values_in, values_out, n, descending, begin_bit, end_bit); } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t, typename OffsetIteratorT> inline void segmented_sort_pairs_by_parallel_sorts( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_out_t values_in, value_t values_out, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { using offset_type = typename ::std::iterator_traits<OffsetIteratorT>::value_type; ::std::vector<offset_type> host_accessible_offset_starts(nsegments); ::std::vector<offset_type> host_accessible_offset_ends(nsegments); // make offsets accessible on host ::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets, begin_offsets + nsegments, host_accessible_offset_starts.begin()); ::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets, end_offsets + nsegments, host_accessible_offset_ends.begin()); for (::std::uint64_t i = 0; i < nsegments; i++) { uint64_t segment_begin = host_accessible_offset_starts[i]; uint64_t segment_end = ::std::min(n, (int64_t)host_accessible_offset_ends[i]); if (segment_begin < segment_end) { ::dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in + segment_begin, keys_out + segment_begin, values_in + segment_begin, values_out + segment_begin, segment_end - segment_begin, descending, begin_bit, end_bit); } } } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename OffsetIteratorT> inline void segmented_sort_keys_by_parallel_sorts( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { using offset_type = typename ::std::iterator_traits<OffsetIteratorT>::value_type; ::std::vector<offset_type> host_accessible_offset_starts(nsegments); ::std::vector<offset_type> host_accessible_offset_ends(nsegments); // make offsets accessible on host ::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets, begin_offsets + nsegments, host_accessible_offset_starts.begin()); ::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets, end_offsets + nsegments, host_accessible_offset_ends.begin()); for (::std::uint64_t i = 0; i < nsegments; i++) { uint64_t segment_begin = host_accessible_offset_starts[i]; uint64_t segment_end = ::std::min(n, (int64_t)host_accessible_offset_ends[i]); if (segment_begin < segment_end) { ::dpct::sort_keys(::std::forward<_ExecutionPolicy>(policy), keys_in + segment_begin, keys_out + segment_begin, segment_end - segment_begin, descending, begin_bit, end_bit); } } } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t, typename OffsetIteratorT> inline void 
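// Dispatches one serial sort per segment from inside a single device
// parallel_for: each work-item runs ::dpct::sort_pairs with
// ::std::execution::seq on its own [begin_offsets[i], end_offsets[i]) range.
// The callers below pick this path when there are enough segments to keep the
// device occupied.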
segmented_sort_pairs_by_parallel_for_of_sorts( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { policy.queue().submit([&](sycl::handler &cgh) { cgh.parallel_for(nsegments, [=](sycl::id<1> i) { uint64_t segment_begin = begin_offsets[i]; uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]); if (segment_begin == segment_end) { return; } ::dpct::sort_pairs(::std::execution::seq, keys_in + segment_begin, keys_out + segment_begin, values_in + segment_begin, values_out + segment_begin, segment_end - segment_begin, descending, begin_bit, end_bit); }); }); policy.queue().wait(); } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename OffsetIteratorT> inline void segmented_sort_keys_by_parallel_for_of_sorts( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { policy.queue().submit([&](sycl::handler &cgh) { cgh.parallel_for(nsegments, [=](sycl::id<1> i) { uint64_t segment_begin = begin_offsets[i]; uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]); if (segment_begin == segment_end) { return; } ::dpct::sort_keys(::std::execution::seq, keys_in + segment_begin, keys_out + segment_begin, segment_end - segment_begin, descending, begin_bit, end_bit); }); }); policy.queue().wait(); } template <typename _ExecutionPolicy, typename OffsetIteratorT> inline void mark_segments(_ExecutionPolicy &&policy, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, int64_t n, int64_t nsegments, sycl::buffer<::std::size_t, 1> segments) { ::std::size_t work_group_size = policy.queue() .get_device() .template get_info<sycl::info::device::max_work_group_size>(); auto sg_sizes = policy.queue() .get_device() .template get_info<sycl::info::device::sub_group_sizes>(); ::std::size_t sub_group_size = sg_sizes.empty() ? 
0 : sg_sizes.back(); float avg_seg_size = (float)n / (float)nsegments; if (avg_seg_size > work_group_size) { // If average segment size is larger than workgroup, use workgroup to // coordinate to mark segments policy.queue() .submit([&](sycl::handler &h) { auto segments_acc = segments.get_access<sycl::access_mode::write>(h); h.parallel_for(work_group_size, ([=](sycl::id<1> id) { for (::std::size_t seg = 0; seg < nsegments; seg++) { ::std::size_t i = begin_offsets[seg]; ::std::size_t end = end_offsets[seg]; while (i + id < end) { segments_acc[i + id] = seg; i += work_group_size; } } })); }) .wait(); } else if (sub_group_size > 0 && avg_seg_size > sub_group_size / 2) { // If average segment size is larger than half a subgroup, use subgroup to // coordinate to mark segments policy.queue() .submit([&](sycl::handler &h) { auto segments_acc = segments.get_access<sycl::access_mode::write>(h); h.parallel_for( sycl::nd_range<1>{work_group_size, work_group_size}, ([=](sycl::nd_item<1> item) { auto sub_group = item.get_sub_group(); ::std::size_t num_subgroups = sub_group.get_group_range().size(); ::std::size_t local_size = sub_group.get_local_range().size(); ::std::size_t sub_group_id = sub_group.get_group_id(); while (sub_group_id < nsegments) { ::std::size_t subgroup_local_id = sub_group.get_local_id(); ::std::size_t i = begin_offsets[sub_group_id]; ::std::size_t end = end_offsets[sub_group_id]; while (i + subgroup_local_id < end) { segments_acc[i + subgroup_local_id] = sub_group_id; i += local_size; } sub_group_id += num_subgroups; } })); }) .wait(); } else { // If average segment size is small as compared to subgroup, use single // work item to mark each segment policy.queue() .submit([&](sycl::handler &h) { auto segments_acc = segments.get_access<sycl::access_mode::write>(h); h.parallel_for(nsegments, ([=](sycl::id<1> seg) { for (::std::size_t i = begin_offsets[seg]; i < end_offsets[seg]; i++) { segments_acc[i] = seg; } })); }) .wait(); } } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename OffsetIteratorT> inline void segmented_sort_keys_by_two_pair_sorts( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)}; sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)}; using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type; sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)}; mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets, end_offsets, n, nsegments, segments); // Part 1: Sort by keys keeping track of which segment were in dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in, oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(segments), oneapi::dpl::begin(segments_sorted), n, descending); // Part 2: Sort the segments with a stable sort to get back sorted segments. 
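// Why two full sorts work (keys path): pass 1 above ordered every key while
// carrying its segment id along as the value; pass 2 below reorders by segment
// id, and because that sort keeps the relative order of equal segment ids
// (see the stable-sort note above), the keys within each segment end up sorted.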
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), oneapi::dpl::begin(segments_sorted), oneapi::dpl::begin(segments), oneapi::dpl::begin(keys_temp), keys_out, n, false); } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t, typename OffsetIteratorT> inline void segmented_sort_pairs_by_two_pair_sorts( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_out_t values_in, value_t values_out, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)}; sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)}; using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type; sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)}; using value_t_value_t = typename ::std::iterator_traits<value_t>::value_type; sycl::buffer<value_t_value_t, 1> values_temp{sycl::range<1>(n)}; mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets, end_offsets, n, nsegments, segments); auto zip_seg_vals = oneapi::dpl::make_zip_iterator(oneapi::dpl::begin(segments), values_in); auto zip_seg_vals_out = oneapi::dpl::make_zip_iterator( oneapi::dpl::begin(segments_sorted), oneapi::dpl::begin(values_temp)); // Part 1: Sort by keys keeping track of which segment were in dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in, oneapi::dpl::begin(keys_temp), zip_seg_vals, zip_seg_vals_out, n, descending); auto zip_keys_vals = oneapi::dpl::make_zip_iterator( oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(values_temp)); auto zip_keys_vals_out = oneapi::dpl::make_zip_iterator(keys_out, values_out); // Part 2: Sort the segments with a stable sort to get back sorted segments. 
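// Same two-pass idea as the keys-only variant above, except the user values
// travel with the segment ids in pass 1 and with the keys in pass 2 via zip
// iterators, so every key keeps its original value through both sorts.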
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), oneapi::dpl::begin(segments_sorted), oneapi::dpl::begin(segments), zip_keys_vals, zip_keys_vals_out, n, false); } } // end namespace internal template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t> inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value && dpct::internal::is_iterator<key_out_t>::value && dpct::internal::is_iterator<value_t>::value && dpct::internal::is_iterator<value_out_t>::value> sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t values_in, value_out_t values_out, int64_t n, bool descending, int begin_bit, int end_bit) { internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in, values_out, n, descending, begin_bit, end_bit); } template <typename _ExecutionPolicy, typename key_t, typename value_t> inline void sort_pairs( _ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, io_iterator_pair<value_t> &values, int64_t n, bool descending = false, bool do_swap_iters = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys.first(), keys.second(), values.first(), values.second(), n, descending, begin_bit, end_bit); if (do_swap_iters) { keys.swap(); values.swap(); } } template <typename _ExecutionPolicy, typename key_t, typename key_out_t> inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value && dpct::internal::is_iterator<key_out_t>::value> sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, bool descending, int begin_bit, int end_bit) { using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type; int clipped_begin_bit = ::std::max(begin_bit, 0); int clipped_end_bit = ::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8); int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1; auto transform_and_sort_f = [&](auto x) { using T = typename ::std::decay_t<decltype(x)>; internal::transform_and_sort<decltype(policy), key_t, key_out_t, T>( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n, descending, clipped_begin_bit, clipped_end_bit); }; if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) { internal::sort_only(::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n, descending); } else if (num_bytes == 1) { transform_and_sort_f.template operator()<uint8_t>(0); } else if (num_bytes == 2) { transform_and_sort_f.template operator()<uint16_t>(0); } else if (num_bytes <= 4) { transform_and_sort_f.template operator()<uint32_t>(0); } else // if (num_bytes <= 8) { transform_and_sort_f.template operator()<uint64_t>(0); } } template <typename _ExecutionPolicy, typename key_t> inline void sort_keys( _ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n, bool descending = false, bool do_swap_iters = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { sort_keys(std::forward<_ExecutionPolicy>(policy), keys.first(), keys.second(), n, descending, begin_bit, end_bit); if (do_swap_iters) keys.swap(); } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename OffsetIteratorT> inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value && dpct::internal::is_iterator<key_out_t>::value> segmented_sort_keys( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n, int64_t 
nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { int compute_units = policy.queue() .get_device() .template get_info<sycl::info::device::max_compute_units>(); auto sg_sizes = policy.queue() .get_device() .template get_info<sycl::info::device::sub_group_sizes>(); int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back(); // parallel for of serial sorts when we have sufficient number of segments for // load balance when number of segments is large as compared to our target // compute capability if (nsegments > compute_units * (policy.queue().get_device().is_gpu() ? subgroup_size : 1)) { dpct::internal::segmented_sort_keys_by_parallel_for_of_sorts( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); } else if (nsegments < 512) // for loop of parallel sorts when we have a small // number of total sorts to limit total overhead { dpct::internal::segmented_sort_keys_by_parallel_sorts( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); } else // decent catch all using 2 full sorts { dpct::internal::segmented_sort_keys_by_two_pair_sorts( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); } } template <typename _ExecutionPolicy, typename key_t, typename OffsetIteratorT> inline void segmented_sort_keys( _ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, bool do_swap_iters = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { segmented_sort_keys(::std::forward<_ExecutionPolicy>(policy), keys.first(), keys.second(), n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); if (do_swap_iters) { keys.swap(); } } template <typename _ExecutionPolicy, typename key_t, typename key_out_t, typename value_t, typename value_out_t, typename OffsetIteratorT> inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value && dpct::internal::is_iterator<key_out_t>::value && dpct::internal::is_iterator<value_t>::value && dpct::internal::is_iterator<value_out_t>::value> segmented_sort_pairs( _ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { int compute_units = policy.queue() .get_device() .template get_info<sycl::info::device::max_compute_units>(); auto sg_sizes = policy.queue() .get_device() .template get_info<sycl::info::device::sub_group_sizes>(); int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back(); // parallel for of serial sorts when we have sufficient number of segments for // load balance when number of segments is large as compared to our target // compute capability if (nsegments > compute_units * (policy.queue().get_device().is_gpu() ? 
subgroup_size : 1)) { dpct::internal::segmented_sort_pairs_by_parallel_for_of_sorts( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in, values_out, n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); } else if (nsegments < 512) // for loop of parallel sorts when we have a small // number of total sorts to limit total overhead { dpct::internal::segmented_sort_pairs_by_parallel_sorts( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in, values_out, n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); } else // decent catch all using 2 full sorts { dpct::internal::segmented_sort_pairs_by_two_pair_sorts( ::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in, values_out, n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); } } template <typename _ExecutionPolicy, typename key_t, typename value_t, typename OffsetIteratorT> inline void segmented_sort_pairs( _ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, io_iterator_pair<value_t> &values, int64_t n, int64_t nsegments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, bool descending = false, bool do_swap_iters = false, int begin_bit = 0, int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8) { segmented_sort_pairs(std::forward<_ExecutionPolicy>(policy), keys.first(), keys.second(), values.first(), values.second(), n, nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit); if (do_swap_iters) { keys.swap(); values.swap(); } } template <typename _ExecutionPolicy, typename Iter1, typename Iter2> inline void reduce_argmax(_ExecutionPolicy &&policy, Iter1 input, Iter2 output, ::std::size_t n) { dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input); auto ret = ::std::max_element( ::std::forward<_ExecutionPolicy>(policy), input_arg_idx, input_arg_idx + n, [](const auto &a, const auto &b) { return (a.value < b.value); }); ::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output); } template <typename _ExecutionPolicy, typename Iter1, typename Iter2> inline void reduce_argmin(_ExecutionPolicy &&policy, Iter1 input, Iter2 output, ::std::size_t n) { dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input); auto ret = ::std::min_element( ::std::forward<_ExecutionPolicy>(policy), input_arg_idx, input_arg_idx + n, [](const auto &a, const auto &b) { return (a.value < b.value); }); ::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output); } template <typename _ExecutionPolicy, typename Iter1, typename ValueLessComparable, typename StrictWeakOrdering> inline ::std::pair<Iter1, Iter1> equal_range(_ExecutionPolicy &&policy, Iter1 start, Iter1 end, const ValueLessComparable &value, StrictWeakOrdering comp) { ::std::vector<::std::int64_t> res_lower(1); ::std::vector<::std::int64_t> res_upper(1); ::std::vector<ValueLessComparable> value_vec(1, value); ::oneapi::dpl::lower_bound(policy, start, end, value_vec.begin(), value_vec.end(), res_lower.begin(), comp); ::oneapi::dpl::upper_bound(::std::forward<_ExecutionPolicy>(policy), start, end, value_vec.begin(), value_vec.end(), res_upper.begin(), comp); auto result = ::std::make_pair(start + res_lower[0], start + res_upper[0]); return result; } template <typename _ExecutionPolicy, typename Iter1, typename ValueLessComparable> inline ::std::pair<Iter1, Iter1> equal_range(_ExecutionPolicy &&policy, Iter1 start, Iter1 end, const ValueLessComparable &value) { return 
equal_range(::std::forward<_ExecutionPolicy>(policy), start, end, value, internal::__less()); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3> inline ::std::enable_if_t< dpct::internal::is_iterator<Iter1>::value && dpct::internal::is_iterator<Iter2>::value && internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value> segmented_reduce_argmin(Policy &&policy, Iter1 keys_in, Iter2 keys_out, ::std::int64_t nsegments, Iter3 begin_offsets, Iter3 end_offsets) { policy.queue().submit([&](sycl::handler &cgh) { cgh.parallel_for(nsegments, [=](sycl::id<1> i) { if (end_offsets[i] <= begin_offsets[i]) { keys_out[i] = dpct::key_value_pair( 1, ::std::numeric_limits< typename ::std::iterator_traits<Iter1>::value_type>::max()); } else { dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in + begin_offsets[i]); keys_out[i] = *::std::min_element( arg_index, arg_index + (end_offsets[i] - begin_offsets[i]), [](const auto &a, const auto &b) { return a.value < b.value; }); } }); }); policy.queue().wait(); } template <typename Policy, typename Iter1, typename Iter2, typename Iter3> inline ::std::enable_if_t< dpct::internal::is_iterator<Iter1>::value && dpct::internal::is_iterator<Iter2>::value && internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value> segmented_reduce_argmax(Policy &&policy, Iter1 keys_in, Iter2 keys_out, ::std::int64_t nsegments, Iter3 begin_offsets, Iter3 end_offsets) { policy.queue().submit([&](sycl::handler &cgh) { cgh.parallel_for(nsegments, [=](sycl::id<1> i) { if (end_offsets[i] <= begin_offsets[i]) { keys_out[i] = dpct::key_value_pair( 1, ::std::numeric_limits< typename ::std::iterator_traits<Iter1>::value_type>::lowest()); } else { dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in + begin_offsets[i]); keys_out[i] = *::std::max_element( arg_index, arg_index + (end_offsets[i] - begin_offsets[i]), [](const auto &a, const auto &b) { return a.value < b.value; }); } }); }); policy.queue().wait(); } } // end namespace dpct #endif
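/*
  Usage sketch (illustrative, not part of the original header): sorting
  (key, value) pairs with dpct::sort_pairs on a device. Assumes a oneDPL
  device execution policy and USM allocations; the names q, keys_in/keys_out
  and vals_in/vals_out are hypothetical.

    #include <oneapi/dpl/execution>
    #include <sycl/sycl.hpp>

    sycl::queue q;
    auto policy = oneapi::dpl::execution::make_device_policy(q);

    int64_t n = 1024;
    int   *keys_in  = sycl::malloc_shared<int>(n, q);
    int   *keys_out = sycl::malloc_shared<int>(n, q);
    float *vals_in  = sycl::malloc_shared<float>(n, q);
    float *vals_out = sycl::malloc_shared<float>(n, q);
    // ... fill keys_in / vals_in ...

    // Ascending sort over all key bits; values follow their keys.
    dpct::sort_pairs(policy, keys_in, keys_out, vals_in, vals_out, n);

    sycl::free(keys_in, q);  sycl::free(keys_out, q);
    sycl::free(vals_in, q);  sycl::free(vals_out, q);
*/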
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/dpl_extras/memory.h
//==---- memory.h ---------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_MEMORY_H__ #define __DPCT_MEMORY_H__ #include <sycl/sycl.hpp> // Memory management section: // device_pointer, device_reference, swap, device_iterator, malloc_device, // device_new, free_device, device_delete namespace dpct { namespace detail { template <typename T> struct make_allocatable { using type = T; }; template <> struct make_allocatable<void> { using type = dpct::byte_t; }; #if defined(__LIBSYCL_MAJOR_VERSION) && defined(__LIBSYCL_MINOR_VERSION) && \ defined(__LIBSYCL_PATCH_VERSION) #define _DPCT_LIBSYCL_VERSION \ (__LIBSYCL_MAJOR_VERSION * 10000 + __LIBSYCL_MINOR_VERSION * 100 + \ __LIBSYCL_PATCH_VERSION) #else #define _DPCT_LIBSYCL_VERSION 0 #endif template <typename _DataT> using __buffer_allocator = #if _DPCT_LIBSYCL_VERSION >= 60000 sycl::buffer_allocator<typename make_allocatable<_DataT>::type>; #else sycl::buffer_allocator; #endif } // namespace detail #ifdef DPCT_USM_LEVEL_NONE template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write, typename Allocator = detail::__buffer_allocator<T>> class device_pointer; #else template <typename T> class device_pointer; #endif template <typename T> struct device_reference { using pointer = device_pointer<T>; using value_type = T; template <typename OtherT> device_reference(const device_reference<OtherT> &input) : value(input.value) {} device_reference(const pointer &input) : value((*input).value) {} device_reference(value_type &input) : value(input) {} template <typename OtherT> device_reference &operator=(const device_reference<OtherT> &input) { value = input; return *this; }; device_reference &operator=(const device_reference &input) { T val = input.value; value = val; return *this; }; device_reference &operator=(const value_type &x) { value = x; return *this; }; pointer operator&() const { return pointer(&value); }; operator value_type() const { return T(value); } device_reference &operator++() { ++value; return *this; }; device_reference &operator--() { --value; return *this; }; device_reference operator++(int) { device_reference ref(*this); ++(*this); return ref; }; device_reference operator--(int) { device_reference ref(*this); --(*this); return ref; }; device_reference &operator+=(const T &input) { value += input; return *this; }; device_reference &operator-=(const T &input) { value -= input; return *this; }; device_reference &operator*=(const T &input) { value *= input; return *this; }; device_reference &operator/=(const T &input) { value /= input; return *this; }; device_reference &operator%=(const T &input) { value %= input; return *this; }; device_reference &operator&=(const T &input) { value &= input; return *this; }; device_reference &operator|=(const T &input) { value |= input; return *this; }; device_reference &operator^=(const T &input) { value ^= input; return *this; }; device_reference &operator<<=(const T &input) { value <<= input; return *this; }; device_reference &operator>>=(const T &input) { value >>= input; return *this; }; void swap(device_reference &input) { T tmp = (*this); *this = (input); input = (tmp); } T &value; }; template <typename T> void swap(device_reference<T> &x, device_reference<T> &y) { x.swap(y); } template <typename T> void swap(T &x, T &y) { 
T tmp = x; x = y; y = tmp; } namespace internal { // struct for checking if iterator is heterogeneous or not template <typename Iter, typename Void = void> // for non-heterogeneous iterators struct is_hetero_iterator : std::false_type {}; template <typename Iter> // for heterogeneous iterators struct is_hetero_iterator< Iter, typename std::enable_if<Iter::is_hetero::value, void>::type> : std::true_type {}; } // namespace internal #ifdef DPCT_USM_LEVEL_NONE template <typename T, sycl::access_mode Mode, typename Allocator> class device_iterator; template <typename ValueType, typename Allocator, typename Derived> class device_pointer_base { protected: sycl::buffer<ValueType, 1, Allocator> buffer; std::size_t idx; public: using pointer = ValueType *; using difference_type = std::make_signed<std::size_t>::type; device_pointer_base(sycl::buffer<ValueType, 1> in, std::size_t i = 0) : buffer(in), idx(i) {} #ifdef __USE_DPCT template <typename OtherT> device_pointer_base(OtherT *ptr) : buffer( dpct::detail::mem_mgr::instance() .translate_ptr(ptr) .buffer.template reinterpret<ValueType, 1>(sycl::range<1>( dpct::detail::mem_mgr::instance().translate_ptr(ptr).size / sizeof(ValueType)))), idx(ptr - (ValueType*)dpct::detail::mem_mgr::instance() .translate_ptr(ptr).alloc_ptr) {} #endif device_pointer_base(const std::size_t count) : buffer(sycl::range<1>(count / sizeof(ValueType))), idx() {} // buffer has no default ctor we pass zero-range to create an empty buffer device_pointer_base() : buffer(sycl::range<1>(0)) {} device_pointer_base(const device_pointer_base &in) : buffer(in.buffer), idx(in.idx) {} pointer get() const { auto res = (const_cast<device_pointer_base *>(this) ->buffer.template get_access<sycl::access_mode::read_write>()) .get_pointer(); return res + idx; } operator ValueType *() { auto res = (buffer.template get_access<sycl::access_mode::read_write>()) .get_pointer(); return res + idx; } operator ValueType *() const { auto res = (const_cast<device_pointer_base *>(this) ->buffer.template get_access<sycl::access_mode::read_write>()) .get_pointer(); return res + idx; } Derived operator+(difference_type forward) const { return Derived{buffer, idx + forward}; } Derived operator-(difference_type backward) const { return Derived{buffer, idx - backward}; } Derived operator++(int) { Derived p(buffer, idx); idx += 1; return p; } Derived operator--(int) { Derived p(buffer, idx); idx -= 1; return p; } difference_type operator-(const Derived &it) const { return idx - it.idx; } template <typename OtherIterator> typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value, difference_type>::type operator-(const OtherIterator &it) const { return idx - std::distance(oneapi::dpl::begin(buffer), it); } std::size_t get_idx() const { return idx; } // required sycl::buffer<ValueType, 1, Allocator> get_buffer() { return buffer; } // required }; template <typename T, sycl::access_mode Mode, typename Allocator> class device_pointer : public device_pointer_base<T, Allocator, device_pointer<T, Mode, Allocator>> { private: using base_type = device_pointer_base<T, Allocator, device_pointer>; public: using value_type = T; using difference_type = std::make_signed<std::size_t>::type; using pointer = T *; using reference = T &; using iterator_category = std::random_access_iterator_tag; using is_hetero = std::true_type; // required using is_passed_directly = std::false_type; static constexpr sycl::access_mode mode = Mode; // required device_pointer(sycl::buffer<T, 1> in, std::size_t i = 0) : base_type(in, i) 
{} #ifdef __USE_DPCT template <typename OtherT> device_pointer(OtherT *ptr) : base_type(ptr) {} #endif // needed for malloc_device, count is number of bytes to allocate device_pointer(const std::size_t count) : base_type(count) {} device_pointer() : base_type() {} device_pointer(const device_pointer &in) : base_type(in) {} device_pointer &operator+=(difference_type forward) { this->idx += forward; return *this; } device_pointer &operator-=(difference_type backward) { this->idx -= backward; return *this; } // include operators from base class using base_type::operator++; using base_type::operator--; device_pointer &operator++() { this->idx += 1; return *this; } device_pointer &operator--() { this->idx -= 1; return *this; } }; template <sycl::access_mode Mode, typename Allocator> class device_pointer<void, Mode, Allocator> : public device_pointer_base<dpct::byte_t, Allocator, device_pointer<void, Mode, Allocator>> { private: using base_type = device_pointer_base<dpct::byte_t, Allocator, device_pointer>; public: using value_type = dpct::byte_t; using difference_type = std::make_signed<std::size_t>::type; using pointer = void *; using reference = value_type &; using iterator_category = std::random_access_iterator_tag; using is_hetero = std::true_type; // required using is_passed_directly = std::false_type; static constexpr sycl::access_mode mode = Mode; // required device_pointer(sycl::buffer<value_type, 1> in, std::size_t i = 0) : base_type(in, i) {} #ifdef __USE_DPCT template <typename OtherT> device_pointer(OtherT *ptr) : base_type(ptr) {} #endif // needed for malloc_device, count is number of bytes to allocate device_pointer(const std::size_t count) : base_type(count) {} device_pointer() : base_type() {} device_pointer(const device_pointer &in) : base_type(in) {} device_pointer &operator+=(difference_type forward) { this->idx += forward; return *this; } device_pointer &operator-=(difference_type backward) { this->idx -= backward; return *this; } // include operators from base class using base_type::operator++; using base_type::operator--; device_pointer &operator++() { this->idx += 1; return *this; } device_pointer &operator--() { this->idx -= 1; return *this; } }; #else template <typename T> class device_iterator; template <typename ValueType, typename Derived> class device_pointer_base { protected: ValueType *ptr; public: using pointer = ValueType *; using difference_type = std::make_signed<std::size_t>::type; device_pointer_base(ValueType *p) : ptr(p) {} device_pointer_base(const std::size_t count) { sycl::queue default_queue = dpct::get_default_queue(); ptr = static_cast<ValueType *>(sycl::malloc_shared( count, default_queue.get_device(), default_queue.get_context())); } device_pointer_base() {} pointer get() const { return ptr; } operator ValueType *() { return ptr; } operator ValueType *() const { return ptr; } ValueType &operator[](difference_type idx) { return ptr[idx]; } ValueType &operator[](difference_type idx) const { return ptr[idx]; } Derived operator+(difference_type forward) const { return Derived{ptr + forward}; } Derived operator-(difference_type backward) const { return Derived{ptr - backward}; } Derived operator++(int) { Derived p(ptr); ++ptr; return p; } Derived operator--(int) { Derived p(ptr); --ptr; return p; } difference_type operator-(const Derived &it) const { return ptr - it.ptr; } }; template <typename T> class device_pointer : public device_pointer_base<T, device_pointer<T>> { private: using base_type = device_pointer_base<T, device_pointer<T>>; public: using 
value_type = T; using difference_type = std::make_signed<std::size_t>::type; using pointer = T *; using reference = T &; using const_reference = const T &; using iterator_category = std::random_access_iterator_tag; using is_hetero = std::false_type; // required using is_passed_directly = std::true_type; // required device_pointer(T *p) : base_type(p) {} // needed for malloc_device, count is number of bytes to allocate device_pointer(const std::size_t count) : base_type(count) {} device_pointer() : base_type() {} device_pointer &operator=(const device_iterator<T> &in) { this->ptr = static_cast<device_pointer<T>>(in).ptr; return *this; } // include operators from base class using base_type::operator++; using base_type::operator--; device_pointer &operator++() { ++(this->ptr); return *this; } device_pointer &operator--() { --(this->ptr); return *this; } device_pointer &operator+=(difference_type forward) { this->ptr = this->ptr + forward; return *this; } device_pointer &operator-=(difference_type backward) { this->ptr = this->ptr - backward; return *this; } }; template <> class device_pointer<void> : public device_pointer_base<dpct::byte_t, device_pointer<void>> { private: using base_type = device_pointer_base<dpct::byte_t, device_pointer<void>>; public: using value_type = dpct::byte_t; using difference_type = std::make_signed<std::size_t>::type; using pointer = void *; using reference = value_type &; using const_reference = const value_type &; using iterator_category = std::random_access_iterator_tag; using is_hetero = std::false_type; // required using is_passed_directly = std::true_type; // required device_pointer(void *p) : base_type(static_cast<value_type *>(p)) {} // needed for malloc_device, count is number of bytes to allocate device_pointer(const std::size_t count) : base_type(count) {} device_pointer() : base_type() {} pointer get() const { return static_cast<pointer>(this->ptr); } operator void *() { return this->ptr; } operator void *() const { return this->ptr; } // include operators from base class using base_type::operator++; using base_type::operator--; device_pointer &operator++() { ++(this->ptr); return *this; } device_pointer &operator--() { --(this->ptr); return *this; } device_pointer &operator+=(difference_type forward) { this->ptr = this->ptr + forward; return *this; } device_pointer &operator-=(difference_type backward) { this->ptr = this->ptr - backward; return *this; } }; #endif #ifdef DPCT_USM_LEVEL_NONE template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write, typename Allocator = detail::__buffer_allocator<T>> class device_iterator : public device_pointer<T, Mode, Allocator> { using Base = device_pointer<T, Mode, Allocator>; public: using value_type = T; using difference_type = std::make_signed<std::size_t>::type; using pointer = T *; using reference = T &; using iterator_category = std::random_access_iterator_tag; using is_hetero = std::true_type; // required using is_passed_directly = std::false_type; // required static constexpr sycl::access_mode mode = Mode; // required device_iterator() : Base() {} device_iterator(sycl::buffer<T, 1, Allocator> vec, std::size_t index) : Base(vec, index) {} template <sycl::access_mode inMode> device_iterator(const device_iterator<T, inMode, Allocator> &in) : Base(in.buffer, in.idx) {} // required for iter_mode device_iterator &operator=(const device_iterator &in) { Base::buffer = in.buffer; Base::idx = in.idx; return *this; } reference operator*() const { return const_cast<device_iterator *>(this) 
->buffer.template get_access<mode>()[Base::idx]; } reference operator[](difference_type i) const { return *(*this + i); } device_iterator &operator++() { ++Base::idx; return *this; } device_iterator &operator--() { --Base::idx; return *this; } device_iterator operator++(int) { device_iterator it(*this); ++(*this); return it; } device_iterator operator--(int) { device_iterator it(*this); --(*this); return it; } device_iterator operator+(difference_type forward) const { const auto new_idx = Base::idx + forward; return {Base::buffer, new_idx}; } device_iterator &operator+=(difference_type forward) { Base::idx += forward; return *this; } device_iterator operator-(difference_type backward) const { return {Base::buffer, Base::idx - backward}; } device_iterator &operator-=(difference_type backward) { Base::idx -= backward; return *this; } friend device_iterator operator+(difference_type forward, const device_iterator &it) { return it + forward; } difference_type operator-(const device_iterator &it) const { return Base::idx - it.idx; } template <typename OtherIterator> typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value, difference_type>::type operator-(const OtherIterator &it) const { return Base::idx - std::distance(oneapi::dpl::begin(Base::buffer), it); } bool operator==(const device_iterator &it) const { return *this - it == 0; } bool operator!=(const device_iterator &it) const { return !(*this == it); } bool operator<(const device_iterator &it) const { return *this - it < 0; } bool operator>(const device_iterator &it) const { return it < *this; } bool operator<=(const device_iterator &it) const { return !(*this > it); } bool operator>=(const device_iterator &it) const { return !(*this < it); } std::size_t get_idx() const { return Base::idx; } // required sycl::buffer<T, 1, Allocator> get_buffer() { return Base::buffer; } // required }; #else template <typename T> class device_iterator : public device_pointer<T> { using Base = device_pointer<T>; protected: std::size_t idx; public: using value_type = T; using difference_type = std::make_signed<std::size_t>::type; using pointer = typename Base::pointer; using reference = typename Base::reference; using iterator_category = std::random_access_iterator_tag; using is_hetero = std::false_type; // required using is_passed_directly = std::true_type; // required static constexpr sycl::access_mode mode = sycl::access_mode::read_write; // required device_iterator() : Base(nullptr), idx(0) {} device_iterator(T *vec, std::size_t index) : Base(vec), idx(index) {} template <sycl::access_mode inMode> device_iterator(const device_iterator<T> &in) : Base(in.ptr), idx(in.idx) {} // required for iter_mode device_iterator &operator=(const device_iterator &in) { Base::operator=(in); idx = in.idx; return *this; } reference operator*() const { return *(Base::ptr + idx); } reference operator[](difference_type i) { return Base::ptr[idx + i]; } reference operator[](difference_type i) const { return Base::ptr[idx + i]; } device_iterator &operator++() { ++idx; return *this; } device_iterator &operator--() { --idx; return *this; } device_iterator operator++(int) { device_iterator it(*this); ++(*this); return it; } device_iterator operator--(int) { device_iterator it(*this); --(*this); return it; } device_iterator operator+(difference_type forward) const { const auto new_idx = idx + forward; return {Base::ptr, new_idx}; } device_iterator &operator+=(difference_type forward) { idx += forward; return *this; } device_iterator operator-(difference_type 
backward) const { return {Base::ptr, idx - backward}; } device_iterator &operator-=(difference_type backward) { idx -= backward; return *this; } friend device_iterator operator+(difference_type forward, const device_iterator &it) { return it + forward; } difference_type operator-(const device_iterator &it) const { return idx - it.idx; } template <typename OtherIterator> typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value, difference_type>::type operator-(const OtherIterator &it) const { return idx - it.get_idx(); } bool operator==(const device_iterator &it) const { return *this - it == 0; } bool operator!=(const device_iterator &it) const { return !(*this == it); } bool operator<(const device_iterator &it) const { return *this - it < 0; } bool operator>(const device_iterator &it) const { return it < *this; } bool operator<=(const device_iterator &it) const { return !(*this > it); } bool operator>=(const device_iterator &it) const { return !(*this < it); } std::size_t get_idx() const { return idx; } // required device_iterator &get_buffer() { return *this; } // required std::size_t size() const { return idx; } }; #endif template <typename T> device_pointer<T> malloc_device(const std::size_t num_elements) { return device_pointer<T>(num_elements * sizeof(T)); } static inline device_pointer<void> malloc_device(const std::size_t num_bytes) { return device_pointer<void>(num_bytes); } template <typename T> device_pointer<T> device_new(device_pointer<T> p, const T &value, const std::size_t count = 1) { std::vector<T> result(count, value); p.buffer = sycl::buffer<T, 1>(result.begin(), result.end()); return p + count; } template <typename T> device_pointer<T> device_new(device_pointer<T> p, const std::size_t count = 1) { return device_new(p, T{}, count); } template <typename T> device_pointer<T> device_new(const std::size_t count = 1) { return device_pointer<T>(count); } template <typename T> void free_device(device_pointer<T> ptr) {} template <typename T> typename std::enable_if<!std::is_trivially_destructible<T>::value, void>::type device_delete(device_pointer<T> p, const std::size_t count = 1) { for (std::size_t i = 0; i < count; ++i) { p[i].~T(); } } template <typename T> typename std::enable_if<std::is_trivially_destructible<T>::value, void>::type device_delete(device_pointer<T>, const std::size_t count = 1) {} template <typename T> device_pointer<T> get_device_pointer(T *ptr) { return device_pointer<T>(ptr); } template <typename T> device_pointer<T> get_device_pointer(const device_pointer<T> &ptr) { return device_pointer<T>(ptr); } template <typename T> T *get_raw_pointer(const device_pointer<T> &ptr) { return ptr.get(); } template <typename Pointer> Pointer get_raw_pointer(const Pointer &ptr) { return ptr; } template <typename T> const T &get_raw_reference(const device_reference<T> &ref) { return ref.value; } template <typename T> T &get_raw_reference(device_reference<T> &ref) { return ref.value; } template <typename T> const T &get_raw_reference(const T &ref) { return ref; } template <typename T> T &get_raw_reference(T &ref) { return ref; } } // namespace dpct #endif
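A short, hedged sketch of the pointer helpers defined above, assuming the USM build (DPCT_USM_LEVEL_NONE not defined) in which device_pointer wraps a shared USM allocation made on the default dpct queue. The function name is illustrative; note that free_device above is an empty stub in this variant, so the sketch releases the underlying USM allocation directly.
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <dpct/dpl_utils.hpp>
#include <cstddef>
// Hedged sketch: allocate through device_pointer, touch the data, and clean up.
inline void pointer_example() {
  constexpr std::size_t n = 16;
  // malloc_device<T> takes an element count and returns a device_pointer<T>.
  dpct::device_pointer<int> p = dpct::malloc_device<int>(n);
  int *raw = dpct::get_raw_pointer(p); // underlying (shared USM) pointer
  sycl::queue &q = dpct::get_default_queue();
  q.fill(raw, 42, n).wait();           // fill on the device
  int first = p[0];                    // device_pointer supports operator[]
  (void)first;
  dpct::free_device(p);                // declared above; empty in this USM variant
  sycl::free(raw, q);                  // so release the underlying allocation here
}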
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/dpl_extras/vector.h
//==---- vector.h ---------------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_VECTOR_H__ #define __DPCT_VECTOR_H__ #include <oneapi/dpl/algorithm> #include <oneapi/dpl/execution> #include <sycl/sycl.hpp> #include "memory.h" #include <algorithm> #include <iterator> #include <vector> #include "../device.hpp" namespace dpct { namespace internal { template <typename Iter, typename Void = void> // for non-iterators struct is_iterator : std::false_type {}; template <typename Iter> // For iterators struct is_iterator< Iter, typename std::enable_if< !std::is_void<typename Iter::iterator_category>::value, void>::type> : std::true_type {}; template <typename T> // For pointers struct is_iterator<T *> : std::true_type {}; } // end namespace internal #ifndef DPCT_USM_LEVEL_NONE template <typename T, typename Allocator = sycl::usm_allocator<T, sycl::usm::alloc::shared>> class device_vector { public: using iterator = device_iterator<T>; using const_iterator = const iterator; using reference = device_reference<T>; using const_reference = const reference; using value_type = T; using pointer = T *; using const_pointer = const T *; using difference_type = typename ::std::iterator_traits<iterator>::difference_type; using size_type = ::std::size_t; private: Allocator _alloc; size_type _size; size_type _capacity; pointer _storage; size_type _min_capacity() const { return size_type(1); } void _set_capacity_and_alloc() { _capacity = ::std::max(_size * 2, _min_capacity()); _storage = _alloc.allocate(_capacity); } public: template <typename OtherA> operator ::std::vector<T, OtherA>() const { auto __tmp = ::std::vector<T, OtherA>(this->size()); ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), this->begin(), this->end(), __tmp.begin()); return __tmp; } device_vector() : _alloc(get_default_queue()), _size(0), _capacity(_min_capacity()) { _set_capacity_and_alloc(); } ~device_vector() /*= default*/ { _alloc.deallocate(_storage, _capacity); }; explicit device_vector(size_type n) : device_vector(n, T()) {} explicit device_vector(size_type n, const T &value) : _alloc(get_default_queue()), _size(n) { _set_capacity_and_alloc(); if (_size > 0) { ::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()), begin(), end(), T(value)); } } device_vector(const device_vector &other) : _alloc(get_default_queue()) { _size = other.size(); _capacity = other.capacity(); _storage = _alloc.allocate(_capacity); ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), other.begin(), other.end(), begin()); } device_vector(device_vector &&other) : _alloc(get_default_queue()), _size(other.size()), _capacity(other.capacity()), _storage(other._storage) { other._size = 0; other._capacity = 0; other._storage = nullptr; } template <typename InputIterator> device_vector(InputIterator first, typename ::std::enable_if< internal::is_iterator<InputIterator>::value && !::std::is_pointer<InputIterator>::value && ::std::is_same<typename ::std::iterator_traits< InputIterator>::iterator_category, ::std::random_access_iterator_tag>::value, InputIterator>::type last) : _alloc(get_default_queue()) { _size = ::std::distance(first, last); _set_capacity_and_alloc(); if (_size > 0) { 
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), first, last, begin()); } } template <typename InputIterator> device_vector(InputIterator first, typename ::std::enable_if<::std::is_pointer<InputIterator>::value, InputIterator>::type last) : _alloc(get_default_queue()) { _size = ::std::distance(first, last); _set_capacity_and_alloc(); if (_size > 0) { auto ptr_type = sycl::get_pointer_type(first, get_default_context()); if (ptr_type != sycl::usm::alloc::host && ptr_type != sycl::usm::alloc::unknown) { ::std::copy( oneapi::dpl::execution::make_device_policy(get_default_queue()), first, last, begin()); } else { sycl::buffer<T, 1> buf(first, last); auto buf_first = oneapi::dpl::begin(buf); auto buf_last = oneapi::dpl::end(buf); ::std::copy( oneapi::dpl::execution::make_device_policy(get_default_queue()), buf_first, buf_last, begin()); } } } template <typename InputIterator> device_vector(InputIterator first, typename ::std::enable_if< internal::is_iterator<InputIterator>::value && !::std::is_pointer<InputIterator>::value && !::std::is_same<typename ::std::iterator_traits< InputIterator>::iterator_category, ::std::random_access_iterator_tag>::value, InputIterator>::type last) : _alloc(get_default_queue()), _size(::std::distance(first, last)) { _set_capacity_and_alloc(); ::std::vector<T> _tmp(first, last); if (_size > 0) { ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), _tmp.begin(), _tmp.end(), this->begin()); } } template <typename OtherAllocator> device_vector(const device_vector<T, OtherAllocator> &v) : _alloc(get_default_queue()), _storage(v.real_begin()), _size(v.size()), _capacity(v.capacity()) {} template <typename OtherAllocator> device_vector(::std::vector<T, OtherAllocator> &v) : _alloc(get_default_queue()), _size(v.size()) { _set_capacity_and_alloc(); if (_size > 0) { ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), v.begin(), v.end(), this->begin()); } } template <typename OtherAllocator> device_vector &operator=(const ::std::vector<T, OtherAllocator> &v) { resize(v.size()); if (_size > 0) { ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), v.begin(), v.end(), begin()); } return *this; } device_vector &operator=(const device_vector &other) { // Copy assignment operator: resize(other.size()); if (_size > 0) { ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), other.begin(), other.end(), begin()); } return *this; } device_vector &operator=(device_vector &&other) { // Move assignment operator: device_vector dummy(::std::move(other)); this->swap(dummy); return *this; } size_type size() const { return _size; } iterator begin() noexcept { return device_iterator<T>(_storage, 0); } iterator end() { return device_iterator<T>(_storage, size()); } const_iterator begin() const noexcept { return device_iterator<T>(_storage, 0); } const_iterator cbegin() const noexcept { return begin(); } const_iterator end() const { return device_iterator<T>(_storage, size()); } const_iterator cend() const { return end(); } T *real_begin() { return _storage; } const T *real_begin() const { return _storage; } void swap(device_vector &v) { ::std::swap(_size, v._size); ::std::swap(_capacity, v._capacity); ::std::swap(_storage, v._storage); ::std::swap(_alloc, v._alloc); } reference operator[](size_type n) { return _storage[n]; } const_reference operator[](size_type n) const { return _storage[n]; } void reserve(size_type n) { if (n > capacity()) { // allocate buffer for new size 
auto tmp = _alloc.allocate(2 * n); // copy content (old buffer to new buffer) ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), begin(), end(), tmp); // deallocate old memory _alloc.deallocate(_storage, _capacity); _storage = tmp; _capacity = 2 * n; } } void resize(size_type new_size, const T &x = T()) { reserve(new_size); if (_size < new_size) { ::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()), begin() + _size, begin() + new_size, x); } _size = new_size; } size_type max_size(void) const { return ::std::numeric_limits<size_type>::max() / sizeof(T); } size_type capacity() const { return _capacity; } const_reference front() const { return *begin(); } reference front() { return *begin(); } const_reference back(void) const { return *(end() - 1); } reference back(void) { return *(end() - 1); } pointer data(void) { return _storage; } const_pointer data(void) const { return _storage; } void shrink_to_fit(void) { if (_size != capacity()) { size_type tmp_capacity = ::std::max(_size, _min_capacity()); auto tmp = _alloc.allocate(tmp_capacity); if (_size > 0) { ::std::copy( oneapi::dpl::execution::make_device_policy(get_default_queue()), begin(), end(), tmp); } _alloc.deallocate(_storage, _capacity); _storage = tmp; _capacity = tmp_capacity; } } void assign(size_type n, const T &x) { resize(n); if (_size > 0) { ::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()), begin(), begin() + n, x); } } template <typename InputIterator> void assign(InputIterator first, typename ::std::enable_if<internal::is_iterator<InputIterator>::value, InputIterator>::type last) { auto n = ::std::distance(first, last); resize(n); if (_size > 0) { ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), first, last, begin()); } } void clear(void) { _size = 0; } bool empty(void) const { return (size() == 0); } void push_back(const T &x) { insert(end(), size_type(1), x); } void pop_back(void) { if (_size > 0) --_size; } iterator erase(iterator first, iterator last) { auto n = ::std::distance(first, last); if (last == end()) { _size = _size - n; return end(); } auto m = ::std::distance(last, end()); if (m <= 0) { return end(); } auto tmp = _alloc.allocate(m); // copy remainder to temporary buffer. ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), last, end(), tmp); // override (erase) subsequence in storage. 
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), tmp, tmp + m, first); _alloc.deallocate(tmp, m); _size -= n; return begin() + first.get_idx() + n; } iterator erase(iterator pos) { return erase(pos, pos + 1); } iterator insert(iterator position, const T &x) { auto n = ::std::distance(begin(), position); insert(position, size_type(1), x); return begin() + n; } void insert(iterator position, size_type n, const T &x) { if (position == end()) { resize(size() + n); ::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()), end() - n, end(), x); } else { auto i_n = ::std::distance(begin(), position); // allocate temporary storage auto m = ::std::distance(position, end()); // will throw if position is not inside active vector auto tmp = _alloc.allocate(m); // copy remainder ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), position, end(), tmp); resize(size() + n); // resizing might invalidate position position = begin() + position.get_idx(); ::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()), position, position + n, x); ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), tmp, tmp + m, position + n); _alloc.deallocate(tmp, m); } } template <typename InputIterator> void insert(iterator position, InputIterator first, typename ::std::enable_if<internal::is_iterator<InputIterator>::value, InputIterator>::type last) { auto n = ::std::distance(first, last); if (position == end()) { resize(size() + n); ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), first, last, end()); } else { auto m = ::std::distance(position, end()); // will throw if position is not inside active vector auto tmp = _alloc.allocate(m); ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), position, end(), tmp); resize(size() + n); // resizing might invalidate position position = begin() + position.get_idx(); ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), first, last, position); ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), tmp, tmp + m, position + n); _alloc.deallocate(tmp, m); } } Allocator get_allocator() const { return _alloc; } }; #else template <typename T, typename Allocator = detail::__buffer_allocator<T>> class device_vector { static_assert( std::is_same<Allocator, detail::__buffer_allocator<T>>::value, "device_vector doesn't support custom allocator when USM is not used."); public: using iterator = device_iterator<T>; using const_iterator = const iterator; using reference = device_reference<T>; using const_reference = const reference; using value_type = T; using pointer = T *; using const_pointer = const T *; using difference_type = typename std::iterator_traits<iterator>::difference_type; using size_type = std::size_t; private: using Buffer = sycl::buffer<T, 1>; using Range = sycl::range<1>; // Using mem_mgr to handle memory allocation void *_storage; size_type _size; size_type _min_capacity() const { return size_type(1); } void *alloc_store(size_type num_bytes) { return detail::mem_mgr::instance().mem_alloc(num_bytes); } public: template <typename OtherA> operator std::vector<T, OtherA>() const { auto __tmp = std::vector<T, OtherA>(this->size()); std::copy(oneapi::dpl::execution::dpcpp_default, this->begin(), this->end(), __tmp.begin()); return __tmp; } device_vector() : _storage(alloc_store(_min_capacity() * sizeof(T))), _size(0) {} ~device_vector() = default; explicit 
device_vector(size_type n) : device_vector(n, T()) {} explicit device_vector(size_type n, const T &value) : _storage(alloc_store(std::max(n, _min_capacity()) * sizeof(T))), _size(n) { auto buf = get_buffer(); std::fill(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(buf), oneapi::dpl::begin(buf) + n, T(value)); } device_vector(const device_vector &other) : _storage(other._storage), _size(other.size()) {} device_vector(device_vector &&other) : _storage(std::move(other._storage)), _size(other.size()) {} template <typename InputIterator> device_vector(InputIterator first, typename std::enable_if< internal::is_iterator<InputIterator>::value && !std::is_pointer<InputIterator>::value && std::is_same<typename std::iterator_traits< InputIterator>::iterator_category, std::random_access_iterator_tag>::value, InputIterator>::type last) : _storage(alloc_store(std::distance(first, last) * sizeof(T))), _size(std::distance(first, last)) { auto buf = get_buffer(); auto dst = oneapi::dpl::begin(buf); std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), first, last, dst); } template <typename InputIterator> device_vector(InputIterator first, typename std::enable_if<std::is_pointer<InputIterator>::value, InputIterator>::type last) : _storage(alloc_store(std::distance(first, last) * sizeof(T))), _size(std::distance(first, last)) { auto buf = get_buffer(); Buffer tmp_buf(first, last); auto start = oneapi::dpl::begin(tmp_buf); auto end = oneapi::dpl::end(tmp_buf); auto dst = oneapi::dpl::begin(buf); std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), start, end, dst); } template <typename InputIterator> device_vector(InputIterator first, typename std::enable_if< internal::is_iterator<InputIterator>::value && !std::is_same<typename std::iterator_traits< InputIterator>::iterator_category, std::random_access_iterator_tag>::value, InputIterator>::type last) : _storage(alloc_store(std::distance(first, last) * sizeof(T))), _size(std::distance(first, last)) { auto buf = get_buffer(); std::vector<T> tmp(first, last); Buffer tmp_buf(tmp); auto start = oneapi::dpl::begin(tmp_buf); auto end = oneapi::dpl::end(tmp_buf); auto dst = oneapi::dpl::begin(buf); std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), start, end, dst); } template <typename OtherAllocator> device_vector(const device_vector<T, OtherAllocator> &v) : _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) { auto buf = get_buffer(); auto dst = oneapi::dpl::begin(buf); std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()), v.real_begin(), v.real_begin() + v.size(), dst); } template <typename OtherAllocator> device_vector(std::vector<T, OtherAllocator> &v) : _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) { std::copy(oneapi::dpl::execution::dpcpp_default, v.begin(), v.end(), oneapi::dpl::begin(get_buffer())); } device_vector &operator=(const device_vector &other) { // Copy assignment operator: _size = other.size(); void *tmp = alloc_store(_size * sizeof(T)); auto tmp_buf = detail::mem_mgr::instance() .translate_ptr(tmp) .buffer.template reinterpret<T, 1>(sycl::range<1>(_size)); std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(other.get_buffer()), oneapi::dpl::end(other.get_buffer()), oneapi::dpl::begin(tmp_buf)); detail::mem_mgr::instance().mem_free(_storage); _storage = tmp; return *this; } device_vector &operator=(device_vector &&other) { // Move assignment operator: _size = other.size(); this->_storage = 
std::move(other._storage); return *this; } template <typename OtherAllocator> device_vector &operator=(const std::vector<T, OtherAllocator> &v) { Buffer data(v.begin(), v.end()); _size = v.size(); void *tmp = alloc_store(_size * sizeof(T)); auto tmp_buf = detail::mem_mgr::instance() .translate_ptr(tmp) .buffer.template reinterpret<T, 1>(sycl::range<1>(_size)); std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(data), oneapi::dpl::end(data), oneapi::dpl::begin(tmp_buf)); detail::mem_mgr::instance().mem_free(_storage); _storage = tmp; return *this; } Buffer get_buffer() const { return detail::mem_mgr::instance() .translate_ptr(_storage) .buffer.template reinterpret<T, 1>(sycl::range<1>(capacity())); } size_type size() const { return _size; } iterator begin() noexcept { return device_iterator<T>(get_buffer(), 0); } iterator end() { return device_iterator<T>(get_buffer(), _size); } const_iterator begin() const noexcept { return device_iterator<T>(get_buffer(), 0); } const_iterator cbegin() const noexcept { return begin(); } const_iterator end() const { return device_iterator<T>(get_buffer(), _size); } const_iterator cend() const { return end(); } T *real_begin() { return (detail::mem_mgr::instance() .translate_ptr(_storage) .buffer.template get_access<sycl::access_mode::read_write>()) .get_pointer(); } const T *real_begin() const { return const_cast<device_vector *>(this) ->detail::mem_mgr::instance() .translate_ptr(_storage) .buffer.template get_access<sycl::access_mode::read_write>() .get_pointer(); } void swap(device_vector &v) { void *temp = v._storage; v._storage = this->_storage; this->_storage = temp; std::swap(_size, v._size); } reference operator[](size_type n) { return *(begin() + n); } const_reference operator[](size_type n) const { return *(begin() + n); } void reserve(size_type n) { if (n > capacity()) { // create new buffer (allocate for new size) void *a = alloc_store(n * sizeof(T)); // copy content (old buffer to new buffer) if (_storage != nullptr) { auto tmp = detail::mem_mgr::instance() .translate_ptr(a) .buffer.template reinterpret<T, 1>(sycl::range<1>(n)); auto src_buf = get_buffer(); std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(src_buf), oneapi::dpl::end(src_buf), oneapi::dpl::begin(tmp)); // deallocate old memory detail::mem_mgr::instance().mem_free(_storage); } _storage = a; } } void resize(size_type new_size, const T &x = T()) { reserve(new_size); if (_size < new_size) { auto src_buf = get_buffer(); std::fill(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(src_buf) + _size, oneapi::dpl::begin(src_buf) + new_size, x); } _size = new_size; } size_type max_size(void) const { return std::numeric_limits<size_type>::max() / sizeof(T); } size_type capacity() const { return _storage != nullptr ? 
detail::mem_mgr::instance() .translate_ptr(_storage) .buffer.size() / sizeof(T) : 0; } const_reference front() const { return *begin(); } reference front() { return *begin(); } const_reference back(void) const { return *(end() - 1); } reference back(void) { return *(end() - 1); } pointer data(void) { return reinterpret_cast<pointer>(_storage); } const_pointer data(void) const { return reinterpret_cast<const_pointer>(_storage); } void shrink_to_fit(void) { if (_size != capacity()) { void *a = alloc_store(_size * sizeof(T)); auto tmp = detail::mem_mgr::instance() .translate_ptr(a) .buffer.template reinterpret<T, 1>(sycl::range<1>(_size)); std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(get_buffer()), oneapi::dpl::begin(get_buffer()) + _size, oneapi::dpl::begin(tmp)); detail::mem_mgr::instance().mem_free(_storage); _storage = a; } } void assign(size_type n, const T &x) { resize(n); std::fill(oneapi::dpl::execution::dpcpp_default, begin(), begin() + n, x); } template <typename InputIterator> void assign(InputIterator first, typename std::enable_if<internal::is_iterator<InputIterator>::value, InputIterator>::type last) { auto n = std::distance(first, last); resize(n); if (internal::is_iterator<InputIterator>::value && !std::is_pointer<InputIterator>::value) std::copy(oneapi::dpl::execution::dpcpp_default, first, last, begin()); else { Buffer tmp(first, last); std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp), oneapi::dpl::end(tmp), begin()); } } void clear(void) { _size = 0; detail::mem_mgr::instance().mem_free(_storage); _storage = nullptr; } bool empty(void) const { return (size() == 0); } void push_back(const T &x) { insert(end(), size_type(1), x); } void pop_back(void) { if (_size > 0) --_size; } iterator erase(iterator first, iterator last) { auto n = std::distance(first, last); if (last == end()) { _size = _size - n; return end(); } Buffer tmp{Range(std::distance(last, end()))}; // copy remainder to temporary buffer. std::copy(oneapi::dpl::execution::dpcpp_default, last, end(), oneapi::dpl::begin(tmp)); // override (erase) subsequence in storage. 
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp), oneapi::dpl::end(tmp), first); resize(_size - n); return begin() + first.get_idx() + n; } iterator erase(iterator pos) { return erase(pos, pos + 1); } iterator insert(iterator position, const T &x) { auto n = std::distance(begin(), position); insert(position, size_type(1), x); return begin() + n; } void insert(iterator position, size_type n, const T &x) { if (position == end()) { resize(size() + n); std::fill(oneapi::dpl::execution::dpcpp_default, end() - n, end(), x); } else { auto i_n = std::distance(begin(), position); // allocate temporary storage Buffer tmp{Range(std::distance(position, end()))}; // copy remainder std::copy(oneapi::dpl::execution::dpcpp_default, position, end(), oneapi::dpl::begin(tmp)); resize(size() + n); // resizing might invalidate position position = begin() + position.get_idx(); std::fill(oneapi::dpl::execution::dpcpp_default, position, position + n, x); std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp), oneapi::dpl::end(tmp), position + n); } } template <typename InputIterator> void insert(iterator position, InputIterator first, typename std::enable_if<internal::is_iterator<InputIterator>::value, InputIterator>::type last) { auto n = std::distance(first, last); if (position == end()) { resize(size() + n); std::copy(oneapi::dpl::execution::dpcpp_default, first, last, end()); } else { Buffer tmp{Range(std::distance(position, end()))}; std::copy(oneapi::dpl::execution::dpcpp_default, position, end(), oneapi::dpl::begin(tmp)); resize(size() + n); // resizing might invalidate position position = begin() + position.get_idx(); std::copy(oneapi::dpl::execution::dpcpp_default, first, last, position); std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp), oneapi::dpl::end(tmp), position + n); } } }; #endif } // end namespace dpct #endif
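A hedged usage sketch of device_vector, assuming the USM build shown above (its device_iterator is passed directly to oneDPL device policies). The contents and function name are illustrative; the final copy back to a host std::vector relies on the conversion operator defined in the class.
#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <dpct/dpl_utils.hpp>
#include <vector>
// Hedged sketch: move host data into a device_vector, sort it on the device,
// and copy the result back through the conversion operator.
inline std::vector<float> vector_example() {
  std::vector<float> host{3.f, 1.f, 4.f, 1.f, 5.f, 9.f};
  dpct::device_vector<float> d(host);  // copies the host data to device-accessible memory
  auto policy = oneapi::dpl::execution::make_device_policy(dpct::get_default_queue());
  oneapi::dpl::sort(policy, d.begin(), d.end());
  std::vector<float> sorted = d;       // conversion operator copies back to the host
  return sorted;
}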
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/dpl_extras/dpcpp_extensions.h
//==---- dpcpp_extensions.h ------------------*- C++ -*---------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------===// #ifndef __DPCT_DPCPP_EXTENSIONS_H__ #define __DPCT_DPCPP_EXTENSIONS_H__ #include <sycl/sycl.hpp> #include <stdexcept> #ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS #include <sycl/ext/oneapi/experimental/user_defined_reductions.hpp> #endif #include "../dpct.hpp" namespace dpct { namespace group { namespace detail { template <typename... _Args> constexpr auto __reduce_over_group(_Args... __args) { return sycl::reduce_over_group(__args...); } template <typename... _Args> constexpr auto __group_broadcast(_Args... __args) { return sycl::group_broadcast(__args...); } template <typename... _Args> constexpr auto __exclusive_scan_over_group(_Args... __args) { return sycl::exclusive_scan_over_group(__args...); } template <typename... _Args> constexpr auto __inclusive_scan_over_group(_Args... __args) { return sycl::inclusive_scan_over_group(__args...); } } // end namespace detail /// Perform an exclusive scan over the values of inputs from all work-items in /// the group using the operator binary_op, which must be one of the SYCL 2020 /// group algorithms library function objects. /// /// \param item A work-item in a group. /// \param inputs Pointer to the input data for the scan operation. /// \param outputs Pointer to the location where scan results will be stored. /// \param init initial value of the scan result. /// \param binary_op functor that implements the binary operation used to /// perform the scan. template <typename Item, typename T, class BinaryOperation, int VALUES_PER_THREAD> __dpct_inline__ void exclusive_scan(const Item &item, T (&inputs)[VALUES_PER_THREAD], T (&outputs)[VALUES_PER_THREAD], T init, BinaryOperation binary_op) { T result = inputs[0]; #pragma unroll for (int i = 1; i < VALUES_PER_THREAD; ++i) { result = binary_op(result, inputs[i]); } T exclusive_result = detail::__exclusive_scan_over_group(item.get_group(), result, binary_op); T input = inputs[0]; if (item.get_local_linear_id() == 0) { outputs[0] = init; } else { outputs[0] = exclusive_result; } #pragma unroll for (int i = 1; i < VALUES_PER_THREAD; ++i) { T output = binary_op(input, outputs[i - 1]); input = inputs[i]; outputs[i] = output; } } /// Perform an exclusive scan over the values of input from all work-items in /// the group using the operator binary_op, which must be one of the SYCL 2020 /// group algorithms library function objects. /// /// \param item A work-item in a group. /// \param input Input data for the scan operation. /// \param init initial value of the scan result. /// \param binary_op functor that implements the binary operation used to /// perform the scan. \param group_aggregate group-wide aggregate of all inputs /// in the work-items of the group. \returns exclusive scan of the first i /// work-items where item is the i-th work item. 
template <typename Item, typename T, class BinaryOperation> __dpct_inline__ T exclusive_scan(const Item &item, T input, T init, BinaryOperation binary_op, T &group_aggregate) { T output = detail::__exclusive_scan_over_group(item.get_group(), input, init, binary_op); if (item.get_local_linear_id() == item.get_local_range().size() - 1) { group_aggregate = binary_op(output, input); } group_aggregate = detail::__group_broadcast( item.get_group(), group_aggregate, item.get_local_range().size() - 1); return output; } /// Perform an exclusive scan over the values of input from all work-items in /// the group using the operator binary_op, which must be one of the SYCL 2020 /// group algorithms library function objects. /// /// \param item A work-item in a group. /// \param input Input data for the scan operation. /// \param binary_op functor that implements the binary operation used to /// perform the scan. \param prefix_callback_op functor invoked by the first /// work-item in the group that returns the /// initial value in the resulting scan of the work-items in the group. /// \returns exclusive scan of the input elements assigned to work-items in the /// group. template <typename Item, typename T, class BinaryOperation, class GroupPrefixCallbackOperation> __dpct_inline__ T exclusive_scan(const Item &item, T input, BinaryOperation binary_op, GroupPrefixCallbackOperation &prefix_callback_op) { T group_aggregate; T output = detail::__exclusive_scan_over_group(item.get_group(), input, binary_op); if (item.get_local_linear_id() == item.get_local_range().size() - 1) { group_aggregate = binary_op(output, input); } group_aggregate = detail::__group_broadcast( item.get_group(), group_aggregate, item.get_local_range().size() - 1); T group_prefix = prefix_callback_op(group_aggregate); if (item.get_local_linear_id() == 0) { output = group_prefix; } else { output = binary_op(group_prefix, output); } return output; } namespace detail { typedef uint16_t digit_counter_type; typedef uint32_t packed_counter_type; template <int N, int CURRENT_VAL = N, int COUNT = 0> struct log2 { enum { VALUE = log2<N, (CURRENT_VAL >> 1), COUNT + 1>::VALUE }; }; template <int N, int COUNT> struct log2<N, 0, COUNT> { enum { VALUE = (1 << (COUNT - 1) < N) ? 
COUNT : COUNT - 1 }; }; __dpct_inline__ uint32_t bfe(uint32_t source, uint32_t bit_start, uint32_t num_bits) { const uint32_t MASK = (1 << num_bits) - 1; return (source >> bit_start) & MASK; } template <int RADIX_BITS, bool DESCENDING = false> class radix_rank { public: static size_t get_local_memory_size(size_t group_threads) { return group_threads * PADDED_COUNTER_LANES * sizeof(packed_counter_type); } radix_rank(uint8_t *local_memory) : _local_memory(local_memory) {} template <typename Item, int VALUES_PER_THREAD> __dpct_inline__ void rank_keys(const Item &item, uint32_t (&keys)[VALUES_PER_THREAD], int (&ranks)[VALUES_PER_THREAD], int current_bit, int num_bits) { digit_counter_type thread_prefixes[VALUES_PER_THREAD]; digit_counter_type *digit_counters[VALUES_PER_THREAD]; digit_counter_type *buffer = reinterpret_cast<digit_counter_type *>(_local_memory); reset_local_memory(item); item.barrier(sycl::access::fence_space::local_space); #pragma unroll for (int i = 0; i < VALUES_PER_THREAD; ++i) { uint32_t digit = bfe(keys[i], current_bit, num_bits); uint32_t sub_counter = digit >> LOG_COUNTER_LANES; uint32_t counter_lane = digit & (COUNTER_LANES - 1); if (DESCENDING) { sub_counter = PACKING_RATIO - 1 - sub_counter; counter_lane = COUNTER_LANES - 1 - counter_lane; } digit_counters[i] = &buffer[counter_lane * item.get_local_range().size() * PACKING_RATIO + item.get_local_linear_id() * PACKING_RATIO + sub_counter]; thread_prefixes[i] = *digit_counters[i]; *digit_counters[i] = thread_prefixes[i] + 1; } item.barrier(sycl::access::fence_space::local_space); scan_counters(item); item.barrier(sycl::access::fence_space::local_space); for (int i = 0; i < VALUES_PER_THREAD; ++i) { ranks[i] = thread_prefixes[i] + *digit_counters[i]; } } private: template <typename Item> __dpct_inline__ void reset_local_memory(const Item &item) { packed_counter_type *ptr = reinterpret_cast<packed_counter_type *>(_local_memory); #pragma unroll for (int i = 0; i < PADDED_COUNTER_LANES; ++i) { ptr[i * item.get_local_range().size() + item.get_local_linear_id()] = 0; } } template <typename Item> __dpct_inline__ packed_counter_type upsweep(const Item &item) { packed_counter_type sum = 0; packed_counter_type *ptr = reinterpret_cast<packed_counter_type *>(_local_memory); #pragma unroll for (int i = 0; i < PADDED_COUNTER_LANES; i++) { cached_segment[i] = ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i]; } #pragma unroll for (int i = 0; i < PADDED_COUNTER_LANES; ++i) { sum += cached_segment[i]; } return sum; } template <typename Item> __dpct_inline__ void exclusive_downsweep(const Item &item, packed_counter_type raking_partial) { packed_counter_type *ptr = reinterpret_cast<packed_counter_type *>(_local_memory); packed_counter_type sum = raking_partial; #pragma unroll for (int i = 0; i < PADDED_COUNTER_LANES; ++i) { packed_counter_type value = cached_segment[i]; cached_segment[i] = sum; sum += value; } #pragma unroll for (int i = 0; i < PADDED_COUNTER_LANES; ++i) { ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i] = cached_segment[i]; } } struct prefix_callback { __dpct_inline__ packed_counter_type operator()(packed_counter_type block_aggregate) { packed_counter_type block_prefix = 0; #pragma unroll for (int packed = 1; packed < PACKING_RATIO; packed++) { block_prefix += block_aggregate << (sizeof(digit_counter_type) * 8 * packed); } return block_prefix; } }; template <typename Item> __dpct_inline__ void scan_counters(const Item &item) { packed_counter_type raking_partial = upsweep(item); prefix_callback 
callback; packed_counter_type exclusive_partial = exclusive_scan( item, raking_partial, sycl::ext::oneapi::plus<packed_counter_type>(), callback); exclusive_downsweep(item, exclusive_partial); } private: static constexpr int PACKING_RATIO = sizeof(packed_counter_type) / sizeof(digit_counter_type); static constexpr int LOG_PACKING_RATIO = log2<PACKING_RATIO>::VALUE; static constexpr int LOG_COUNTER_LANES = RADIX_BITS - LOG_PACKING_RATIO; static constexpr int COUNTER_LANES = 1 << LOG_COUNTER_LANES; static constexpr int PADDED_COUNTER_LANES = COUNTER_LANES + 1; packed_counter_type cached_segment[PADDED_COUNTER_LANES]; uint8_t *_local_memory; }; template <typename T, typename U> struct base_traits { static __dpct_inline__ U twiddle_in(U key) { throw std::runtime_error("Not implemented"); } static __dpct_inline__ U twiddle_out(U key) { throw std::runtime_error("Not implemented"); } }; template <typename U> struct base_traits<uint32_t, U> { static __dpct_inline__ U twiddle_in(U key) { return key; } static __dpct_inline__ U twiddle_out(U key) { return key; } }; template <typename U> struct base_traits<int, U> { static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1); static __dpct_inline__ U twiddle_in(U key) { return key ^ HIGH_BIT; } static __dpct_inline__ U twiddle_out(U key) { return key ^ HIGH_BIT; } }; template <typename U> struct base_traits<float, U> { static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1); static __dpct_inline__ U twiddle_in(U key) { U mask = (key & HIGH_BIT) ? U(-1) : HIGH_BIT; return key ^ mask; } static __dpct_inline__ U twiddle_out(U key) { U mask = (key & HIGH_BIT) ? HIGH_BIT : U(-1); return key ^ mask; } }; template <typename T> struct traits : base_traits<T, T> {}; template <> struct traits<uint32_t> : base_traits<uint32_t, uint32_t> {}; template <> struct traits<int> : base_traits<int, uint32_t> {}; template <> struct traits<float> : base_traits<float, uint32_t> {}; } // namespace detail namespace detail { template <int N> struct power_of_two { enum { VALUE = ((N & (N - 1)) == 0) }; }; __dpct_inline__ uint32_t shr_add(uint32_t x, uint32_t shift, uint32_t addend) { return (x >> shift) + addend; } } // namespace detail /// Implements scatter to blocked exchange pattern used in radix sort algorithm. /// /// \tparam T type of the data elements exchanges /// \tparam VALUES_PER_THREAD number of data elements assigned to a thread template <typename T, int VALUES_PER_THREAD> class exchange { public: static size_t get_local_memory_size(size_t group_threads) { size_t padding_values = (INSERT_PADDING) ? 
((group_threads * VALUES_PER_THREAD) >> LOG_LOCAL_MEMORY_BANKS) : 0; return (group_threads * VALUES_PER_THREAD + padding_values) * sizeof(T); } exchange(uint8_t *local_memory) : _local_memory(local_memory) {} /// Rearrange elements from rank order to blocked order template <typename Item> __dpct_inline__ void scatter_to_blocked(Item item, T (&keys)[VALUES_PER_THREAD], int (&ranks)[VALUES_PER_THREAD]) { T *buffer = reinterpret_cast<T *>(_local_memory); #pragma unroll for (int i = 0; i < VALUES_PER_THREAD; i++) { int offset = ranks[i]; if (INSERT_PADDING) offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset); buffer[offset] = keys[i]; } item.barrier(sycl::access::fence_space::local_space); #pragma unroll for (int i = 0; i < VALUES_PER_THREAD; i++) { int offset = (item.get_local_id(0) * VALUES_PER_THREAD) + i; if (INSERT_PADDING) offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset); keys[i] = buffer[offset]; } } private: static constexpr int LOG_LOCAL_MEMORY_BANKS = 5; static constexpr bool INSERT_PADDING = (VALUES_PER_THREAD > 4) && (detail::power_of_two<VALUES_PER_THREAD>::VALUE); uint8_t *_local_memory; }; /// Implements radix sort to sort integer data elements assigned to all threads /// in the group. /// /// \tparam T type of the data elements exchanged /// \tparam VALUES_PER_THREAD number of data elements assigned to a thread /// \tparam DESCENDING boolean value indicating if data elements are sorted in /// descending order. template <typename T, int VALUES_PER_THREAD, bool DESCENDING = false> class radix_sort { public: static size_t get_local_memory_size(size_t group_threads) { size_t ranks_size = detail::radix_rank<RADIX_BITS>::get_local_memory_size(group_threads); size_t exchange_size = exchange<T, VALUES_PER_THREAD>::get_local_memory_size(group_threads); return sycl::max(ranks_size, exchange_size); } radix_sort(uint8_t *local_memory) : _local_memory(local_memory) {} template <typename Item> __dpct_inline__ void sort(const Item &item, T (&keys)[VALUES_PER_THREAD], int begin_bit = 0, int end_bit = 8 * sizeof(T)) { uint32_t(&unsigned_keys)[VALUES_PER_THREAD] = reinterpret_cast<uint32_t(&)[VALUES_PER_THREAD]>(keys); #pragma unroll for (int i = 0; i < VALUES_PER_THREAD; ++i) { unsigned_keys[i] = detail::traits<T>::twiddle_in(unsigned_keys[i]); } while (true) { int pass_bits = sycl::min(RADIX_BITS, end_bit - begin_bit); int ranks[VALUES_PER_THREAD]; detail::radix_rank<RADIX_BITS, DESCENDING>(_local_memory) .template rank_keys(item, unsigned_keys, ranks, begin_bit, pass_bits); begin_bit += RADIX_BITS; item.barrier(sycl::access::fence_space::local_space); exchange<T, VALUES_PER_THREAD>(_local_memory) .scatter_to_blocked(item, keys, ranks); item.barrier(sycl::access::fence_space::local_space); if (begin_bit >= end_bit) break; } #pragma unroll for (int i = 0; i < VALUES_PER_THREAD; ++i) { unsigned_keys[i] = detail::traits<T>::twiddle_out(unsigned_keys[i]); } } private: static constexpr int RADIX_BITS = 4; uint8_t *_local_memory; }; /// Perform a reduction of the data elements assigned to all threads in the /// group. /// /// \param item A work-item in a group. /// \param inputs Pointer to the input data for the reduce operation. /// \param binary_op functor that implements the binary operation used to /// perform the reduction.
\returns value of the reduction using binary_op template <typename Item, typename T, class BinaryOperation, int VALUES_PER_THREAD> __dpct_inline__ T reduce(Item item, T (&inputs)[VALUES_PER_THREAD], BinaryOperation binary_op) { T result = inputs[0]; #pragma unroll for (int i = 1; i < VALUES_PER_THREAD; i++) { result = binary_op(result, inputs[i]); } return detail::__reduce_over_group(item.get_group(), result, binary_op); } /// Perform a reduction on a limited number of the work items in a subgroup /// /// \param item A work-item in a group. /// \param value value per work item which is to be reduced /// \param items_to_reduce num work items at the start of the subgroup to reduce /// \param binary_op functor that implements the binary operation used to /// perform the scan. \returns value of the reduction using binary_op template <typename Item, typename T, class BinaryOperation> __dpct_inline__ typename ::std::enable_if_t<sycl::has_known_identity_v<BinaryOperation, T>, T> reduce_over_partial_group(const Item &item, const T &value, const ::std::uint16_t &items_to_reduce, BinaryOperation binary_op) { T value_temp = (item.get_local_linear_id() < items_to_reduce) ? value : sycl::known_identity_v<BinaryOperation, T>; return detail::__reduce_over_group(item.get_sub_group(), value_temp, binary_op); } /// Perform an inclusive scan over the values of inputs from all work-items in /// the group using the operator binary_op, which must be one of the SYCL 2020 /// group algorithms library function objects. /// /// \param item A work-item in a group. /// \param inputs Pointer to the input data for the scan operation. /// \param outputs Pointer to the location where scan results will be stored. /// \param binary_op functor that implements the binary operation used to /// perform the scan. \returns inclusive scan of the input elements assigned to /// work-items in the group. template <typename Item, typename T, class BinaryOperation, int VALUES_PER_THREAD> __dpct_inline__ void inclusive_scan(const Item &item, T (&inputs)[VALUES_PER_THREAD], T (&outputs)[VALUES_PER_THREAD], BinaryOperation binary_op) { T result = inputs[0]; #pragma unroll for (int i = 1; i < VALUES_PER_THREAD; ++i) { result = binary_op(result, inputs[i]); } T exclusive_result = detail::__exclusive_scan_over_group(item.get_group(), result, binary_op); if (item.get_local_linear_id() == 0) { outputs[0] = inputs[0]; } else { outputs[0] = binary_op(inputs[0], exclusive_result); } #pragma unroll for (int i = 1; i < VALUES_PER_THREAD; ++i) { outputs[i] = binary_op(inputs[i], outputs[i - 1]); } } /// Perform an inclusive scan over the values of inputs from all work-items in /// the group using the operator binary_op, which must be one of the SYCL 2020 /// group algorithms library function objects. /// /// \param item A work-item in a group. /// \param input Pointer to the input data for the scan operation. /// \param binary_op functor that implements the binary operation used to /// perform the scan. \param group_aggregate group-wide aggregate of all inputs /// in the work-items of the group. \returns inclusive scan of the input /// elements assigned to work-items in the group. 
template <typename Item, typename T, class BinaryOperation> __dpct_inline__ T inclusive_scan(const Item &item, T input, BinaryOperation binary_op, T &group_aggregate) { T output = detail::__inclusive_scan_over_group(item.get_group(), input, binary_op); if (item.get_local_linear_id() == item.get_local_range().size() - 1) { group_aggregate = output; } group_aggregate = detail::__group_broadcast( item.get_group(), group_aggregate, item.get_local_range().size() - 1); return output; } /// Perform an inclusive scan over the values of input from all work-items in /// the group using the operator binary_op, which must be one of the SYCL 2020 /// group algorithms library function objects. /// /// \param item A work-item in a group. /// \param input Input data for the scan operation. /// \param binary_op functor that implements the binary operation used to /// perform the scan. \param prefix_callback_op functor invoked by the first /// work-item in the group that returns the /// initial value in the resulting scan of the work-items in the group. /// \returns inclusive scan of the input elements assigned to work-items in the /// group. template <typename Item, typename T, class BinaryOperation, class GroupPrefixCallbackOperation> __dpct_inline__ T inclusive_scan(const Item &item, T input, BinaryOperation binary_op, GroupPrefixCallbackOperation &prefix_callback_op) { T group_aggregate; T output = inclusive_scan(item, input, binary_op, group_aggregate); T group_prefix = prefix_callback_op(group_aggregate); return binary_op(group_prefix, output); } } // namespace group namespace device { namespace detail { template <typename... _Args> constexpr auto __joint_reduce(_Args... __args) { return sycl::joint_reduce(__args...); } } // namespace detail /// Perform a reduce on each of the segments specified within data stored on /// the device. /// /// \param queue Command queue used to access device used for reduction /// \param inputs Pointer to the data elements on the device to be reduced /// \param outputs Pointer to the storage where the reduced value for each /// segment will be stored \param segment_count number of segments to be reduced /// \param begin_offsets Pointer to the set of indices that are the first /// element in each segment \param end_offsets Pointer to the set of indices /// that are one past the last element in each segment \param binary_op functor /// that implements the binary operation used to perform the scan. \param init /// initial value of the reduction for each segment. 
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation> void segmented_reduce(sycl::queue queue, T *inputs, T *outputs, size_t segment_count, OffsetT *begin_offsets, OffsetT *end_offsets, BinaryOperation binary_op, T init) { sycl::range<1> global_size(segment_count * GROUP_SIZE); sycl::range<1> local_size(GROUP_SIZE); queue.submit([&](sycl::handler &cgh) { cgh.parallel_for( sycl::nd_range<1>(global_size, local_size), [=](sycl::nd_item<1> item) { OffsetT segment_begin = begin_offsets[item.get_group_linear_id()]; OffsetT segment_end = end_offsets[item.get_group_linear_id()]; if (segment_begin == segment_end) { if (item.get_local_linear_id() == 0) { outputs[item.get_group_linear_id()] = init; } return; } sycl::multi_ptr<T, sycl::access::address_space::global_space> input_ptr = inputs; T group_aggregate = detail::__joint_reduce( item.get_group(), input_ptr + segment_begin, input_ptr + segment_end, init, binary_op); if (item.get_local_linear_id() == 0) { outputs[item.get_group_linear_id()] = group_aggregate; } }); }); } #ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS namespace experimental { namespace detail { template <typename _Tp, typename... _Ts> struct __is_any { constexpr static bool value = std::disjunction_v< std::is_same<std::remove_cv_t<_Tp>, std::remove_cv_t<_Ts>>...>; }; template <typename _Tp, typename _Bp> struct __in_native_op_list { constexpr static bool value = __is_any<_Bp, sycl::plus<_Tp>, sycl::bit_or<_Tp>, sycl::bit_xor<_Tp>, sycl::bit_and<_Tp>, sycl::maximum<_Tp>, sycl::minimum<_Tp>, sycl::multiplies<_Tp>>::value; }; template <typename _Tp, typename _Bp> struct __is_native_op { constexpr static bool value = __in_native_op_list<_Tp, _Bp>::value || __in_native_op_list<void, _Bp>::value; }; } // namespace detail /// Perform a reduce on each of the segments specified within data stored on /// the device. Compared with dpct::device::segmented_reduce, this experimental /// feature support user define reductions. /// /// \param queue Command queue used to access device used for reduction /// \param inputs Pointer to the data elements on the device to be reduced /// \param outputs Pointer to the storage where the reduced value for each /// segment will be stored \param segment_count number of segments to be reduced /// \param begin_offsets Pointer to the set of indices that are the first /// element in each segment \param end_offsets Pointer to the set of indices /// that are one past the last element in each segment \param binary_op functor /// that implements the binary operation used to perform the scan. \param init /// initial value of the reduction for each segment. 
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation> void segmented_reduce(sycl::queue queue, T *inputs, T *outputs, size_t segment_count, OffsetT *begin_offsets, OffsetT *end_offsets, BinaryOperation binary_op, T init) { sycl::range<1> global_size(segment_count * GROUP_SIZE); sycl::range<1> local_size(GROUP_SIZE); if constexpr (!detail::__is_native_op<T, BinaryOperation>::value) { queue.submit([&](sycl::handler &cgh) { size_t temp_memory_size = GROUP_SIZE * sizeof(T); auto scratch = sycl::local_accessor<std::byte, 1>(temp_memory_size, cgh); cgh.parallel_for( sycl::nd_range<1>(global_size, local_size), [=](sycl::nd_item<1> item) { OffsetT segment_begin = begin_offsets[item.get_group_linear_id()]; OffsetT segment_end = end_offsets[item.get_group_linear_id()]; if (segment_begin == segment_end) { if (item.get_local_linear_id() == 0) { outputs[item.get_group_linear_id()] = init; } return; } // Create a handle that associates the group with an allocation it // can use auto handle = sycl::ext::oneapi::experimental::group_with_scratchpad( item.get_group(), sycl::span(&scratch[0], temp_memory_size)); T group_aggregate = sycl::ext::oneapi::experimental::joint_reduce( handle, inputs + segment_begin, inputs + segment_end, init, binary_op); if (item.get_local_linear_id() == 0) { outputs[item.get_group_linear_id()] = group_aggregate; } }); }); } else { dpct::device::segmented_reduce<GROUP_SIZE>(queue, inputs, outputs, segment_count, begin_offsets, end_offsets, binary_op, init); } } } // namespace experimental #endif // SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS } // namespace device } // namespace dpct #endif
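// A minimal usage sketch of dpct::device::segmented_reduce as declared above.
// The queue, sizes, and data values are illustrative assumptions and not part
// of this header; the call itself follows the template signature defined here.
// The block is guarded so it does not alter the header when included.
#if 0
#include <sycl/sycl.hpp>
// Assumes this dpct header is already included by the translation unit.

void example_segmented_sum(sycl::queue q) {
  constexpr size_t kSegments = 2;
  int host_in[6] = {1, 2, 3, 4, 5, 6};
  int host_begin[kSegments] = {0, 3};  // segment i starts at begin_offsets[i]
  int host_end[kSegments] = {3, 6};    // and ends just before end_offsets[i]

  int *in = sycl::malloc_device<int>(6, q);
  int *out = sycl::malloc_device<int>(kSegments, q);
  int *begin = sycl::malloc_device<int>(kSegments, q);
  int *end = sycl::malloc_device<int>(kSegments, q);
  q.memcpy(in, host_in, sizeof(host_in)).wait();
  q.memcpy(begin, host_begin, sizeof(host_begin)).wait();
  q.memcpy(end, host_end, sizeof(host_end)).wait();

  // GROUP_SIZE work-items cooperate on each segment; one sum per segment is
  // written to 'out'.
  dpct::device::segmented_reduce<128>(q, in, out, kSegments, begin, end,
                                      sycl::plus<>(), 0);
  q.wait();

  int host_out[kSegments];
  q.memcpy(host_out, out, sizeof(host_out)).wait();  // expect {6, 15}

  sycl::free(in, q);
  sycl::free(out, q);
  sycl::free(begin, q);
  sycl::free(end, q);
}
#endif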
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_odd_even_merge_sort_SYCLMigration/01_dpct_output/include/dpct/dpl_extras/functional.h
//==---- functional.h -----------------------------*- C++ -*----------------==// // // Copyright (C) Intel Corporation // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // See https://llvm.org/LICENSE.txt for license information. // //===----------------------------------------------------------------------===// #ifndef __DPCT_FUNCTIONAL_H__ #define __DPCT_FUNCTIONAL_H__ #include <functional> #include <oneapi/dpl/functional> #include <oneapi/dpl/iterator> #if ONEDPL_USE_DPCPP_BACKEND #include <oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_utils.h> #endif #include <tuple> #include <utility> namespace dpct { struct null_type {}; namespace internal { template <class _ExecPolicy, class _T> using enable_if_execution_policy = typename std::enable_if<oneapi::dpl::execution::is_execution_policy< typename std::decay<_ExecPolicy>::type>::value, _T>::type; template <typename _T> struct is_hetero_execution_policy : ::std::false_type {}; template <typename... PolicyParams> struct is_hetero_execution_policy< oneapi::dpl::execution::device_policy<PolicyParams...>> : ::std::true_type { }; template <typename _T> struct is_fpga_execution_policy : ::std::false_type {}; #if _ONEDPL_FPGA_DEVICE template <unsigned int unroll_factor, typename... PolicyParams> struct is_hetero_execution_policy< execution::fpga_policy<unroll_factor, PolicyParams...>> : ::std::true_type { }; #endif template <class _ExecPolicy, class _T> using enable_if_hetero_execution_policy = typename std::enable_if< is_hetero_execution_policy<typename std::decay<_ExecPolicy>::type>::value, _T>::type; #if _ONEDPL_CPP14_INTEGER_SEQUENCE_PRESENT template <std::size_t... _Sp> using index_sequence = std::index_sequence<_Sp...>; template <std::size_t _Np> using make_index_sequence = std::make_index_sequence<_Np>; #else template <std::size_t... _Sp> class index_sequence {}; template <std::size_t _Np, std::size_t... _Sp> struct make_index_sequence_impl : make_index_sequence_impl<_Np - 1, _Np - 1, _Sp...> {}; template <std::size_t... _Sp> struct make_index_sequence_impl<0, _Sp...> { using type = index_sequence<_Sp...>; }; template <std::size_t _Np> using make_index_sequence = typename make_index_sequence_impl<_Np>::type; #endif // Minimal buffer implementations for temporary storage in mapping rules // Some of our algorithms need to start with raw memory buffer, // not an initialized array, because initialization/destruction // would make the span be at least O(N). #if ONEDPL_USE_DPCPP_BACKEND template <typename _Tp> class __buffer { sycl::buffer<_Tp, 1> __buf; __buffer(const __buffer &) = delete; void operator=(const __buffer &) = delete; public: // Try to obtain buffer of given size to store objects of _Tp type __buffer(std::size_t __n) : __buf(sycl::range<1>(__n)) {} // Return pointer to buffer, or NULL if buffer could not be obtained. auto get() -> decltype(oneapi::dpl::begin(__buf)) const { return oneapi::dpl::begin(__buf); } }; #else template <typename _Tp> class __buffer { std::unique_ptr<_Tp> _M_ptr; __buffer(const __buffer &) = delete; void operator=(const __buffer &) = delete; public: // Try to obtain buffer of given size to store objects of _Tp type __buffer(const std::size_t __n) : _M_ptr(new _Tp[__n]) {} // Return pointer to buffer, or NULL if buffer could not be obtained. _Tp *get() const { return _M_ptr.get(); } }; #endif // Implements C++14 std::less<void> specialization to allow parameter type // deduction. 
class __less { public: template <typename _Xp, typename _Yp> bool operator()(_Xp &&__x, _Yp &&__y) const { return std::forward<_Xp>(__x) < std::forward<_Yp>(__y); } }; template <typename Policy, typename NewName> struct rebind_policy { using type = Policy; }; template <typename KernelName, typename NewName> struct rebind_policy<oneapi::dpl::execution::device_policy<KernelName>, NewName> { using type = oneapi::dpl::execution::device_policy<NewName>; }; #if _ONEDPL_FPGA_DEVICE template <unsigned int factor, typename KernelName, typename NewName> struct rebind_policy<oneapi::dpl::execution::fpga_policy<factor, KernelName>, NewName> { using type = oneapi::dpl::execution::fpga_policy<factor, NewName>; }; #endif template <typename T1, typename T2, typename R1 = typename std::iterator_traits<T1>::reference, typename R2 = typename std::iterator_traits<T2>::reference> struct perm_fun { typedef R2 result_of; perm_fun(T1 input) : source(input) {} R2 operator()(R1 x) const { return *(source + x); } private: T1 source; }; // Functor compares first element (key) from tied sequence. template <typename Compare = class internal::__less> struct compare_key_fun { typedef bool result_of; compare_key_fun(Compare _comp = internal::__less()) : comp(_comp) {} template <typename _T1, typename _T2> result_of operator()(_T1 &&a, _T2 &&b) const { using std::get; return comp(get<0>(a), get<0>(b)); } private: mutable Compare comp; }; // Functor evaluates second element of tied sequence with predicate. // Used by: copy_if, remove_copy_if, stable_partition_copy // Lambda: template <typename Predicate> struct predicate_key_fun { typedef bool result_of; predicate_key_fun(Predicate _pred) : pred(_pred) {} template <typename _T1> result_of operator()(_T1 &&a) const { using std::get; return pred(get<1>(a)); } private: mutable Predicate pred; }; // Used by: remove_if template <typename Predicate> struct negate_predicate_key_fun { typedef bool result_of; negate_predicate_key_fun(Predicate _pred) : pred(_pred) {} template <typename _T1> result_of operator()(_T1 &&a) const { using std::get; return !pred(get<1>(a)); } private: mutable Predicate pred; }; template <typename T> struct sequence_fun { using result_type = T; sequence_fun(T _init, T _step) : init(_init), step(_step) {} template <typename _T> result_type operator()(_T &&i) const { return static_cast<T>(init + step * i); } private: const T init; const T step; }; //[binary_pred](Ref a, Ref b){ return(binary_pred(get<0>(a),get<0>(b))); template <typename Predicate> struct unique_fun { typedef bool result_of; unique_fun(Predicate _pred) : pred(_pred) {} template <typename _T> result_of operator()(_T &&a, _T &&b) const { using std::get; return pred(get<0>(a), get<0>(b)); } private: mutable Predicate pred; }; // Lambda: [pred, &new_value](Ref1 a, Ref2 s) {return pred(s) ? new_value : a; // }); template <typename T, typename Predicate> struct replace_if_fun { public: typedef T result_of; replace_if_fun(Predicate _pred, T _new_value) : pred(_pred), new_value(_new_value) {} template <typename _T1, typename _T2> T operator()(_T1 &&a, _T2 &&s) const { return pred(s) ? new_value : a; } private: mutable Predicate pred; const T new_value; }; //[pred,op](Ref a){return pred(a) ? 
op(a) : a; } template <typename T, typename Predicate, typename Operator> struct transform_if_fun { transform_if_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {} template <typename _T> void operator()(_T&& t) const { using std::get; if (pred(get<0>(t))) get<1>(t) = op(get<0>(t)); } private: mutable Predicate pred; mutable Operator op; }; //[pred, op](Ref1 a, Ref2 s) { return pred(s) ? op(a) : a; }); template <typename T, typename Predicate, typename Operator> struct transform_if_unary_zip_mask_fun { transform_if_unary_zip_mask_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {} template <typename _T> void operator()(_T&& t) const { using std::get; if (pred(get<1>(t))) get<2>(t) = op(get<0>(t)); } private: mutable Predicate pred; mutable Operator op; }; template <typename T, typename Predicate, typename BinaryOperation> class transform_if_zip_mask_fun { public: transform_if_zip_mask_fun(Predicate _pred = oneapi::dpl::identity(), BinaryOperation _op = oneapi::dpl::identity()) : pred(_pred), op(_op) {} template <typename _T> void operator()(_T &&t) const { using std::get; if (pred(get<2>(t))) get<3>(t) = op(get<0>(t), get<1>(t)); } private: mutable Predicate pred; mutable BinaryOperation op; }; // This following code is similar to a section of code in // oneDPL/include/oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_radix_sort.h // It has a similar approach, and could be consolidated. // Outside of some differences in approach, there are two significant // differences in function. // // 1) This code allows the output type of the bit range translation to be fit // into to the minimal type required to provide that many bits. The code in // oneDPL to calculate the bucket for the radix is similar but its output is // always std::uint32_t. The assumption that the bit range desired will fit in // 32 bits is not true for this code. // // 2) This code ensures that for floating point type, -0.0f and 0.0f map to the // same value. This allows the output of this translation to be used to provide // a sort which ensures the stability of these values for floating point types. 
template <int N> struct uint_byte_map {}; template <> struct uint_byte_map<1> { using type = uint8_t; }; template <> struct uint_byte_map<2> { using type = uint16_t; }; template <> struct uint_byte_map<4> { using type = uint32_t; }; template <> struct uint_byte_map<8> { using type = uint64_t; }; template <typename T> struct uint_map { using type = typename uint_byte_map<sizeof(T)>::type; }; template <typename T, typename OutKeyT> class translate_key { using uint_type_t = typename uint_map<T>::type; public: translate_key(int begin_bit, int end_bit) { shift = begin_bit; mask = ~OutKeyT(0); // all ones mask = mask >> (sizeof(OutKeyT) * 8 - (end_bit - begin_bit)); // setup appropriate mask flip_sign = uint_type_t(1) << (sizeof(uint_type_t) * 8 - 1); // sign bit flip_key = ~uint_type_t(0); // 0xF...F } inline OutKeyT operator()(const T &key) const { uint_type_t intermediate; if constexpr (std::is_floating_point<T>::value) { // normal case (both -0.0f and 0.0f equal -0.0f) if (key != T(-0.0f)) { uint_type_t is_negative = reinterpret_cast<const uint_type_t &>(key) >> (sizeof(uint_type_t) * 8 - 1); intermediate = reinterpret_cast<const uint_type_t &>(key) ^ ((is_negative * flip_key) | flip_sign); } else // special case for -0.0f to keep stability with 0.0f { T negzero = T(-0.0f); intermediate = reinterpret_cast<const uint_type_t &>(negzero); } } else if constexpr (std::is_signed<T>::value) { intermediate = reinterpret_cast<const uint_type_t &>(key) ^ flip_sign; } else { intermediate = key; } return static_cast<OutKeyT>(intermediate >> shift) & mask; // shift, cast, and mask } private: uint8_t shift; OutKeyT mask; uint_type_t flip_sign; uint_type_t flip_key; }; } // end namespace internal } // end namespace dpct #endif
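// A minimal sketch of how the translate_key helper above turns floating-point
// keys into radix-sortable unsigned digits. The digit width and sample values
// are illustrative assumptions; only the class interface (constructor taking a
// bit range, call operator taking a key) comes from this header. Guarded so it
// does not alter the header when included.
#if 0
#include <cstdint>
#include <cstdio>
// Assumes this dpct header is already included by the translation unit.

void example_translate_key() {
  // Extract the top 8 bits (bits 24..31) of each transformed 32-bit key.
  dpct::internal::translate_key<float, std::uint32_t> digit(24, 32);

  // Negative keys map to smaller digits than positive keys, and -0.0f maps to
  // the same digit as 0.0f, so a radix pass over these digits preserves the
  // ordering (and stability) of the original float keys.
  std::printf("%u %u %u %u\n", (unsigned)digit(-2.0f), (unsigned)digit(-0.0f),
              (unsigned)digit(0.0f), (unsigned)digit(3.5f));
}
#endif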
h
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/simple-add/src/simple-add-usm.cpp
//============================================================== // Iota is the equivalent of a Hello, World! sample for data parallel programs. // Building and running the sample verifies that your development environment // is setup correctly and demonstrates the use of the core features of SYCL. // This sample runs on both CPU and GPU (or FPGA). When run, it computes on both // the CPU and offload device, then compares results. If the code executes on // both CPU and the offload device, the name of the offload device and a success // message are displayed. And, your development environment is setup correctly! // // For comprehensive instructions regarding SYCL Programming, go to // https://software.intel.com/en-us/oneapi-programming-guide and search based on // relevant terms noted in the comments. // // SYCL material used in the code sample: // • A one dimensional array of data. // • A device queue and kernel. //============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <sycl/sycl.hpp> #include <array> #include <iostream> #if FPGA_HARDWARE || FPGA_EMULATOR || FPGA_SIMULATOR #include <sycl/ext/intel/fpga_extensions.hpp> #endif using namespace sycl; using namespace std; // Create an exception handler for asynchronous SYCL exceptions static auto exception_handler = [](sycl::exception_list e_list) { for (std::exception_ptr const &e : e_list) { try { std::rethrow_exception(e); } catch (std::exception const &e) { #if _DEBUG std::cout << "Failure" << std::endl; #endif std::terminate(); } } }; // Array size for this example. constexpr size_t array_size = 10000; //************************************ // Iota in SYCL on device. //************************************ void IotaParallel(queue &q, int *a, size_t size, int value) { // Create the range object for the array. range num_items{size}; // Use parallel_for to populate consecutive numbers starting with a specified // value in parallel on device. This executes the kernel. // 1st parameter is the number of work items to use. // 2nd parameter is the kernel, a lambda that specifies what to do per // work item. The parameter of the lambda is the work item id. // SYCL supports unnamed lambda kernel by default. auto e = q.parallel_for(num_items, [=](auto i) { a[i] = value + i; }); // q.parallel_for() is an asynchronous call. SYCL runtime enqueues and runs // the kernel asynchronously. Wait for the asynchronous call to complete. e.wait(); } //************************************ // Demonstrate iota both sequential on CPU and parallel on device. //************************************ int main() { // Create device selector for the device of your interest. #if FPGA_EMULATOR // Intel extension: FPGA emulator selector on systems without FPGA card. auto selector = sycl::ext::intel::fpga_emulator_selector_v; #elif FPGA_SIMULATOR // Intel extension: FPGA simulator selector on systems without FPGA card. auto selector = sycl::ext::intel::fpga_simulator_selector_v; #elif FPGA_HARDWARE // Intel extension: FPGA selector on systems with FPGA card. auto selector = sycl::ext::intel::fpga_selector_v; #else // The default device selector will select the most performant device. auto selector = default_selector_v; #endif constexpr int value = 100000; try { queue q(selector, exception_handler); // Print out the device information used for the kernel code. 
cout << "Running on device: " << q.get_device().get_info<info::device::name>() << "\n"; cout << "Array size: " << array_size << "\n"; int *sequential = malloc_shared<int>(array_size, q); int *parallel = malloc_shared<int>(array_size, q); if ((sequential == nullptr) || (parallel == nullptr)) { if (sequential != nullptr) free(sequential, q); if (parallel != nullptr) free(parallel, q); cout << "Shared memory allocation failure.\n"; return -1; } // Sequential iota. for (size_t i = 0; i < array_size; i++) sequential[i] = value + i; // Parallel iota in SYCL. IotaParallel(q, parallel, array_size, value); // Verify two results are equal. for (size_t i = 0; i < array_size; i++) { if (parallel[i] != sequential[i]) { cout << "Failed on device.\n"; return -1; } } int indices[]{0, 1, 2, (array_size - 1)}; constexpr size_t indices_size = sizeof(indices) / sizeof(int); // Print out iota result. for (int i = 0; i < indices_size; i++) { int j = indices[i]; if (i == indices_size - 1) cout << "...\n"; cout << "[" << j << "]: " << j << " + " << value << " = " << sequential[j] << "\n"; } free(sequential, q); free(parallel, q); } catch (std::exception const &e) { cout << "An exception is caught while computing on device.\n"; terminate(); } cout << "Successfully completed on device.\n"; return 0; }
cpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/simple-add/src/simple-add-buffers.cpp
//============================================================== // Iota is the equivalent of a Hello, World! sample for data parallel programs. // Building and running the sample verifies that your development environment // is setup correctly and demonstrates the use of the core features of SYCL. // This sample runs on both CPU and GPU (or FPGA). When run, it computes on both // the CPU and offload device, then compares results. If the code executes on // both CPU and the offload device, the name of the offload device and a success // message are displayed. And, your development environment is setup correctly! // // For comprehensive instructions regarding SYCL Programming, go to // https://software.intel.com/en-us/oneapi-programming-guide and search based on // relevant terms noted in the comments. // // SYCL material used in the code sample: // • A one dimensional array of data. // • A device queue, buffer, accessor, and kernel. //============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <sycl/sycl.hpp> #include <array> #include <iostream> #if FPGA_HARDWARE || FPGA_EMULATOR || FPGA_SIMULATOR #include <sycl/ext/intel/fpga_extensions.hpp> #endif using namespace sycl; using namespace std; // Create an exception handler for asynchronous SYCL exceptions static auto exception_handler = [](sycl::exception_list e_list) { for (std::exception_ptr const &e : e_list) { try { std::rethrow_exception(e); } catch (std::exception const &e) { #if _DEBUG std::cout << "Failure" << std::endl; #endif std::terminate(); } } }; // Array type and data size for this example. constexpr size_t array_size = 10000; typedef array<int, array_size> IntArray; //************************************ // Iota in SYCL on device. //************************************ void IotaParallel(queue &q, IntArray &a_array, int value) { // Create the range object for the array managed by the buffer. range num_items{a_array.size()}; // Create buffer that hold the data shared between the host and the devices. // The buffer destructor is responsible to copy the data back to host when it // goes out of scope. buffer a_buf(a_array); // Submit a command group to the queue by a lambda function that contains the // data access permission and device computation (kernel). q.submit([&](auto &h) { // Create an accessor with write permission. accessor a(a_buf, h, write_only, no_init); // Use parallel_for to populate consecutive numbers starting with a // specified value in parallel on device. This executes the kernel. // 1st parameter is the number of work items to use. // 2nd parameter is the kernel, a lambda that specifies what to do per // work item. The parameter of the lambda is the work item id. // SYCL supports unnamed lambda kernel by default. h.parallel_for(num_items, [=](auto i) { a[i] = value + i; }); }); } //************************************ // Demonstrate iota both sequential on CPU and parallel on device. //************************************ int main() { // Create device selector for the device of your interest. #if FPGA_EMULATOR // Intel extension: FPGA emulator selector on systems without FPGA card. auto selector = sycl::ext::intel::fpga_emulator_selector_v; #elif FPGA_SIMULATOR // Intel extension: FPGA simulator selector on systems without FPGA card. 
auto selector = sycl::ext::intel::fpga_simulator_selector_v; #elif FPGA_HARDWARE // Intel extension: FPGA selector on systems with FPGA card. auto selector = sycl::ext::intel::fpga_selector_v; #else // The default device selector will select the most performant device. auto selector = default_selector_v; #endif // Create array objects with "array_size" to store data. IntArray sequential, parallel; constexpr int value = 100000; // Sequential iota. for (size_t i = 0; i < sequential.size(); i++) sequential[i] = value + i; try { queue q(selector, exception_handler); // Print out the device information used for the kernel code. cout << "Running on device: " << q.get_device().get_info<info::device::name>() << "\n"; cout << "Array size: " << parallel.size() << "\n"; // Parallel iota in SYCL. IotaParallel(q, parallel, value); } catch (std::exception const &e) { cout << "An exception is caught while computing on device.\n"; terminate(); } // Verify two results are equal. for (size_t i = 0; i < sequential.size(); i++) { if (parallel[i] != sequential[i]) { cout << "Failed on device.\n"; return -1; } } int indices[]{0, 1, 2, (sequential.size() - 1)}; constexpr size_t indices_size = sizeof(indices) / sizeof(int); // Print out iota result. for (int i = 0; i < indices_size; i++) { int j = indices[i]; if (i == indices_size - 1) cout << "...\n"; cout << "[" << j << "]: " << j << " + " << value << " = " << parallel[j] << "\n"; } cout << "Successfully completed on device.\n"; return 0; }
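// A minimal sketch of the write-back behavior described in the comments above:
// results written through a buffer become visible in the wrapped host storage
// once the buffer is destroyed (or when a host_accessor is created). The queue
// and data here are illustrative assumptions, not part of this sample; the
// block is guarded so the sample's behavior is unchanged.
#if 0
#include <sycl/sycl.hpp>
#include <array>

void example_buffer_writeback(sycl::queue &q) {
  std::array<int, 8> data{};
  {
    sycl::buffer buf(data);  // buffer owns the host data within this scope
    q.submit([&](sycl::handler &h) {
      sycl::accessor a(buf, h, sycl::write_only, sycl::no_init);
      h.parallel_for(sycl::range<1>(data.size()), [=](auto i) { a[i] = 42; });
    });
  }  // buffer destructor waits for the kernel and copies results back to data
  // 'data' now holds the kernel results and is safe to read on the host.
}
#endif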
cpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/complex_mult/src/complex_mult.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <sycl/sycl.hpp> #include <iomanip> #include <vector> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" #include "Complex.hpp" using namespace sycl; using namespace std; // Number of complex numbers passing to the SYCL code static const int num_elements = 10000; class CustomDeviceSelector { public: CustomDeviceSelector(std::string vendorName) : vendorName_(vendorName){}; int operator()(const device &dev) const { int device_rating = 0; // In the below code we are querying for the custom device specific to a // Vendor and if it is a GPU device we are giving the highest rating. The // second preference is given to any GPU device and the third preference is // given to CPU device. if (dev.is_gpu() & (dev.get_info<info::device::name>().find(vendorName_) != std::string::npos)) device_rating = 3; else if (dev.is_gpu()) device_rating = 2; else if (dev.is_cpu()) device_rating = 1; return device_rating; }; private: std::string vendorName_; }; // in_vect1 and in_vect2 are the vectors with num_elements complex nubers and // are inputs to the parallel function void SYCLParallel(queue &q, std::vector<Complex2> &in_vect1, std::vector<Complex2> &in_vect2, std::vector<Complex2> &out_vect) { auto R = range(in_vect1.size()); if (in_vect2.size() != in_vect1.size() || out_vect.size() != in_vect1.size()){ std::cout << "ERROR: Vector sizes do not match"<< "\n"; return; } // Setup input buffers buffer bufin_vect1(in_vect1); buffer bufin_vect2(in_vect2); // Setup Output buffers buffer bufout_vect(out_vect); std::cout << "Target Device: " << q.get_device().get_info<info::device::name>() << "\n"; // Submit Command group function object to the queue q.submit([&](auto &h) { // Accessors set as read mode accessor V1(bufin_vect1,h,read_only); accessor V2(bufin_vect2,h,read_only); // Accessor set to Write mode accessor V3 (bufout_vect,h,write_only); h.parallel_for(R, [=](auto i) { // call the complex_mul function that computes the multiplication of the // complex number V3[i] = V1[i].complex_mul(V2[i]); }); }); q.wait_and_throw(); } void Scalar(std::vector<Complex2> &in_vect1, std::vector<Complex2> &in_vect2, std::vector<Complex2> &out_vect) { if ((in_vect2.size() != in_vect1.size()) || (out_vect.size() != in_vect1.size())){ std::cout<<"ERROR: Vector sizes do not match"<<"\n"; return; } for (int i = 0; i < in_vect1.size(); i++) { out_vect[i] = in_vect1[i].complex_mul(in_vect2[i]); } } // Compare the results of the two output vectors from parallel and scalar. They // should be equal int Compare(std::vector<Complex2> &v1, std::vector<Complex2> &v2) { int ret_code = 1; if(v1.size() != v2.size()){ ret_code = -1; } for (int i = 0; i < v1.size(); i++) { if (v1[i] != v2[i]) { ret_code = -1; break; } } return ret_code; } int main() { // Declare your Input and Output vectors of the Complex2 class vector<Complex2> input_vect1; vector<Complex2> input_vect2; vector<Complex2> out_vect_parallel; vector<Complex2> out_vect_scalar; for (int i = 0; i < num_elements; i++) { input_vect1.push_back(Complex2(i + 2, i + 4)); input_vect2.push_back(Complex2(i + 4, i + 6)); out_vect_parallel.push_back(Complex2(0, 0)); out_vect_scalar.push_back(Complex2(0, 0)); } // Initialize your Input and Output Vectors. 
Inputs are initialized as below. // Outputs are initialized with 0 try { // Pass in the name of the vendor for which the device you want to query std::string vendor_name = "Intel"; // std::string vendor_name = "AMD"; // std::string vendor_name = "Nvidia"; // queue constructor passed exception handler CustomDeviceSelector selector(vendor_name); queue q(selector); // Call the SYCLParallel with the required inputs and outputs SYCLParallel(q, input_vect1, input_vect2, out_vect_parallel); } catch (...) { // some other exception detected std::cout << "Failure" << std::endl; std::terminate(); } std::cout << "****************************************Multiplying Complex numbers " "in Parallel********************************************************" << std::endl; // Print the outputs of the Parallel function int indices[]{0, 1, 2, 3, 4, (num_elements - 1)}; constexpr size_t indices_size = sizeof(indices) / sizeof(int); for (int i = 0; i < indices_size; i++) { int j = indices[i]; if (i == indices_size - 1) std::cout << "...\n"; std::cout << "[" << j << "] " << input_vect1[j] << " * " << input_vect2[j] << " = " << out_vect_parallel[j] << "\n"; } // Call the Scalar function with the required input and outputs Scalar(input_vect1, input_vect2, out_vect_scalar); // Compare the outputs from the parallel and the scalar functions. They should // be equal int ret_code = Compare(out_vect_parallel, out_vect_scalar); if (ret_code == 1) { std::cout << "Complex multiplication successfully run on the device" << "\n"; } else std::cout << "*********************************************Verification Failed. Results are " "not matched**************************" << "\n"; return 0; }
cpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/matrix_mul/src/matrix_mul_sycl.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= /** * Matrix_mul multiplies two large matrices both the CPU and the offload device, * then compares results. If the code executes on both CPU and the offload * device, the name of the offload device and a success message are displayed. * * For comprehensive instructions regarding SYCL Programming, go to * https://software.intel.com/en-us/oneapi-programming-guide and search based on * relevant terms noted in the comments. */ #include <sycl/sycl.hpp> #include <iostream> #include <limits> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" using namespace std; using namespace sycl; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Perform matrix multiplication on host to verify results from device. */ int VerifyResult(float (*c_back)[P]); int main() { // Host memory buffer that device will write data back before destruction. float(*c_back)[P] = new float[M][P]; // Intialize c_back for (int i = 0; i < M; i++) for (int j = 0; j < P; j++) c_back[i][j] = 0.0f; // Initialize the device queue with the default selector. The device queue is // used to enqueue kernels. It encapsulates all states needed for execution. try { queue q(default_selector_v); cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n"; // Create 2D buffers for matrices, buffer c is bound with host memory c_back buffer<float, 2> a_buf(range(M, N)); buffer<float, 2> b_buf(range(N, P)); buffer c_buf(reinterpret_cast<float *>(c_back), range(M, P)); cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; // Using three command groups to illustrate execution order. The use of // first two command groups for initializing matrices is not the most // efficient way. It just demonstrates the implicit multiple command group // execution ordering. // Submit command group to queue to initialize matrix a q.submit([&](auto &h) { // Get write only access to the buffer on a device. accessor a(a_buf, h, write_only); // Execute kernel. h.parallel_for(range(M, N), [=](auto index) { // Each element of matrix a is 1. a[index] = 1.0f; }); }); // Submit command group to queue to initialize matrix b q.submit([&](auto &h) { // Get write only access to the buffer on a device accessor b(b_buf, h, write_only); // Execute kernel. h.parallel_for(range(N, P), [=](auto index) { // Each column of b is the sequence 1,2,...,N b[index] = index[0] + 1.0f; }); }); // Submit command group to queue to multiply matrices: c = a * b q.submit([&](auto &h) { // Read from a and b, write to c accessor a(a_buf, h, read_only); accessor b(b_buf, h, read_only); accessor c(c_buf, h, write_only); int width_a = a_buf.get_range()[1]; // Execute kernel. h.parallel_for(range(M, P), [=](auto index) { // Get global position in Y direction. int row = index[0]; // Get global position in X direction. 
int col = index[1]; float sum = 0.0f; // Compute the result of one element of c for (int i = 0; i < width_a; i++) { sum += a[row][i] * b[i][col]; } c[index] = sum; }); }); } catch (sycl::exception const &e) { cout << "An exception is caught while multiplying matrices.\n"; terminate(); } int result; cout << "Result of matrix multiplication using SYCL: "; result = VerifyResult(c_back); delete[] c_back; return result; } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(float (*c_back)[P]) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
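// A minimal sketch of the implicit command-group ordering mentioned above:
// because the second submission reads a buffer that the first one writes, the
// SYCL runtime adds the dependency automatically and no explicit wait() is
// needed between them. Buffer sizes and values are illustrative assumptions,
// not part of this sample; the block is guarded so the sample is unchanged.
#if 0
#include <sycl/sycl.hpp>

void example_implicit_ordering(sycl::queue &q) {
  sycl::buffer<int, 1> a_buf(sycl::range<1>(16));
  sycl::buffer<int, 1> b_buf(sycl::range<1>(16));

  // Producer: fills a_buf.
  q.submit([&](sycl::handler &h) {
    sycl::accessor a(a_buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(sycl::range<1>(16), [=](auto i) { a[i] = 1; });
  });

  // Consumer: reads a_buf and writes b_buf. The read_only accessor on a_buf
  // is what orders this kernel after the producer kernel.
  q.submit([&](sycl::handler &h) {
    sycl::accessor a(a_buf, h, sycl::read_only);
    sycl::accessor b(b_buf, h, sycl::write_only, sycl::no_init);
    h.parallel_for(sycl::range<1>(16), [=](auto i) { b[i] = a[i] + 1; });
  });
}
#endif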
cpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/matrix_mul/src/matrix_mul_omp.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <float.h> #include <math.h> #include <omp.h> #include <iostream> #include <limits> using namespace std; // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ float a[M][N]; float b[N][P]; float c[M][P]; /** * Perform matrix multiplication on CPU with OpenMP. */ void MatrixMulOpenMpCpu(float (*a)[N], float (*b)[P], float (*c)[P]); /** * Perform matrix multiplication on GPU with OpenMP offloading. */ void __attribute__((noinline)) MatrixMulOpenMpGpuOffloading(); /** * Perform matrix multiplication on host to verify results from OpenMP. */ int VerifyResult(float (*c_back)[P]); int main(void) { int Result1, Result2; cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; cout << "Running on " << omp_get_num_devices() << " device(s)\n"; cout << "The default device id: " << omp_get_default_device() << "\n"; MatrixMulOpenMpCpu(a, b, c); cout << "Result of matrix multiplication using OpenMP: "; Result1 = VerifyResult(c); MatrixMulOpenMpGpuOffloading(); cout << "Result of matrix multiplication using GPU offloading: "; Result2 = VerifyResult(c); return Result1 || Result2; } void MatrixMulOpenMpCpu(float (*a)[N], float (*b)[P], float (*c)[P]) { int i, j, k; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a[i][j] = 1.0f; // Each column of b is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b[i][j] = i + 1.0f; for (i = 0; i < M; i++) for (j = 0; j < P; j++) c[i][j] = 0.0f; // Parallelize by row. The threads don't need to synchronize at // loop end, so "nowait" can be used. #pragma omp for nowait private(i, j, k) for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c[i][j] += a[i][k] * b[k][j]; } } } } void __attribute__((noinline)) MatrixMulOpenMpGpuOffloading() { int i, j, k; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a[i][j] = 1.0f; // Each column of b is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b[i][j] = i + 1.0f; // c is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c[i][j] = 0.0f; // Parallelize on target device. #pragma omp target teams distribute parallel for map(to : a, b) \ map(tofrom : c) thread_limit(128) { for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c[i][j] += a[i][k] * b[k][j]; } } } } } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(float (*c_back)[P]) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. 
for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobi_iterative_gpu_optimization/src/1_guided_jacobi_iterative_solver_cpu.cpp
//==============================================================
// Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <bits/stdc++.h>
#include <sycl/sycl.hpp>

#include <algorithm>
#include <chrono>
#include <cstdlib>
#include <ctime>
#include <oneapi/dpl/random>
#include <vector>

using namespace sycl;

typedef double Real;

// Program variables, feel free to change anything.
static const int kSize = 30000;
static const Real kCheckError = 1e-15;
static const Real kCalculationError = 1e-10;
static const int kMinRand = -1000;
static const int kMaxRand = 1000;
static const int kMaxSweeps = 100;
static const std::uint32_t kSeed = 666;
std::ofstream outfile;

// Function responsible for generating a float type
// diagonally dominant matrix. Float had to be used,
// as using double would result in segmentation faults
// for extremely large matrices. This is also an example
// of using SYCL-based RNG, which had to be used because using
// external (non-SYCL) functions slows down the execution
// drastically.
void GenerateMatrix(std::vector<float> &input_matrix,
                    std::vector<Real> &input_results) {
  for (int i = 0; i < kSize; ++i) {
    int j = kSize * i;
    Real sum = 0;

    oneapi::dpl::minstd_rand engine(kSeed, i + j);
    oneapi::dpl::uniform_real_distribution<Real> distr(kMinRand, kMaxRand);

    for (int j = i * kSize; j < kSize * (i + 1); ++j) {
      input_matrix[j] = distr(engine);
      input_matrix[j] = round(100. * input_matrix[j]) / 100.;
      sum += fabs(input_matrix[j]);
    }

    oneapi::dpl::uniform_int_distribution<int> distr2(0, 100);
    int gen_neg = distr2(engine);

    if (gen_neg < 50)
      input_matrix[i * kSize + i] = sum + 1;
    else
      input_matrix[i * kSize + i] = -1 * (sum + 1);

    input_results[i] = distr(engine);
    input_results[i] = round(100. * input_results[i]) / 100.;
  }
}

// Function responsible for printing the matrix, called only for N < 10.
void PrintMatrix(std::vector<float> input_matrix,
                 std::vector<Real> input_results) {
  for (int i = 0; i < kSize; ++i) {
    std::cout << '[';
    for (int j = i * kSize; j < kSize * (i + 1); ++j) {
      std::cout << input_matrix[j] << " ";
    }
    std::cout << "][" << input_results[i] << "]\n";
  }
  for (int i = 0; i < kSize; ++i) {
    outfile << '[';
    for (int j = i * kSize; j < kSize * (i + 1); ++j) {
      outfile << input_matrix[j] << " ";
    }
    outfile << "][" << input_results[i] << "]\n";
  }
}

// Function responsible for printing the results.
void PrintResults(Real *data, int kSize) {
  outfile << std::fixed;
  outfile << std::setprecision(11);
  for (int i = 0; i < kSize; ++i)
    outfile << "X" << i + 1 << " equals: " << data[i] << std::endl;
}

// Function responsible for checking if the algorithm has finished.
// For each of the newly calculated results the difference is checked
// between it and the corresponding result from the previous iteration.
// If the difference between them is less than the error variable the
// number is incremented by one; if all the results are correct the function
// returns a bool value that is true and the main function can stop.
bool CheckIfEqual(Real *data, Real *old_output_data) {
  int correct_result = 0;
  for (int i = 0; i < kSize; ++i) {
    if (fabs(data[i] - old_output_data[i]) < kCheckError) correct_result++;
  }
  return correct_result == kSize;
}

int main(int argc, char *argv[]) {
  auto begin_runtime = std::chrono::high_resolution_clock::now();

  outfile.open("report.txt", std::ios_base::out);

  std::vector<float> input_matrix(kSize * kSize);
  std::vector<Real> input_results(kSize);

  std::cout << "Running the code on CPU\n";
  outfile << "Running the code on CPU\n";

  auto begin_matrix = std::chrono::high_resolution_clock::now();

  GenerateMatrix(input_matrix, input_results);

  auto end_matrix = std::chrono::high_resolution_clock::now();
  auto elapsed_matrix = std::chrono::duration_cast<std::chrono::nanoseconds>(
      end_matrix - begin_matrix);

  std::cout << "\nMatrix generated, time elapsed: "
            << elapsed_matrix.count() * 1e-9 << " seconds.\n";
  outfile << "\nMatrix generated, time elapsed: "
          << elapsed_matrix.count() * 1e-9 << " seconds.\n";

  if (kSize < 10) PrintMatrix(input_matrix, input_results);

  auto begin_computations = std::chrono::high_resolution_clock::now();

  Real output_data[kSize];
  Real old_output_data[kSize];

  for (int i = 0; i < kSize; i++) output_data[i] = 0;

  bool is_equal = false;
  int sweeps = 0;

  // The main functionality of the Jacobi Solver. Every iteration
  // calculates new values until the difference between the values
  // calculated this iteration and the one before is less than the error.
  do {
    for (int i = 0; i < kSize; ++i) old_output_data[i] = output_data[i];

    for (int i = 0; i < kSize; ++i) {
      int j = kSize * i;
      int it = kSize * i + i;
      output_data[i] = input_results[i];
      for (int z = 0; z < kSize; ++z) {
        if (z != i)
          output_data[i] =
              output_data[i] -
              (old_output_data[z] * static_cast<Real>(input_matrix[j]));
        j = j + 1;
      }
      output_data[i] = output_data[i] / static_cast<Real>(input_matrix[it]);
    }

    ++sweeps;
    is_equal = CheckIfEqual(output_data, old_output_data);
  } while (!is_equal && sweeps < kMaxSweeps);

  auto end_computations = std::chrono::high_resolution_clock::now();
  auto elapsed_computations =
      std::chrono::duration_cast<std::chrono::nanoseconds>(end_computations -
                                                           begin_computations);

  std::cout << "\nComputations complete, time elapsed: "
            << elapsed_computations.count() * 1e-9 << " seconds.\n";
  std::cout << "Total number of sweeps: " << sweeps << "\nChecking results\n";
  outfile << "\nComputations complete, time elapsed: "
          << elapsed_computations.count() * 1e-9 << " seconds.\n";
  outfile << "Total number of sweeps: " << sweeps << "\nChecking results\n";

  auto begin_check = std::chrono::high_resolution_clock::now();

  std::vector<Real> output_results(kSize, 0);

  // Calculating a new set of results from the calculated values.
  for (int i = 0; i < kSize * kSize; ++i) {
    output_results[i / kSize] +=
        output_data[i % kSize] * static_cast<Real>(input_matrix[i]);
  }

  bool all_eq = true;

  // Comparing the newly calculated results with the ones that were
  // given. If the difference is less than the error rate for each of
  // the elements, then all values have been calculated correctly.
  for (int i = 0; i < kSize; ++i) {
    Real diff = fabs(output_results[i] - input_results[i]);
    if (diff > kCalculationError) all_eq = false;
  }

  if (all_eq) {
    std::cout << "All values are correct.\n";
    outfile << "All values are correct.\n";
  } else {
    std::cout << "There have been some errors. The values are not correct.\n";
    outfile << "There have been some errors. The values are not correct.\n";
  }

  auto end_check = std::chrono::high_resolution_clock::now();
  auto elapsed_check = std::chrono::duration_cast<std::chrono::nanoseconds>(
      end_check - begin_check);

  std::cout << "\nCheck complete, time elapsed: "
            << elapsed_check.count() * 1e-9 << " seconds.\n";
  outfile << "\nCheck complete, time elapsed: " << elapsed_check.count() * 1e-9
          << " seconds.\n";

  auto end_runtime = std::chrono::high_resolution_clock::now();
  auto elapsed_runtime = std::chrono::duration_cast<std::chrono::nanoseconds>(
      end_runtime - begin_runtime);

  std::cout << "Total runtime is " << elapsed_runtime.count() * 1e-9
            << " seconds.\n";
  outfile << "Total runtime is " << elapsed_runtime.count() * 1e-9
          << " seconds.\n";

  PrintResults(output_data, kSize);

  return 0;
}
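// The sweep implemented above applies the classical Jacobi update
//   x_new[i] = (b[i] - sum_{j != i} a[i][j] * x_old[j]) / a[i][i]
// for every row i. A compact single-sweep helper expressing the same rule is
// sketched below; the row-major layout of 'a' matches this sample, while the
// function itself is illustrative and not part of the original code, so it is
// guarded out of the build.
#if 0
#include <cstddef>
#include <vector>

void jacobi_sweep(const std::vector<float> &a, const std::vector<double> &b,
                  const std::vector<double> &x_old, std::vector<double> &x_new,
                  std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) {
    double acc = b[i];
    for (std::size_t j = 0; j < n; ++j)
      if (j != i) acc -= static_cast<double>(a[i * n + j]) * x_old[j];
    x_new[i] = acc / static_cast<double>(a[i * n + i]);
  }
}
#endif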
cpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobi_iterative_gpu_optimization/src/3_guided_jacobi_iterative_solver_multi_gpu.cpp
//============================================================== // Copyright © 2022 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <bits/stdc++.h> #include <sycl/sycl.hpp> #include <algorithm> #include <chrono> #include <cstdlib> #include <ctime> #include <oneapi/dpl/random> #include <vector> using namespace sycl; typedef float Real; // Program variables, feel free to change anything . static const int kSize = 30000; static const Real kCheckError = 1e-15; static const Real kCalculationError = 1e-10; static const int kMinRand = -1000; static const int kMaxRand = 1000; static const int kMaxSweeps = 100; static const std::uint32_t kSeed = 666; std::ofstream outfile; // Function responsible for generating a float type // diagonally dominant matrix. This is also an example // of using sycl based RNG which had to be used as using // external (non sycl) functions slows down the execution // drasticly. void GenerateMatrix(std::vector<float> &input_matrix, std::vector<Real> &input_results) { std::vector<queue> q; for (const auto &p : platform::get_platforms()) { if (p.get_info<info::platform::name>().find("Level-Zero") != std::string::npos) { for (const auto &d : p.get_devices()) { if (d.is_gpu() && d.get_info<info::device::name>().find("Intel") != std::string::npos) { q.push_back(queue(d)); outfile << "--Found GPU: " << d.get_info<info::device::name>() << "\n"; } } } } buffer bufin_mat(input_matrix); buffer bufin_res(input_results); q[0].submit([&](handler &h) { accessor in_mat_acc{bufin_mat, h}; accessor in_res_acc{bufin_res, h}; h.parallel_for(range<1>(kSize / 2), [=](id<1> id) { int i = id; int j = kSize * i; Real sum = 0; oneapi::dpl::minstd_rand engine(kSeed, i + j); oneapi::dpl::uniform_real_distribution<Real> distr(kMinRand, kMaxRand); for (int j = i * kSize; j < kSize * (i + 1); ++j) { in_mat_acc[j] = distr(engine); in_mat_acc[j] = round(100. * in_mat_acc[j]) / 100.; sum += fabs(in_mat_acc[j]); } oneapi::dpl::uniform_int_distribution<int> distr2(0, 100); int gen_neg = distr2(engine); if (gen_neg < 50) in_mat_acc[i * kSize + i] = sum + 1; else in_mat_acc[i * kSize + i] = -1 * (sum + 1); in_res_acc[i] = distr(engine); in_res_acc[i] = round(100. * in_res_acc[i]) / 100.; }); }); q[1].submit([&](handler &h) { accessor in_mat_acc{bufin_mat, h}; accessor in_res_acc{bufin_res, h}; h.parallel_for(range<1>(kSize / 2), [=](id<1> id) { int i = kSize / 2 + id; int j = kSize * i; Real sum = 0; oneapi::dpl::minstd_rand engine(kSeed, i + j); oneapi::dpl::uniform_real_distribution<Real> distr(kMinRand, kMaxRand); for (int j = i * kSize; j < kSize * (i + 1); ++j) { in_mat_acc[j] = distr(engine); in_mat_acc[j] = round(100. * in_mat_acc[j]) / 100.; sum += fabs(in_mat_acc[j]); } oneapi::dpl::uniform_int_distribution<int> distr2(0, 100); int gen_neg = distr2(engine); if (gen_neg < 50) in_mat_acc[i * kSize + i] = sum + 1; else in_mat_acc[i * kSize + i] = -1 * (sum + 1); in_res_acc[i] = distr(engine); in_res_acc[i] = round(100. * in_res_acc[i]) / 100.; }); }); } // Function responsible for printing the matrix, called only for N < 10. 
void PrintMatrix(const std::vector<float> &input_matrix, const std::vector<Real> &input_results) { for (int i = 0; i < kSize; ++i) { std::cout << '['; for (int j = i * kSize; j < kSize * (i + 1); ++j) { std::cout << input_matrix[j] << " "; } std::cout << "][" << input_results[i] << "]\n"; } for (int i = 0; i < kSize; ++i) { outfile << '['; for (int j = i * kSize; j < kSize * (i + 1); ++j) { outfile << input_matrix[j] << " "; } outfile << "][" << input_results[i] << "]\n"; } } // Function responsible for printing the results. void PrintResults(const std::vector<Real> &data, int N) { outfile << std::fixed; outfile << std::setprecision(11); for (int i = 0; i < N; ++i) outfile << "X" << i + 1 << " equals: " << data[i] << std::endl; } // Function responsible for checking if the algorithm has finished. // For each of the newly calculated results the difference is checked // betwenn it and the corresponding result from the previous iteration. // If the difference between them is less than the error variable the // number is incremented by one, if all the results are correct the function // returns a bool value that is true and the main function can stop. bool CheckIfEqual(const std::vector<Real> &data, const std::vector<Real> &old_output_data) { int correct_result = 0; for (int i = 0; i < kSize; ++i) { if (fabs(data[i] - old_output_data[i]) < kCheckError) correct_result++; } return correct_result == kSize; } int main(int argc, char *argv[]) { auto begin_runtime = std::chrono::high_resolution_clock::now(); outfile.open("report.txt", std::ios_base::out); std::vector<float> input_matrix(kSize * kSize); std::vector<Real> input_results(kSize); std::vector<queue> q; for (const auto &p : platform::get_platforms()) { if (p.get_info<info::platform::name>().find("Level-Zero") != std::string::npos) { for (const auto &d : p.get_devices()) { if (d.is_gpu() && d.get_info<info::device::name>().find("Intel") != std::string::npos) { q.push_back(queue(d)); std::cout << "--Found GPU: " << d.get_info<info::device::name>() << "\n"; } } } } if (q.size() < 2) { std::cout << "Two GPUs are needed to run this code. Aborting\n"; return 0; } auto begin_matrix = std::chrono::high_resolution_clock::now(); GenerateMatrix(input_matrix, input_results); buffer bufin_mat(input_matrix); buffer bufin_res(input_results); auto end_matrix = std::chrono::high_resolution_clock::now(); auto elapsed_matrix = std::chrono::duration_cast<std::chrono::nanoseconds>( end_matrix - begin_matrix); std::cout << "\nMatrix generated, time elapsed: " << elapsed_matrix.count() * 1e-9 << " seconds.\n"; outfile << "\nMatrix generated, time elapsed: " << elapsed_matrix.count() * 1e-9 << " seconds.\n"; if (kSize < 10) PrintMatrix(input_matrix, input_results); auto begin_computations = std::chrono::high_resolution_clock::now(); std::vector<Real> output_data(kSize, 0); std::vector<Real> old_output_data(kSize, 0); for (int i = 0; i < kSize; i++) output_data[i] = 0; bool is_equal = false; int sweeps = 0; // The main functionality of the Jacobi Solver. Every iteration // calculates new values until the difference between the values // calculatedthis iteration and the one before is less than the error. 
{ do { buffer bufout_data(output_data); buffer bufold_out_data(old_output_data); for (int i = 0; i < kSize; ++i) old_output_data[i] = output_data[i]; q[0].submit([&](handler &h) { accessor out_data_acc{bufout_data, h}; accessor out_old_data_acc{bufold_out_data, h}; accessor in_mat_acc{bufin_mat, h, read_only}; accessor in_res_acc{bufin_res, h, read_only}; h.parallel_for(range<1>(kSize / 2), [=](id<1> id) { int i = id; int j = kSize * i; int it = kSize * i + i; out_data_acc[i] = in_res_acc[i]; for (int z = 0; z < kSize; ++z) { if (z != i) out_data_acc[i] = out_data_acc[i] - (out_old_data_acc[z] * static_cast<Real>(in_mat_acc[j])); j = j + 1; } out_data_acc[i] = out_data_acc[i] / static_cast<Real>(in_mat_acc[it]); }); }) .wait(); q[1].submit([&](handler &h) { accessor out_data_acc{bufout_data, h}; accessor out_old_data_acc{bufold_out_data, h}; accessor in_mat_acc{bufin_mat, h, read_only}; accessor in_res_acc{bufin_res, h, read_only}; h.parallel_for(range<1>(kSize / 2), [=](id<1> id) { int i = kSize / 2 + id; int j = kSize * i; int it = kSize * i + i; out_data_acc[i] = in_res_acc[i]; for (int z = 0; z < kSize; ++z) { if (z != i) out_data_acc[i] = out_data_acc[i] - (out_old_data_acc[z] * static_cast<Real>(in_mat_acc[j])); j = j + 1; } out_data_acc[i] = out_data_acc[i] / static_cast<Real>(in_mat_acc[it]); }); }) .wait(); bufout_data.get_access<access::mode::read>(); bufold_out_data.get_access<access::mode::read>(); ++sweeps; is_equal = CheckIfEqual(output_data, old_output_data); } while (!is_equal && sweeps < kMaxSweeps); } auto end_computations = std::chrono::high_resolution_clock::now(); auto elapsed_computations = std::chrono::duration_cast<std::chrono::nanoseconds>(end_computations - begin_computations); std::cout << "\nComputations complete, time elapsed: " << elapsed_computations.count() * 1e-9 << " seconds.\n"; std::cout << "Total number of sweeps: " << sweeps << "\nChecking results\n"; outfile << "\nComputations complete, time elapsed: " << elapsed_computations.count() * 1e-9 << " seconds.\n"; outfile << "Total number of sweeps: " << sweeps << "\nChecking results\n"; auto begin_check = std::chrono::high_resolution_clock::now(); std::vector<Real> output_results(kSize, 0); // Calculating a new set of results from the calculated values. for (int i = 0; i < kSize * kSize; ++i) { output_results[i / kSize] += output_data[i % kSize] * static_cast<Real>(input_matrix[i]); } bool *all_eq = malloc_shared<bool>(1, q[0]); all_eq[0] = true; // Comparing the newly calculated results with the ones that were // given. If the difference is less than the error rate for each of // the elements, then all values have been calculated correctly. { buffer bufout_res(output_results); q[0].submit([&](handler &h) { accessor R{bufin_res, h, read_only}; accessor NR{bufout_res, h, read_only}; h.parallel_for(range<1>(kSize), [=](id<1> id) { Real diff = fabs(NR[id] - R[id]); if (diff > kCalculationError) all_eq[0] = false; }); }); } if (all_eq[0]) { std::cout << "All values are correct.\n"; outfile << "All values are correct.\n"; } else { std::cout << "There have been some errors. The values are not correct.\n"; outfile << "There have been some errors. 
The values are not correct.\n"; } auto end_check = std::chrono::high_resolution_clock::now(); auto elapsed_check = std::chrono::duration_cast<std::chrono::nanoseconds>( end_check - begin_check); std::cout << "\nCheck complete, time elapsed: " << elapsed_check.count() * 1e-9 << " seconds.\n"; outfile << "\nCheck complete, time elapsed: " << elapsed_check.count() * 1e-9 << " seconds.\n"; auto end_runtime = std::chrono::high_resolution_clock::now(); auto elapsed_runtime = std::chrono::duration_cast<std::chrono::nanoseconds>( end_runtime - begin_runtime); std::cout << "Total runtime is " << elapsed_runtime.count() * 1e-9 << " seconds.\n"; outfile << "Total runtime is " << elapsed_runtime.count() * 1e-9 << " seconds.\n"; PrintResults(output_data, kSize); return 0; }
cpp
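The multi-GPU solver above always splits a parallel_for over the kSize rows into two halves: queue 0 handles rows [0, kSize/2) and queue 1 handles rows [kSize/2, kSize), offsetting the row index by kSize/2 in the second kernel. A stripped-down sketch of that splitting pattern under simplified assumptions (a toy vector-scaling kernel instead of the Jacobi update, and a fallback to a single device when two GPUs are not available, so the sketch stays runnable):

#include <sycl/sycl.hpp>

#include <iostream>
#include <vector>

int main() {
  constexpr int kN = 1024;  // assumed even, like kSize in the sample
  std::vector<float> data(kN, 1.0f);

  std::vector<sycl::queue> q;
  for (const auto &d : sycl::device::get_devices(sycl::info::device_type::gpu))
    q.push_back(sycl::queue(d));
  if (q.empty()) q.push_back(sycl::queue(sycl::default_selector_v));
  if (q.size() < 2) q.push_back(q[0]);  // unlike the sample, reuse one device if needed

  {
    sycl::buffer buf(data);
    // First half of the rows on queue 0 ...
    q[0].submit([&](sycl::handler &h) {
      sycl::accessor acc{buf, h};
      h.parallel_for(sycl::range<1>(kN / 2), [=](sycl::id<1> id) {
        int i = id;  // rows [0, kN/2)
        acc[i] *= 2.0f;
      });
    });
    // ... second half on queue 1, with the index offset by kN / 2.
    q[1].submit([&](sycl::handler &h) {
      sycl::accessor acc{buf, h};
      h.parallel_for(sycl::range<1>(kN / 2), [=](sycl::id<1> id) {
        int i = kN / 2 + id;  // rows [kN/2, kN)
        acc[i] *= 2.0f;
      });
    });
  }  // buffer destruction waits for both kernels and writes back to data

  std::cout << "data[0] = " << data[0] << ", data[kN-1] = " << data[kN - 1]
            << "\n";
  return 0;
}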
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobi_iterative_gpu_optimization/src/2_guided_jacobi_iterative_solver_gpu.cpp
//============================================================== // Copyright © 2022 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <bits/stdc++.h> #include <sycl/sycl.hpp> #include <algorithm> #include <chrono> #include <cstdlib> #include <ctime> #include <oneapi/dpl/random> #include <vector> using namespace sycl; typedef float Real; // Program variables, feel free to change anything . static const int kSize = 30000; static const Real kCheckError = 1e-15; static const Real kCalculationError = 1e-10; static const int kMinRand = -1000; static const int kMaxRand = 1000; static const int kMaxSweeps = 100; static const std::uint32_t kSeed = 666; std::ofstream outfile; // Function responsible for generating a float type // diagonally dominant matrix. This is also an example // of using sycl based RNG which had to be used as using // external (non sycl) functions slows down the execution // drasticly. void GenerateMatrix(std::vector<float> &input_matrix, std::vector<Real> &input_results) { queue q(gpu_selector_v); buffer bufin_mat(input_matrix); buffer bufin_res(input_results); q.submit([&](handler &h) { accessor in_mat_acc{bufin_mat, h}; accessor in_res_acc{bufin_res, h}; h.parallel_for(range<1>(kSize), [=](id<1> id) { int i = id; int j = kSize * i; Real sum = 0; oneapi::dpl::minstd_rand engine(kSeed, i + j); oneapi::dpl::uniform_real_distribution<Real> distr(kMinRand, kMaxRand); for (int j = i * kSize; j < kSize * (i + 1); ++j) { in_mat_acc[j] = distr(engine); in_mat_acc[j] = round(100. * in_mat_acc[j]) / 100.; sum += fabs(in_mat_acc[j]); } oneapi::dpl::uniform_int_distribution<int> distr2(0, 100); int gen_neg = distr2(engine); if (gen_neg < 50) in_mat_acc[i * kSize + i] = sum + 1; else in_mat_acc[i * kSize + i] = -1 * (sum + 1); in_res_acc[i] = distr(engine); in_res_acc[i] = round(100. * in_res_acc[i]) / 100.; }); }); } // Function responsible for printing the matrix, called only for N < 10. void PrintMatrix(const std::vector<float> &input_matrix, const std::vector<Real> &input_results) { for (int i = 0; i < kSize; ++i) { std::cout << '['; for (int j = i * kSize; j < kSize * (i + 1); ++j) { std::cout << input_matrix[j] << " "; } std::cout << "][" << input_results[i] << "]\n"; } for (int i = 0; i < kSize; ++i) { outfile << '['; for (int j = i * kSize; j < kSize * (i + 1); ++j) { outfile << input_matrix[j] << " "; } outfile << "][" << input_results[i] << "]\n"; } } // Function responsible for printing the results. void PrintResults(Real *output_data, int kSize) { outfile << std::fixed; outfile << std::setprecision(11); for (int i = 0; i < kSize; ++i) outfile << "X" << i + 1 << " equals: " << output_data[i] << std::endl; } // Function responsible for checking if the algorithm has finished. // For each of the newly calculated results the difference is checked // betwenn it and the corresponding result from the previous iteration. // If the difference between them is less than the error variable the // number is incremented by one, if all the results are correct the function // returns a bool value that is true and the main function can stop. 
bool CheckIfEqual(Real *output_data, Real *old_output_data) { int correct_result = 0; for (int i = 0; i < kSize; ++i) { if (fabs(output_data[i] - old_output_data[i]) < kCheckError) correct_result++; } return correct_result == kSize; } int main(int argc, char *argv[]) { auto begin_runtime = std::chrono::high_resolution_clock::now(); outfile.open("report.txt", std::ios_base::out); std::vector<float> input_matrix(kSize * kSize); std::vector<Real> input_results(kSize); queue q(gpu_selector_v); std::cout << "Device : " << q.get_device().get_info<info::device::name>() << std::endl; outfile << "Device : " << q.get_device().get_info<info::device::name>() << std::endl; auto begin_matrix = std::chrono::high_resolution_clock::now(); GenerateMatrix(input_matrix, input_results); buffer bufin_mat(input_matrix); buffer bufin_res(input_results); auto end_matrix = std::chrono::high_resolution_clock::now(); auto elapsed_matrix = std::chrono::duration_cast<std::chrono::nanoseconds>( end_matrix - begin_matrix); std::cout << "\nMatrix generated, time elapsed: " << elapsed_matrix.count() * 1e-9 << " seconds.\n"; outfile << "\nMatrix generated, time elapsed: " << elapsed_matrix.count() * 1e-9 << " seconds.\n"; if (kSize < 10) PrintMatrix(input_matrix, input_results); auto begin_computations = std::chrono::high_resolution_clock::now(); Real *output_data = malloc_shared<Real>(kSize, q); Real *old_output_data = malloc_shared<Real>(kSize, q); for (int i = 0; i < kSize; i++) output_data[i] = 0; bool is_equal = false; int sweeps = 0; // The main functionality of the Jacobi Solver. Every iteration // calculates new values until the difference between the values // calculatedthis iteration and the one before is less than the error. do { for (int i = 0; i < kSize; ++i) old_output_data[i] = output_data[i]; q.submit([&](handler &h) { accessor M{bufin_mat, h, read_only}; accessor R{bufin_res, h, read_only}; h.parallel_for(range<1>(kSize), [=](id<1> id) { int i = id; int j = kSize * i; int it = kSize * i + i; output_data[i] = R[i]; for (int z = 0; z < kSize; ++z) { if (z != i) output_data[i] = output_data[i] - (old_output_data[z] * static_cast<Real>(M[j])); j = j + 1; } output_data[i] = output_data[i] / static_cast<Real>(M[it]); }); }).wait(); ++sweeps; is_equal = CheckIfEqual(output_data, old_output_data); } while (!is_equal && sweeps < kMaxSweeps); auto end_computations = std::chrono::high_resolution_clock::now(); auto elapsed_computations = std::chrono::duration_cast<std::chrono::nanoseconds>(end_computations - begin_computations); std::cout << "\nComputations complete, time elapsed: " << elapsed_computations.count() * 1e-9 << " seconds.\n"; std::cout << "Total number of sweeps: " << sweeps << "\nChecking results\n"; outfile << "\nComputations complete, time elapsed: " << elapsed_computations.count() * 1e-9 << " seconds.\n"; outfile << "Total number of sweeps: " << sweeps << "\nChecking results\n"; auto begin_check = std::chrono::high_resolution_clock::now(); std::vector<Real> output_results(kSize, 0); // Calculating a new set of results from the calculated values. for (int i = 0; i < kSize * kSize; ++i) { output_results[i / kSize] += output_data[i % kSize] * static_cast<Real>(input_matrix[i]); } bool *all_eq = malloc_shared<bool>(1, q); all_eq[0] = true; // Comparing the newly calculated results with the ones that were // given. If the difference is less than the error rate for each of // the elements, then all values have been calculated correctly. 
{ buffer bufout_res(output_results); q.submit([&](handler &h) { accessor R{bufin_res, h, read_only}; accessor NR{bufout_res, h, read_only}; h.parallel_for(range<1>(kSize), [=](id<1> id) { Real diff = fabs(NR[id] - R[id]); if (diff > kCalculationError) all_eq[0] = false; }); }); } if (all_eq[0]) { std::cout << "All values are correct.\n"; outfile << "All values are correct.\n"; } else { std::cout << "There have been some errors. The values are not correct.\n"; outfile << "There have been some errors. The values are not correct.\n"; } auto end_check = std::chrono::high_resolution_clock::now(); auto elapsed_check = std::chrono::duration_cast<std::chrono::nanoseconds>( end_check - begin_check); std::cout << "\nCheck complete, time elapsed: " << elapsed_check.count() * 1e-9 << " seconds.\n"; outfile << "\nCheck complete, time elapsed: " << elapsed_check.count() * 1e-9 << " seconds.\n"; auto end_runtime = std::chrono::high_resolution_clock::now(); auto elapsed_runtime = std::chrono::duration_cast<std::chrono::nanoseconds>( end_runtime - begin_runtime); std::cout << "Total runtime is " << elapsed_runtime.count() * 1e-9 << " seconds.\n"; outfile << "Total runtime is " << elapsed_runtime.count() * 1e-9 << " seconds.\n"; PrintResults(output_data, kSize); free(output_data, q); free(old_output_data, q); return 0; }
cpp
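In the single-GPU version, output_data and old_output_data live in USM shared allocations (malloc_shared) so the host can copy and compare them between sweeps while the kernel reads and writes the same pointers. A self-contained sketch of that pattern on a toy 4x4 diagonally dominant system (the matrix values are invented for the example; the row-per-work-item update mirrors the sample):

#include <sycl/sycl.hpp>

#include <algorithm>
#include <iostream>

int main() {
  constexpr int n = 4;
  // Toy diagonally dominant system A * x = b.
  const float A[n * n] = {10, 1, 0, 1,
                           1, 12, 2, 0,
                           0,  2, 9, 1,
                           1,  0, 1, 11};
  const float b[n] = {12, 15, 12, 13};

  sycl::queue q(sycl::default_selector_v);
  float *x = sycl::malloc_shared<float>(n, q);
  float *x_old = sycl::malloc_shared<float>(n, q);
  float *dA = sycl::malloc_shared<float>(n * n, q);
  float *db = sycl::malloc_shared<float>(n, q);
  std::fill(x, x + n, 0.0f);
  std::copy(A, A + n * n, dA);
  std::copy(b, b + n, db);

  for (int sweep = 0; sweep < 25; ++sweep) {
    std::copy(x, x + n, x_old);  // host-side copy of the previous sweep, as in the sample
    q.parallel_for(sycl::range<1>(n), [=](sycl::id<1> id) {
      int i = id;  // one work-item per row
      float s = db[i];
      for (int z = 0; z < n; ++z)
        if (z != i) s -= x_old[z] * dA[i * n + z];
      x[i] = s / dA[i * n + i];
    }).wait();
  }

  for (int i = 0; i < n; ++i)
    std::cout << "X" << i + 1 << " equals: " << x[i] << "\n";
  sycl::free(x, q);
  sycl::free(x_old, q);
  sycl::free(dA, q);
  sycl::free(db, q);
  return 0;
}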
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/helper_string.h
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // These are helper functions for the SDK samples (string parsing, timers, etc) #ifndef COMMON_HELPER_STRING_H_ #define COMMON_HELPER_STRING_H_ #include <stdio.h> #include <stdlib.h> #include <fstream> #include <string> #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) #ifndef _CRT_SECURE_NO_DEPRECATE #define _CRT_SECURE_NO_DEPRECATE #endif #ifndef STRCASECMP #define STRCASECMP _stricmp #endif #ifndef STRNCASECMP #define STRNCASECMP _strnicmp #endif #ifndef STRCPY #define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath) #endif #ifndef FOPEN #define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode) #endif #ifndef FOPEN_FAIL #define FOPEN_FAIL(result) (result != 0) #endif #ifndef SSCANF #define SSCANF sscanf_s #endif #ifndef SPRINTF #define SPRINTF sprintf_s #endif #else // Linux Includes #include <string.h> #include <strings.h> #ifndef STRCASECMP #define STRCASECMP strcasecmp #endif #ifndef STRNCASECMP #define STRNCASECMP strncasecmp #endif #ifndef STRCPY #define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath) #endif #ifndef FOPEN #define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode)) #endif #ifndef FOPEN_FAIL #define FOPEN_FAIL(result) (result == NULL) #endif #ifndef SSCANF #define SSCANF sscanf #endif #ifndef SPRINTF #define SPRINTF sprintf #endif #endif #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif // CUDA Utility Helper Functions inline int stringRemoveDelimiter(char delimiter, const char *string) { int string_start = 0; while (string[string_start] == delimiter) { string_start++; } if (string_start >= static_cast<int>(strlen(string) - 1)) { return 0; } return string_start; } inline int getFileExtension(char *filename, char **extension) { int string_length = static_cast<int>(strlen(filename)); while (filename[string_length--] != '.') { if (string_length == 0) break; } if (string_length > 0) string_length += 2; if (string_length == 0) *extension = NULL; else *extension = &filename[string_length]; return 
string_length; } inline bool checkCmdLineFlag(const int argc, const char **argv, const char *string_ref) { bool bFound = false; if (argc >= 1) { for (int i = 1; i < argc; i++) { int string_start = stringRemoveDelimiter('-', argv[i]); const char *string_argv = &argv[i][string_start]; const char *equal_pos = strchr(string_argv, '='); int argv_length = static_cast<int>( equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv); int length = static_cast<int>(strlen(string_ref)); if (length == argv_length && !STRNCASECMP(string_argv, string_ref, length)) { bFound = true; continue; } } } return bFound; } // This function wraps the CUDA Driver API into a template function template <class T> inline bool getCmdLineArgumentValue(const int argc, const char **argv, const char *string_ref, T *value) { bool bFound = false; if (argc >= 1) { for (int i = 1; i < argc; i++) { int string_start = stringRemoveDelimiter('-', argv[i]); const char *string_argv = &argv[i][string_start]; int length = static_cast<int>(strlen(string_ref)); if (!STRNCASECMP(string_argv, string_ref, length)) { if (length + 1 <= static_cast<int>(strlen(string_argv))) { int auto_inc = (string_argv[length] == '=') ? 1 : 0; *value = (T)atoi(&string_argv[length + auto_inc]); } bFound = true; i = argc; } } } return bFound; } inline int getCmdLineArgumentInt(const int argc, const char **argv, const char *string_ref) { bool bFound = false; int value = -1; if (argc >= 1) { for (int i = 1; i < argc; i++) { int string_start = stringRemoveDelimiter('-', argv[i]); const char *string_argv = &argv[i][string_start]; int length = static_cast<int>(strlen(string_ref)); if (!STRNCASECMP(string_argv, string_ref, length)) { if (length + 1 <= static_cast<int>(strlen(string_argv))) { int auto_inc = (string_argv[length] == '=') ? 1 : 0; value = atoi(&string_argv[length + auto_inc]); } else { value = 0; } bFound = true; continue; } } } if (bFound) { return value; } else { return 0; } } inline float getCmdLineArgumentFloat(const int argc, const char **argv, const char *string_ref) { bool bFound = false; float value = -1; if (argc >= 1) { for (int i = 1; i < argc; i++) { int string_start = stringRemoveDelimiter('-', argv[i]); const char *string_argv = &argv[i][string_start]; int length = static_cast<int>(strlen(string_ref)); if (!STRNCASECMP(string_argv, string_ref, length)) { if (length + 1 <= static_cast<int>(strlen(string_argv))) { int auto_inc = (string_argv[length] == '=') ? 1 : 0; value = static_cast<float>(atof(&string_argv[length + auto_inc])); } else { value = 0.f; } bFound = true; continue; } } } if (bFound) { return value; } else { return 0; } } inline bool getCmdLineArgumentString(const int argc, const char **argv, const char *string_ref, char **string_retval) { bool bFound = false; if (argc >= 1) { for (int i = 1; i < argc; i++) { int string_start = stringRemoveDelimiter('-', argv[i]); char *string_argv = const_cast<char *>(&argv[i][string_start]); int length = static_cast<int>(strlen(string_ref)); if (!STRNCASECMP(string_argv, string_ref, length)) { *string_retval = &string_argv[length + 1]; bFound = true; continue; } } } if (!bFound) { *string_retval = NULL; } return bFound; } ////////////////////////////////////////////////////////////////////////////// //! Find the path for a file assuming that //! files are found in the searchPath. //! //! @return the path if succeeded, otherwise 0 //! @param filename name of the file //! 
@param executable_path optional absolute path of the executable ////////////////////////////////////////////////////////////////////////////// inline char *sdkFindFilePath(const char *filename, const char *executable_path) { // <executable_name> defines a variable that is replaced with the name of the // executable // Typical relative search paths to locate needed companion files (e.g. sample // input data, or JIT source files) The origin for the relative search may be // the .exe file, a .bat file launching an .exe, a browser .exe launching the // .exe or .bat, etc const char *searchPath[] = { "./", // same dir "./data/", // same dir "../../../../Samples/<executable_name>/", // up 4 in tree "../../../Samples/<executable_name>/", // up 3 in tree "../../Samples/<executable_name>/", // up 2 in tree "../../../../Samples/<executable_name>/data/", // up 4 in tree "../../../Samples/<executable_name>/data/", // up 3 in tree "../../Samples/<executable_name>/data/", // up 2 in tree "../../../../Samples/0_Introduction/<executable_name>/", // up 4 in tree "../../../Samples/0_Introduction/<executable_name>/", // up 3 in tree "../../Samples/0_Introduction/<executable_name>/", // up 2 in tree "../../../../Samples/1_Utilities/<executable_name>/", // up 4 in tree "../../../Samples/1_Utilities/<executable_name>/", // up 3 in tree "../../Samples/1_Utilities/<executable_name>/", // up 2 in tree "../../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 4 in tree "../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 3 in tree "../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 2 in tree "../../../../Samples/3_CUDA_Features/<executable_name>/", // up 4 in tree "../../../Samples/3_CUDA_Features/<executable_name>/", // up 3 in tree "../../Samples/3_CUDA_Features/<executable_name>/", // up 2 in tree "../../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 4 in tree "../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 3 in tree "../../Samples/4_CUDA_Libraries/<executable_name>/", // up 2 in tree "../../../../Samples/5_Domain_Specific/<executable_name>/", // up 4 in tree "../../../Samples/5_Domain_Specific/<executable_name>/", // up 3 in tree "../../Samples/5_Domain_Specific/<executable_name>/", // up 2 in tree "../../../../Samples/6_Performance/<executable_name>/", // up 4 in tree "../../../Samples/6_Performance/<executable_name>/", // up 3 in tree "../../Samples/6_Performance/<executable_name>/", // up 2 in tree "../../../../Samples/0_Introduction/<executable_name>/data/", // up 4 in tree "../../../Samples/0_Introduction/<executable_name>/data/", // up 3 in tree "../../Samples/0_Introduction/<executable_name>/data/", // up 2 in tree "../../../../Samples/1_Utilities/<executable_name>/data/", // up 4 in tree "../../../Samples/1_Utilities/<executable_name>/data/", // up 3 in tree "../../Samples/1_Utilities/<executable_name>/data/", // up 2 in tree "../../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 4 in tree "../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 3 in tree "../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 2 in tree "../../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 4 in tree "../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 3 in tree "../../Samples/3_CUDA_Features/<executable_name>/data/", // up 2 in tree "../../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 4 in tree 
"../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 3 in tree "../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 2 in tree "../../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 4 in tree "../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 3 in tree "../../Samples/5_Domain_Specific/<executable_name>/data/", // up 2 in tree "../../../../Samples/6_Performance/<executable_name>/data/", // up 4 in tree "../../../Samples/6_Performance/<executable_name>/data/", // up 3 in tree "../../Samples/6_Performance/<executable_name>/data/", // up 2 in tree "../../../../Common/data/", // up 4 in tree "../../../Common/data/", // up 3 in tree "../../Common/data/" // up 2 in tree }; // Extract the executable name std::string executable_name; if (executable_path != 0) { executable_name = std::string(executable_path); #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) // Windows path delimiter size_t delimiter_pos = executable_name.find_last_of('\\'); executable_name.erase(0, delimiter_pos + 1); if (executable_name.rfind(".exe") != std::string::npos) { // we strip .exe, only if the .exe is found executable_name.resize(executable_name.size() - 4); } #else // Linux & OSX path delimiter size_t delimiter_pos = executable_name.find_last_of('/'); executable_name.erase(0, delimiter_pos + 1); #endif } // Loop over all search paths and return the first hit for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) { std::string path(searchPath[i]); size_t executable_name_pos = path.find("<executable_name>"); // If there is executable_name variable in the searchPath // replace it with the value if (executable_name_pos != std::string::npos) { if (executable_path != 0) { path.replace(executable_name_pos, strlen("<executable_name>"), executable_name); } else { // Skip this path entry if no executable argument is given continue; } } #ifdef _DEBUG printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str()); #endif // Test if the file exists path.append(filename); FILE *fp; FOPEN(fp, path.c_str(), "rb"); if (fp != NULL) { fclose(fp); // File found // returning an allocated array here for backwards compatibility reasons char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1)); STRCPY(file_path, path.length() + 1, path.c_str()); return file_path; } if (fp) { fclose(fp); } } // File not found printf("\nerror: sdkFindFilePath: file <%s> not found!\n", filename); return 0; } #endif // COMMON_HELPER_STRING_H_
h
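helper_string.h is pulled in by the migrated samples mainly for its command-line helpers: checkCmdLineFlag, getCmdLineArgumentInt/Float/String, and sdkFindFilePath for locating companion data files relative to the executable. A hedged usage sketch, assuming the sample's Common/ directory is on the include path; the flag names and the data file are invented for the example:

#include <cstdio>
#include <cstdlib>

#include <helper_string.h>  // from the sample's Common/ directory

int main(int argc, char **argv) {
  const char **cargv = const_cast<const char **>(argv);

  // e.g. ./app -benchmark -device=1 -file=input.bin
  if (checkCmdLineFlag(argc, cargv, "benchmark"))
    printf("benchmark mode requested\n");

  int dev = getCmdLineArgumentInt(argc, cargv, "device=");
  printf("device index: %d\n", dev);

  char *fname = nullptr;
  if (getCmdLineArgumentString(argc, cargv, "file=", &fname))
    printf("input file: %s\n", fname);

  // Search the usual sample-relative paths for a companion data file.
  char *path = sdkFindFilePath("input.bin", argv[0]);
  if (path) {
    printf("found data file at %s\n", path);
    free(path);  // sdkFindFilePath returns a malloc'd string
  }
  return 0;
}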
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/helper_cuda.h
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions for initialization and error checking #ifndef COMMON_HELPER_CUDA_H_ #define COMMON_HELPER_CUDA_H_ #pragma once #include <sycl/sycl.hpp> #include <dpct/dpct.hpp> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <helper_string.h> #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif // Note, it is required that your SDK sample to include the proper header // files, please refer the CUDA examples for examples of the needed CUDA // headers, which may change depending on which CUDA functions are used. // CUDA Runtime error messages #ifdef __DPCT_HPP__ static const char *_cudaGetErrorEnum(dpct::err0 error) { /* DPCT1009:9: SYCL uses exceptions to report errors and does not use the error codes. The original code was commented out and a warning string was inserted. You need to rewrite this code. */ return "cudaGetErrorName is not supported" /*cudaGetErrorName(error)*/; } #endif #ifdef CUDA_DRIVER_API // CUDA Driver API errors static const char *_cudaGetErrorEnum(CUresult error) { static char unknown[] = "<unknown>"; const char *ret = NULL; cuGetErrorName(error, &ret); return ret ? 
ret : unknown; } #endif #ifdef CUBLAS_API_H_ // cuBLAS API errors static const char *_cudaGetErrorEnum(cublasStatus_t error) { switch (error) { case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; } return "<unknown>"; } #endif #ifdef _CUFFT_H_ // cuFFT API errors static const char *_cudaGetErrorEnum(cufftResult error) { switch (error) { case CUFFT_SUCCESS: return "CUFFT_SUCCESS"; case CUFFT_INVALID_PLAN: return "CUFFT_INVALID_PLAN"; case CUFFT_ALLOC_FAILED: return "CUFFT_ALLOC_FAILED"; case CUFFT_INVALID_TYPE: return "CUFFT_INVALID_TYPE"; case CUFFT_INVALID_VALUE: return "CUFFT_INVALID_VALUE"; case CUFFT_INTERNAL_ERROR: return "CUFFT_INTERNAL_ERROR"; case CUFFT_EXEC_FAILED: return "CUFFT_EXEC_FAILED"; case CUFFT_SETUP_FAILED: return "CUFFT_SETUP_FAILED"; case CUFFT_INVALID_SIZE: return "CUFFT_INVALID_SIZE"; case CUFFT_UNALIGNED_DATA: return "CUFFT_UNALIGNED_DATA"; case CUFFT_INCOMPLETE_PARAMETER_LIST: return "CUFFT_INCOMPLETE_PARAMETER_LIST"; case CUFFT_INVALID_DEVICE: return "CUFFT_INVALID_DEVICE"; case CUFFT_PARSE_ERROR: return "CUFFT_PARSE_ERROR"; case CUFFT_NO_WORKSPACE: return "CUFFT_NO_WORKSPACE"; case CUFFT_NOT_IMPLEMENTED: return "CUFFT_NOT_IMPLEMENTED"; case CUFFT_LICENSE_ERROR: return "CUFFT_LICENSE_ERROR"; case CUFFT_NOT_SUPPORTED: return "CUFFT_NOT_SUPPORTED"; } return "<unknown>"; } #endif #ifdef CUSPARSEAPI // cuSPARSE API errors static const char *_cudaGetErrorEnum(cusparseStatus_t error) { switch (error) { case CUSPARSE_STATUS_SUCCESS: return "CUSPARSE_STATUS_SUCCESS"; case CUSPARSE_STATUS_NOT_INITIALIZED: return "CUSPARSE_STATUS_NOT_INITIALIZED"; case CUSPARSE_STATUS_ALLOC_FAILED: return "CUSPARSE_STATUS_ALLOC_FAILED"; case CUSPARSE_STATUS_INVALID_VALUE: return "CUSPARSE_STATUS_INVALID_VALUE"; case CUSPARSE_STATUS_ARCH_MISMATCH: return "CUSPARSE_STATUS_ARCH_MISMATCH"; case CUSPARSE_STATUS_MAPPING_ERROR: return "CUSPARSE_STATUS_MAPPING_ERROR"; case CUSPARSE_STATUS_EXECUTION_FAILED: return "CUSPARSE_STATUS_EXECUTION_FAILED"; case CUSPARSE_STATUS_INTERNAL_ERROR: return "CUSPARSE_STATUS_INTERNAL_ERROR"; case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; } return "<unknown>"; } #endif #ifdef CUSOLVER_COMMON_H_ // cuSOLVER API errors static const char *_cudaGetErrorEnum(cusolverStatus_t error) { switch (error) { case CUSOLVER_STATUS_SUCCESS: return "CUSOLVER_STATUS_SUCCESS"; case CUSOLVER_STATUS_NOT_INITIALIZED: return "CUSOLVER_STATUS_NOT_INITIALIZED"; case CUSOLVER_STATUS_ALLOC_FAILED: return "CUSOLVER_STATUS_ALLOC_FAILED"; case CUSOLVER_STATUS_INVALID_VALUE: return "CUSOLVER_STATUS_INVALID_VALUE"; case CUSOLVER_STATUS_ARCH_MISMATCH: return "CUSOLVER_STATUS_ARCH_MISMATCH"; case CUSOLVER_STATUS_MAPPING_ERROR: return "CUSOLVER_STATUS_MAPPING_ERROR"; case CUSOLVER_STATUS_EXECUTION_FAILED: return "CUSOLVER_STATUS_EXECUTION_FAILED"; case CUSOLVER_STATUS_INTERNAL_ERROR: return 
"CUSOLVER_STATUS_INTERNAL_ERROR"; case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; case CUSOLVER_STATUS_NOT_SUPPORTED: return "CUSOLVER_STATUS_NOT_SUPPORTED "; case CUSOLVER_STATUS_ZERO_PIVOT: return "CUSOLVER_STATUS_ZERO_PIVOT"; case CUSOLVER_STATUS_INVALID_LICENSE: return "CUSOLVER_STATUS_INVALID_LICENSE"; } return "<unknown>"; } #endif #ifdef CURAND_H_ // cuRAND API errors static const char *_cudaGetErrorEnum(int error) { switch (error) { case 0: return "CURAND_STATUS_SUCCESS"; case 100: return "CURAND_STATUS_VERSION_MISMATCH"; case 101: return "CURAND_STATUS_NOT_INITIALIZED"; case 102: return "CURAND_STATUS_ALLOCATION_FAILED"; case 103: return "CURAND_STATUS_TYPE_ERROR"; case 104: return "CURAND_STATUS_OUT_OF_RANGE"; case 105: return "CURAND_STATUS_LENGTH_NOT_MULTIPLE"; case 106: return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED"; case 201: return "CURAND_STATUS_LAUNCH_FAILURE"; case 202: return "CURAND_STATUS_PREEXISTING_FAILURE"; case 203: return "CURAND_STATUS_INITIALIZATION_FAILED"; case 204: return "CURAND_STATUS_ARCH_MISMATCH"; case 999: return "CURAND_STATUS_INTERNAL_ERROR"; } return "<unknown>"; } #endif #ifdef NVJPEGAPI // nvJPEG API errors static const char *_cudaGetErrorEnum(nvjpegStatus_t error) { switch (error) { case NVJPEG_STATUS_SUCCESS: return "NVJPEG_STATUS_SUCCESS"; case NVJPEG_STATUS_NOT_INITIALIZED: return "NVJPEG_STATUS_NOT_INITIALIZED"; case NVJPEG_STATUS_INVALID_PARAMETER: return "NVJPEG_STATUS_INVALID_PARAMETER"; case NVJPEG_STATUS_BAD_JPEG: return "NVJPEG_STATUS_BAD_JPEG"; case NVJPEG_STATUS_JPEG_NOT_SUPPORTED: return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED"; case NVJPEG_STATUS_ALLOCATOR_FAILURE: return "NVJPEG_STATUS_ALLOCATOR_FAILURE"; case NVJPEG_STATUS_EXECUTION_FAILED: return "NVJPEG_STATUS_EXECUTION_FAILED"; case NVJPEG_STATUS_ARCH_MISMATCH: return "NVJPEG_STATUS_ARCH_MISMATCH"; case NVJPEG_STATUS_INTERNAL_ERROR: return "NVJPEG_STATUS_INTERNAL_ERROR"; } return "<unknown>"; } #endif #ifdef NV_NPPIDEFS_H // NPP API errors static const char *_cudaGetErrorEnum(NppStatus error) { switch (error) { case NPP_NOT_SUPPORTED_MODE_ERROR: return "NPP_NOT_SUPPORTED_MODE_ERROR"; case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR: return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR"; case NPP_RESIZE_NO_OPERATION_ERROR: return "NPP_RESIZE_NO_OPERATION_ERROR"; case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY: return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY"; #if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000 case NPP_BAD_ARG_ERROR: return "NPP_BAD_ARGUMENT_ERROR"; case NPP_COEFF_ERROR: return "NPP_COEFFICIENT_ERROR"; case NPP_RECT_ERROR: return "NPP_RECTANGLE_ERROR"; case NPP_QUAD_ERROR: return "NPP_QUADRANGLE_ERROR"; case NPP_MEM_ALLOC_ERR: return "NPP_MEMORY_ALLOCATION_ERROR"; case NPP_HISTO_NUMBER_OF_LEVELS_ERROR: return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR"; case NPP_INVALID_INPUT: return "NPP_INVALID_INPUT"; case NPP_POINTER_ERROR: return "NPP_POINTER_ERROR"; case NPP_WARNING: return "NPP_WARNING"; case NPP_ODD_ROI_WARNING: return "NPP_ODD_ROI_WARNING"; #else // These are for CUDA 5.5 or higher case NPP_BAD_ARGUMENT_ERROR: return "NPP_BAD_ARGUMENT_ERROR"; case NPP_COEFFICIENT_ERROR: return "NPP_COEFFICIENT_ERROR"; case NPP_RECTANGLE_ERROR: return "NPP_RECTANGLE_ERROR"; case NPP_QUADRANGLE_ERROR: return "NPP_QUADRANGLE_ERROR"; case NPP_MEMORY_ALLOCATION_ERR: return "NPP_MEMORY_ALLOCATION_ERROR"; case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR: return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR"; case NPP_INVALID_HOST_POINTER_ERROR: return 
"NPP_INVALID_HOST_POINTER_ERROR"; case NPP_INVALID_DEVICE_POINTER_ERROR: return "NPP_INVALID_DEVICE_POINTER_ERROR"; #endif case NPP_LUT_NUMBER_OF_LEVELS_ERROR: return "NPP_LUT_NUMBER_OF_LEVELS_ERROR"; case NPP_TEXTURE_BIND_ERROR: return "NPP_TEXTURE_BIND_ERROR"; case NPP_WRONG_INTERSECTION_ROI_ERROR: return "NPP_WRONG_INTERSECTION_ROI_ERROR"; case NPP_NOT_EVEN_STEP_ERROR: return "NPP_NOT_EVEN_STEP_ERROR"; case NPP_INTERPOLATION_ERROR: return "NPP_INTERPOLATION_ERROR"; case NPP_RESIZE_FACTOR_ERROR: return "NPP_RESIZE_FACTOR_ERROR"; case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR: return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR"; #if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000 case NPP_MEMFREE_ERR: return "NPP_MEMFREE_ERR"; case NPP_MEMSET_ERR: return "NPP_MEMSET_ERR"; case NPP_MEMCPY_ERR: return "NPP_MEMCPY_ERROR"; case NPP_MIRROR_FLIP_ERR: return "NPP_MIRROR_FLIP_ERR"; #else case NPP_MEMFREE_ERROR: return "NPP_MEMFREE_ERROR"; case NPP_MEMSET_ERROR: return "NPP_MEMSET_ERROR"; case NPP_MEMCPY_ERROR: return "NPP_MEMCPY_ERROR"; case NPP_MIRROR_FLIP_ERROR: return "NPP_MIRROR_FLIP_ERROR"; #endif case NPP_ALIGNMENT_ERROR: return "NPP_ALIGNMENT_ERROR"; case NPP_STEP_ERROR: return "NPP_STEP_ERROR"; case NPP_SIZE_ERROR: return "NPP_SIZE_ERROR"; case NPP_NULL_POINTER_ERROR: return "NPP_NULL_POINTER_ERROR"; case NPP_CUDA_KERNEL_EXECUTION_ERROR: return "NPP_CUDA_KERNEL_EXECUTION_ERROR"; case NPP_NOT_IMPLEMENTED_ERROR: return "NPP_NOT_IMPLEMENTED_ERROR"; case NPP_ERROR: return "NPP_ERROR"; case NPP_SUCCESS: return "NPP_SUCCESS"; case NPP_WRONG_INTERSECTION_QUAD_WARNING: return "NPP_WRONG_INTERSECTION_QUAD_WARNING"; case NPP_MISALIGNED_DST_ROI_WARNING: return "NPP_MISALIGNED_DST_ROI_WARNING"; case NPP_AFFINE_QUAD_INCORRECT_WARNING: return "NPP_AFFINE_QUAD_INCORRECT_WARNING"; case NPP_DOUBLE_SIZE_WARNING: return "NPP_DOUBLE_SIZE_WARNING"; case NPP_WRONG_INTERSECTION_ROI_WARNING: return "NPP_WRONG_INTERSECTION_ROI_WARNING"; #if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000 /* These are 6.0 or higher */ case NPP_LUT_PALETTE_BITSIZE_ERROR: return "NPP_LUT_PALETTE_BITSIZE_ERROR"; case NPP_ZC_MODE_NOT_SUPPORTED_ERROR: return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR"; case NPP_QUALITY_INDEX_ERROR: return "NPP_QUALITY_INDEX_ERROR"; case NPP_CHANNEL_ORDER_ERROR: return "NPP_CHANNEL_ORDER_ERROR"; case NPP_ZERO_MASK_VALUE_ERROR: return "NPP_ZERO_MASK_VALUE_ERROR"; case NPP_NUMBER_OF_CHANNELS_ERROR: return "NPP_NUMBER_OF_CHANNELS_ERROR"; case NPP_COI_ERROR: return "NPP_COI_ERROR"; case NPP_DIVISOR_ERROR: return "NPP_DIVISOR_ERROR"; case NPP_CHANNEL_ERROR: return "NPP_CHANNEL_ERROR"; case NPP_STRIDE_ERROR: return "NPP_STRIDE_ERROR"; case NPP_ANCHOR_ERROR: return "NPP_ANCHOR_ERROR"; case NPP_MASK_SIZE_ERROR: return "NPP_MASK_SIZE_ERROR"; case NPP_MOMENT_00_ZERO_ERROR: return "NPP_MOMENT_00_ZERO_ERROR"; case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR: return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR"; case NPP_THRESHOLD_ERROR: return "NPP_THRESHOLD_ERROR"; case NPP_CONTEXT_MATCH_ERROR: return "NPP_CONTEXT_MATCH_ERROR"; case NPP_FFT_FLAG_ERROR: return "NPP_FFT_FLAG_ERROR"; case NPP_FFT_ORDER_ERROR: return "NPP_FFT_ORDER_ERROR"; case NPP_SCALE_RANGE_ERROR: return "NPP_SCALE_RANGE_ERROR"; case NPP_DATA_TYPE_ERROR: return "NPP_DATA_TYPE_ERROR"; case NPP_OUT_OFF_RANGE_ERROR: return "NPP_OUT_OFF_RANGE_ERROR"; case NPP_DIVIDE_BY_ZERO_ERROR: return "NPP_DIVIDE_BY_ZERO_ERROR"; case NPP_RANGE_ERROR: return "NPP_RANGE_ERROR"; case NPP_NO_MEMORY_ERROR: return "NPP_NO_MEMORY_ERROR"; case NPP_ERROR_RESERVED: return 
"NPP_ERROR_RESERVED"; case NPP_NO_OPERATION_WARNING: return "NPP_NO_OPERATION_WARNING"; case NPP_DIVIDE_BY_ZERO_WARNING: return "NPP_DIVIDE_BY_ZERO_WARNING"; #endif #if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000 /* These are 7.0 or higher */ case NPP_OVERFLOW_ERROR: return "NPP_OVERFLOW_ERROR"; case NPP_CORRUPTED_DATA_ERROR: return "NPP_CORRUPTED_DATA_ERROR"; #endif } return "<unknown>"; } #endif template <typename T> void check(T result, char const *const func, const char *const file, int const line) { } #ifdef __DPCT_HPP__ // This will output the proper CUDA error strings in the event // that a CUDA host call returns an error #define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__) // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line) { /* DPCT1010:10: SYCL uses exceptions to report errors and does not use the error codes. The call was replaced with 0. You need to rewrite this code. */ dpct::err0 err = 0; } // This will only print the proper error string when calling cudaGetLastError // but not exit program incase error detected. #define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__) inline void __printLastCudaError(const char *errorMessage, const char *file, const int line) { /* DPCT1010:12: SYCL uses exceptions to report errors and does not use the error codes. The call was replaced with 0. You need to rewrite this code. */ dpct::err0 err = 0; } #endif #ifndef MAX #define MAX(a, b) (a > b ? a : b) #endif // Float To Int conversion inline int ftoi(float value) { return (value >= 0 ? static_cast<int>(value + 0.5) : static_cast<int>(value - 0.5)); } // Beginning of GPU Architecture definitions inline int _ConvertSMVer2Cores(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine // the # of cores per SM typedef struct dpct_type_113531 { int SM; // 0xMm (hexidecimal notation), M = SM Major version, // and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { {0x30, 192}, {0x32, 192}, {0x35, 192}, {0x37, 192}, {0x50, 128}, {0x52, 128}, {0x53, 128}, {0x60, 64}, {0x61, 128}, {0x62, 128}, {0x70, 64}, {0x72, 64}, {0x75, 64}, {0x80, 64}, {0x86, 128}, {0x87, 128}, {0x89, 128}, {0x90, 128}, {-1, -1}}; int index = 0; while (nGpuArchCoresPerSM[index].SM != -1) { if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) { return nGpuArchCoresPerSM[index].Cores; } index++; } // If we don't find the values, we default use the previous one // to run properly printf( "MapSMtoCores for SM %d.%d is undefined." 
" Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index - 1].Cores); return nGpuArchCoresPerSM[index - 1].Cores; } inline const char* _ConvertSMVer2ArchName(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine // the GPU Arch name) typedef struct dpct_type_281558 { int SM; // 0xMm (hexidecimal notation), M = SM Major version, // and m = SM minor version const char* name; } sSMtoArchName; sSMtoArchName nGpuArchNameSM[] = { {0x30, "Kepler"}, {0x32, "Kepler"}, {0x35, "Kepler"}, {0x37, "Kepler"}, {0x50, "Maxwell"}, {0x52, "Maxwell"}, {0x53, "Maxwell"}, {0x60, "Pascal"}, {0x61, "Pascal"}, {0x62, "Pascal"}, {0x70, "Volta"}, {0x72, "Xavier"}, {0x75, "Turing"}, {0x80, "Ampere"}, {0x86, "Ampere"}, {0x87, "Ampere"}, {0x89, "Ada"}, {0x90, "Hopper"}, {-1, "Graphics Device"}}; int index = 0; while (nGpuArchNameSM[index].SM != -1) { if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) { return nGpuArchNameSM[index].name; } index++; } // If we don't find the values, we default use the previous one // to run properly printf( "MapSMtoArchName for SM %d.%d is undefined." " Default to use %s\n", major, minor, nGpuArchNameSM[index - 1].name); return nGpuArchNameSM[index - 1].name; } // end of GPU Architecture definitions #ifdef __DPCT_HPP__ // General GPU Device CUDA Initialization inline int gpuDeviceInit(int devID) { int device_count; checkCudaErrors(DPCT_CHECK_ERROR( device_count = dpct::dev_mgr::instance().device_count())); if (device_count == 0) { fprintf(stderr, "gpuDeviceInit() CUDA error: " "no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } if (devID < 0) { devID = 0; } if (devID > device_count - 1) { fprintf(stderr, "\n"); fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", device_count); fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid" " GPU device. <<\n", devID); fprintf(stderr, "\n"); return -devID; } int computeMode = -1, major = 0, minor = 0; /* DPCT1035:14: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1)); checkCudaErrors(DPCT_CHECK_ERROR( major = dpct::dev_mgr::instance().get_device(devID).get_major_version())); checkCudaErrors(DPCT_CHECK_ERROR( minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version())); /* DPCT1035:15: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ if (computeMode == 0) { fprintf(stderr, "Error: device is running in <Compute Mode " "Prohibited>, no threads can use cudaSetDevice().\n"); return -1; } if (major < 1) { fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n"); exit(EXIT_FAILURE); } /* DPCT1093:16: The "devID" device may be not the one intended for use. Adjust the selected device if needed. 
*/ checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID))); printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, _ConvertSMVer2ArchName(major, minor)); return devID; } // This function returns the best GPU (with maximum GFLOPS) inline int gpuGetMaxGflopsDeviceId() try { int current_device = 0, sm_per_multiproc = 0; int max_perf_device = 0; int device_count = 0; int devices_prohibited = 0; uint64_t max_compute_perf = 0; checkCudaErrors(DPCT_CHECK_ERROR( device_count = dpct::dev_mgr::instance().device_count())); if (device_count == 0) { fprintf(stderr, "gpuGetMaxGflopsDeviceId() CUDA error:" " no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } // Find the best CUDA capable GPU device current_device = 0; while (current_device < device_count) { int computeMode = -1, major = 0, minor = 0; /* DPCT1035:17: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1)); checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance() .get_device(current_device) .get_major_version())); checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance() .get_device(current_device) .get_minor_version())); // If this GPU is not running on Compute Mode prohibited, // then we can add it to the list /* DPCT1035:18: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ if (computeMode != 0) { if (major == 9999 && minor == 9999) { sm_per_multiproc = 1; } else { sm_per_multiproc = _ConvertSMVer2Cores(major, minor); } int multiProcessorCount = 0, clockRate = 0; checkCudaErrors( DPCT_CHECK_ERROR(multiProcessorCount = dpct::dev_mgr::instance() .get_device(current_device) .get_max_compute_units())); dpct::err0 result = DPCT_CHECK_ERROR(clockRate = dpct::dev_mgr::instance() .get_device(current_device) .get_max_clock_frequency()); uint64_t compute_perf = (uint64_t)multiProcessorCount * sm_per_multiproc * clockRate; if (compute_perf > max_compute_perf) { max_compute_perf = compute_perf; max_perf_device = current_device; } } else { devices_prohibited++; } ++current_device; } if (devices_prohibited == device_count) { fprintf(stderr, "gpuGetMaxGflopsDeviceId() CUDA error:" " all devices have compute mode prohibited.\n"); exit(EXIT_FAILURE); } return max_perf_device; } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl; std::exit(1); } // Initialization code to find the best CUDA Device inline int findCudaDevice(int argc, const char **argv) { int devID = 0; // If the command-line has a device number specified, use it if (checkCmdLineFlag(argc, argv, "device")) { devID = getCmdLineArgumentInt(argc, argv, "device="); if (devID < 0) { printf("Invalid command line parameter\n "); exit(EXIT_FAILURE); } else { devID = gpuDeviceInit(devID); if (devID < 0) { printf("exiting...\n"); exit(EXIT_FAILURE); } } } else { // Otherwise pick the device with highest Gflops/s devID = gpuGetMaxGflopsDeviceId(); /* DPCT1093:19: The "devID" device may be not the one intended for use. Adjust the selected device if needed. 
*/ checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID))); int major = 0, minor = 0; checkCudaErrors(DPCT_CHECK_ERROR( major = dpct::dev_mgr::instance().get_device(devID).get_major_version())); checkCudaErrors(DPCT_CHECK_ERROR( minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version())); printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, _ConvertSMVer2ArchName(major, minor), major, minor); } return devID; } inline int findIntegratedGPU() { int current_device = 0; int device_count = 0; int devices_prohibited = 0; checkCudaErrors(DPCT_CHECK_ERROR( device_count = dpct::dev_mgr::instance().device_count())); if (device_count == 0) { fprintf(stderr, "CUDA error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } // Find the integrated GPU which is compute capable while (current_device < device_count) { int computeMode = -1, integrated = -1; /* DPCT1035:20: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1)); checkCudaErrors( DPCT_CHECK_ERROR(integrated = dpct::dev_mgr::instance() .get_device(current_device) .get_integrated())); // If GPU is integrated and is not running on Compute Mode prohibited, // then cuda can map to GLES resource /* DPCT1035:21: All SYCL devices can be used by the host to submit tasks. You may need to adjust this code. */ if (integrated && (computeMode != 0)) { /* DPCT1093:22: The "current_device" device may be not the one intended for use. Adjust the selected device if needed. */ checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(current_device))); int major = 0, minor = 0; checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance() .get_device(current_device) .get_major_version())); checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance() .get_device(current_device) .get_minor_version())); printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", current_device, _ConvertSMVer2ArchName(major, minor), major, minor); return current_device; } else { devices_prohibited++; } current_device++; } if (devices_prohibited == device_count) { fprintf(stderr, "CUDA error:" " No GLES-CUDA Interop capable GPU found.\n"); exit(EXIT_FAILURE); } return -1; } // General check for CUDA GPU SM Capabilities inline bool checkCudaCapabilities(int major_version, int minor_version) { int dev; int major = 0, minor = 0; checkCudaErrors(dev = dpct::dev_mgr::instance().current_device_id()); checkCudaErrors(DPCT_CHECK_ERROR( major = dpct::dev_mgr::instance().get_device(dev).get_major_version())); checkCudaErrors(DPCT_CHECK_ERROR( minor = dpct::dev_mgr::instance().get_device(dev).get_minor_version())); if ((major > major_version) || (major == major_version && minor >= minor_version)) { printf(" Device %d: <%16s >, Compute SM %d.%d detected\n", dev, _ConvertSMVer2ArchName(major, minor), major, minor); return true; } else { printf( " No GPU device was found that can support " "CUDA compute capability %d.%d.\n", major_version, minor_version); return false; } } #endif // end of CUDA Helper Functions #endif // COMMON_HELPER_CUDA_H_
h
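In the migrated samples, this header's main entry point is findCudaDevice: it honors a -device=N command-line argument through gpuDeviceInit, or otherwise picks the device that gpuGetMaxGflopsDeviceId rates fastest, and selects it via dpct::select_device. A hedged usage sketch, assuming the dpct helper headers and this header are on the include path as in the sample build, and that the dpct device objects expose the usual SYCL device info queries:

#include <cstdio>

#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>

#include <helper_cuda.h>  // migrated helper from the sample's Common/ directory

int main(int argc, char **argv) {
  // Picks the device given by -device=N, or the "fastest" one otherwise.
  int devID = findCudaDevice(argc, const_cast<const char **>(argv));

  // The selected device is also visible through the dpct device manager.
  auto &dev = dpct::dev_mgr::instance().get_device(devID);
  printf("Using device %d: %s\n", devID,
         dev.get_info<sycl::info::device::name>().c_str());

  // Optional capability gate, mirroring the CUDA samples' pattern.
  if (!checkCudaCapabilities(1, 0)) {
    printf("Device does not meet the required capability, waiving.\n");
    return EXIT_WAIVED;
  }
  return 0;
}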
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/taskflow.hpp
#pragma once #include "core/executor.hpp" #include "algorithm/critical.hpp" #include "algorithm/for_each.hpp" /** @dir taskflow @brief root taskflow include dir */ /** @dir taskflow/core @brief taskflow core include dir */ /** @dir taskflow/algorithm @brief taskflow algorithms include dir */ /** @dir taskflow/cuda @brief taskflow CUDA include dir */ /** @file taskflow/taskflow.hpp @brief main taskflow include file */ // TF_VERSION % 100 is the patch level // TF_VERSION / 100 % 1000 is the minor version // TF_VERSION / 100000 is the major version // current version: 3.5.0 #define TF_VERSION 300500 #define TF_MAJOR_VERSION TF_VERSION/100000 #define TF_MINOR_VERSION TF_VERSION/100%1000 #define TF_PATCH_VERSION TF_VERSION%100 /** @brief taskflow namespace */ namespace tf { /** @private */ namespace detail { } /** @brief queries the version information in a string format @c major.minor.patch Release notes are available here: https://taskflow.github.io/taskflow/Releases.html */ constexpr const char* version() { return "3.5.0"; } } // end of namespace tf -----------------------------------------------------
hpp
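The version macros pack major/minor/patch into one integer: with TF_VERSION equal to 300500, dividing by 100000 yields the major version, dividing by 100 modulo 1000 the minor, and modulo 100 the patch, matching the "3.5.0" string returned by tf::version(). A small sketch of that decomposition, assuming the sample's Common/ directory (which contains the taskflow/ tree) is on the include path:

#include <iostream>

#include <taskflow/taskflow.hpp>

int main() {
  // TF_VERSION = 300500 -> 300500 / 100000 = 3 (major),
  // 300500 / 100 % 1000 = 5 (minor), 300500 % 100 = 0 (patch).
  std::cout << "numeric: " << TF_MAJOR_VERSION << "." << TF_MINOR_VERSION << "."
            << TF_PATCH_VERSION << "\n";
  std::cout << "string : " << tf::version() << "\n";  // "3.5.0"
  return 0;
}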
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/meta_macro.hpp
// 2020/08/30 - Created by netcan: https://github.com/netcan // ref https://github.com/Erlkoenig90/map-macro/ #pragma once #ifdef _MSC_VER #define TF_EMPTY() #define TF_GET_ARG_COUNT_(...) \ TF_PASTE(TF_GET_ARG_COUNT_I(__VA_ARGS__, 64, 63, 62, 61, 60, 59, 58, 57, 56, \ 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, \ 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, \ 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, \ 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, \ 6, 5, 4, 3, 2, 1, 0, ), \ TF_EMPTY()) #else #define TF_GET_ARG_COUNT_(...) \ TF_GET_ARG_COUNT_I(__VA_ARGS__, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, \ 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, \ 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, \ 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, \ 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, ) #endif #define TF_GET_ARG_COUNT(...) TF_GET_ARG_COUNT_(__dummy__, ##__VA_ARGS__) #define TF_GET_ARG_COUNT_I( \ e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, \ e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, \ e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, \ e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, \ e62, e63, e64, size, ...) \ size #define TF_GET_FIRST(a, ...) a #define TF_GET_SECOND(a, b, ...) b #define TF_CONCATE(x, y) x##y #define TF_PASTE(x, y) TF_CONCATE(x, y) #define TF_EVAL0(...) __VA_ARGS__ #define TF_EVAL1(...) TF_EVAL0(TF_EVAL0(TF_EVAL0(__VA_ARGS__))) #define TF_EVAL2(...) TF_EVAL1(TF_EVAL1(TF_EVAL1(__VA_ARGS__))) #define TF_EVAL3(...) TF_EVAL2(TF_EVAL2(TF_EVAL2(__VA_ARGS__))) #define TF_EVAL4(...) TF_EVAL3(TF_EVAL3(TF_EVAL3(__VA_ARGS__))) #define TF_EVAL5(...) TF_EVAL4(TF_EVAL4(TF_EVAL4(__VA_ARGS__))) #ifdef _MSC_VER // MSVC needs more evaluations #define TF_EVAL6(...) TF_EVAL5(TF_EVAL5(TF_EVAL5(__VA_ARGS__))) #define TF_EVAL(...) TF_EVAL6(TF_EVAL6(__VA_ARGS__)) #else #define TF_EVAL(...) TF_EVAL5(__VA_ARGS__) #endif #define TF_MAP_END(...) #define TF_MAP_OUT #define EMPTY() #define DEFER(id) id EMPTY() #define TF_MAP_GET_END2() 0, TF_MAP_END #define TF_MAP_GET_END1(...) TF_MAP_GET_END2 #define TF_MAP_GET_END(...) TF_MAP_GET_END1 #define TF_MAP_NEXT0(test, next, ...) next TF_MAP_OUT #define TF_MAP_NEXT1(test, next) DEFER(TF_MAP_NEXT0)(test, next, 0) #define TF_MAP_NEXT(test, next) TF_MAP_NEXT1(TF_MAP_GET_END test, next) #define TF_MAP0(f, x, peek, ...) \ f(x) DEFER(TF_MAP_NEXT(peek, TF_MAP1))(f, peek, __VA_ARGS__) #define TF_MAP1(f, x, peek, ...) \ f(x) DEFER(TF_MAP_NEXT(peek, TF_MAP0))(f, peek, __VA_ARGS__) #define TF_MAP(f, ...) \ TF_EVAL(TF_MAP1(f, __VA_ARGS__, ()()(), ()()(), ()()(), 0))
hpp
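meta_macro.hpp implements argument counting (TF_GET_ARG_COUNT) and a map-macro (TF_MAP) that applies another macro to each argument in turn. The following is a small sketch of how they behave, assuming the header is reachable as "taskflow/dsl/meta_macro.hpp"; DECLARE_COUNTER is purely illustrative.

// Sketch of the counting and mapping macros above (assumed include path).
#include "taskflow/dsl/meta_macro.hpp"

// TF_GET_ARG_COUNT expands to the number of arguments passed (0..64).
// The zero-argument case relies on the ##__VA_ARGS__ comma-deletion used by the header.
static_assert(TF_GET_ARG_COUNT() == 0, "empty pack counts as zero");
static_assert(TF_GET_ARG_COUNT(a, b, c) == 3, "three arguments");

// TF_MAP(f, ...) applies f to every argument:
// TF_MAP(DECLARE_COUNTER, x, y, z) expands to int x = 0; int y = 0; int z = 0;
#define DECLARE_COUNTER(name) int name = 0;

int main() {
  TF_MAP(DECLARE_COUNTER, x, y, z)
  return x + y + z;  // all three counters exist and are zero-initialized
}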
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/type_list.hpp
// 2020/08/28 - Created by netcan: https://github.com/netcan #pragma once #include <cstddef> namespace tf { namespace dsl { template <typename...> using void_t = void; template <typename... Ts> struct TypeList { using type = TypeList<Ts...>; static constexpr size_t size = 0; template <typename... T> struct append { using type = TypeList<T...>; }; template <typename... T> using appendTo = typename append<T...>::type; template <typename T> using prepend = typename TypeList<T>::type; template <template <typename...> class T> using exportTo = T<Ts...>; }; template <typename Head, typename... Tails> struct TypeList<Head, Tails...> { using type = TypeList<Head, Tails...>; using head = Head; using tails = TypeList<Tails...>; static constexpr size_t size = sizeof...(Tails) + 1; template <typename... Ts> struct append { using type = TypeList<Head, Tails..., Ts...>; }; template <typename... Ts> using appendTo = typename append<Ts...>::type; template <typename T> using prepend = typename TypeList<T, Head, Tails...>::type; template <template <typename...> class T> using exportTo = T<Head, Tails...>; }; template <typename IN> struct IsTypeList { constexpr static bool value = false; }; template <typename IN> constexpr bool IsTypeList_v = IsTypeList<IN>::value; template <typename... Ts> struct IsTypeList<TypeList<Ts...>> { constexpr static bool value = true; }; template <typename... IN> struct Concat; template <typename... IN> using Concat_t = typename Concat<IN...>::type; template <> struct Concat<> { using type = TypeList<>; }; template <typename IN> struct Concat<IN> { using type = IN; }; template <typename IN, typename IN2> struct Concat<IN, IN2> { using type = typename IN2::template exportTo<IN::template append>::type; }; template <typename IN, typename IN2, typename... Rest> struct Concat<IN, IN2, Rest...> { using type = Concat_t<Concat_t<IN, IN2>, Rest...>; }; template <typename IN, typename OUT = TypeList<>, typename = void> struct Flatten { using type = OUT; }; template <typename IN> using Flatten_t = typename Flatten<IN>::type; template <typename IN, typename OUT> struct Flatten<IN, OUT, std::enable_if_t<IsTypeList_v<typename IN::head>>> { using type = typename Flatten<typename IN::tails, Concat_t<OUT, Flatten_t<typename IN::head>>>::type; }; template <typename IN, typename OUT> struct Flatten<IN, OUT, std::enable_if_t<!IsTypeList_v<typename IN::head>>> { using type = typename Flatten< typename IN::tails, typename OUT::template appendTo<typename IN::head>>::type; }; template <typename IN, template <typename> class F> struct Map { using type = TypeList<>; }; template <typename IN, template <typename> class F> using Map_t = typename Map<IN, F>::type; template <template <typename> class F, typename... 
Ts> struct Map<TypeList<Ts...>, F> { using type = TypeList<typename F<Ts>::type...>; }; template <typename IN, template <typename> class F, typename OUT = TypeList<>, typename = void> struct Filter { using type = OUT; }; template <typename IN, template <typename> class F> using Filter_t = typename Filter<IN, F>::type; template <typename IN, template <typename> class F, typename OUT> class Filter<IN, F, OUT, void_t<typename IN::head>> { using H = typename IN::head; public: using type = typename std::conditional_t< F<H>::value, Filter<typename IN::tails, F, typename OUT::template appendTo<H>>, Filter<typename IN::tails, F, OUT>>::type; }; template <typename IN, typename = void> struct Unique { using type = IN; }; template <typename IN> using Unique_t = typename Unique<IN>::type; template <typename IN> class Unique<IN, void_t<typename IN::head>> { template <typename T> struct IsDifferR { template <typename R> struct apply { static constexpr bool value = !std::is_same<T, R>::value; }; }; using tails = Unique_t<typename IN::tails>; using eraseHead = Filter_t<tails, IsDifferR<typename IN::head>::template apply>; public: using type = typename eraseHead::template prepend<typename IN::head>; }; } // namespace dsl } // namespace tf
hpp
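type_list.hpp provides the compile-time list algebra (Concat_t, Flatten_t, Map_t, Filter_t, Unique_t) that the DSL builds on. Below is a compile-time sketch of the expected results, under the assumed include path; note that Unique_t keeps the first occurrence of a duplicated type.

// Compile-time sketch of the TypeList utilities above (assumed include path).
#include "taskflow/dsl/type_list.hpp"
#include <type_traits>

using namespace tf::dsl;

using L1 = TypeList<int, double>;
using L2 = TypeList<char>;

// Concat_t joins lists, Flatten_t removes one level of nesting,
// Unique_t removes later duplicates while keeping the first occurrence.
static_assert(std::is_same<Concat_t<L1, L2>, TypeList<int, double, char>>::value,
              "concatenation preserves order");
static_assert(std::is_same<Flatten_t<TypeList<int, TypeList<double, char>>>,
                           TypeList<int, double, char>>::value,
              "nested lists are flattened");
static_assert(std::is_same<Unique_t<TypeList<int, double, int>>,
                           TypeList<int, double>>::value,
              "the second int is dropped");

int main() { return 0; }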
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/task_dsl.hpp
// 2020/08/28 - Created by netcan: https://github.com/netcan #pragma once #include "../core/flow_builder.hpp" #include "meta_macro.hpp" #include "task_analyzer.hpp" #include "task_trait.hpp" namespace tf { namespace dsl { struct EmptyContext {}; template <typename CONTEXT = EmptyContext, typename... Chains> class TaskDsl { using Links = Unique_t<Flatten_t<TypeList<typename Chain<Chains>::type...>>>; using Analyzer = typename Links::template exportTo<TaskAnalyzer>; using AllTasks = typename Analyzer::AllTasks; template <typename TASK> struct TaskCbWithContext { using type = TaskCb<TASK, CONTEXT>; }; using TasksCB = typename Map_t<AllTasks, TaskCbWithContext>::template exportTo<std::tuple>; using OneToOneLinkSet = typename Analyzer::OneToOneLinkSet; template <typename OneToOneLink> struct OneToOneLinkInstanceType { using type = typename OneToOneLink::template InstanceType<TasksCB>; }; using OneToOneLinkInstances = typename Map_t<OneToOneLinkSet, OneToOneLinkInstanceType>::template exportTo<std::tuple>; public: constexpr TaskDsl(FlowBuilder &flow_builder, const CONTEXT &context = {}) { build_tasks_cb(flow_builder, context, std::make_index_sequence<AllTasks::size>{}); build_links(std::make_index_sequence<OneToOneLinkSet::size>{}); } template <typename TASK> Task &get_task() { constexpr size_t TasksCBSize = std::tuple_size<TasksCB>::value; constexpr size_t TaskIndex = TupleElementByF_v<TasksCB, IsTask<TASK>::template apply>; static_assert(TaskIndex < TasksCBSize, "fatal: not find TaskCb in TasksCB"); return std::get<TaskIndex>(tasksCb_).task_; } private: template <size_t... Is> void build_tasks_cb(FlowBuilder &flow_builder, const CONTEXT &context, std::index_sequence<Is...>) { auto _ = {0, (std::get<Is>(tasksCb_).build(flow_builder, context), 0)...}; (void)_; } template <size_t... Is> void build_links(std::index_sequence<Is...>) { auto _ = {0, (std::get<Is>(links_).build(tasksCb_), 0)...}; (void)_; } private: TasksCB tasksCb_; OneToOneLinkInstances links_; }; template <typename = void, typename... Chains, typename CONTEXT = EmptyContext> constexpr TaskDsl<CONTEXT, Chains...> taskDsl(FlowBuilder &flow_builder, CONTEXT &&context = {}) { return {flow_builder, context}; } } // namespace dsl } // namespace tf /////////////////////////////////////////////////////////////////////////////// #define TF_CHAIN(link) , link->void #define TF_CONTEXT_1(name) tf::dsl::EmptyContext #define TF_CONTEXT_2(name, context) context #define TF_CAPTURE_THIS_1 #define TF_CAPTURE_THIS_2 *this /////////////////////////////////////////////////////////////////////////////// // make_task(TASK_NAME, { return a action lambda }) #define make_task(name, ...) \ struct TF_GET_FIRST name : tf::dsl::TaskSignature, \ TF_PASTE(TF_CONTEXT_, TF_GET_ARG_COUNT name) \ name { \ using _ContextType = TF_PASTE(TF_CONTEXT_, TF_GET_ARG_COUNT name) name; \ TF_GET_FIRST name(const _ContextType &context) : _ContextType(context) {} \ auto operator()() { \ return [TF_PASTE(TF_CAPTURE_THIS_, TF_GET_ARG_COUNT name)] __VA_ARGS__; \ } \ } // some_tasks(A, B, C) means SomeTask #define some_tasks(...) auto (*)(tf::dsl::SomeTask<__VA_ARGS__>) // same as some_tasks #define fork_tasks(...) some_tasks(__VA_ARGS__) // same as some_tasks #define merge_tasks(...) some_tasks(__VA_ARGS__) // task(A) means a task A #define task(Task) auto (*)(Task) // taskbuild(...) build a task dsl graph #define build_taskflow(...) tf::dsl::taskDsl<void TF_MAP(TF_CHAIN, __VA_ARGS__)>
hpp
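task_dsl.hpp glues the pieces together behind the make_task / task / fork_tasks / merge_tasks / build_taskflow macros. The following is a rough usage sketch reconstructed from those macro definitions, not an excerpt from the sample; the include paths and the exact chain spelling are assumptions.

// Rough usage sketch of the DSL macros defined above.
#include <taskflow/taskflow.hpp>          // executor, taskflow (assumed path)
#include "taskflow/dsl/task_dsl.hpp"      // DSL macros (assumed path)
#include <iostream>

// Each make_task defines a struct whose operator() returns the task body.
make_task((A), { std::cout << "A\n"; });
make_task((B), { std::cout << "B\n"; });
make_task((C), { std::cout << "C\n"; });
make_task((D), { std::cout << "D\n"; });

int main() {
  tf::Executor executor;
  tf::Taskflow taskflow;

  // A precedes B and C; B and C both precede D.
  build_taskflow(
    task(A) -> fork_tasks(B, C) -> task(D)
  )(taskflow);

  executor.run(taskflow).wait();
}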
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/tuple_utils.hpp
// 2020/08/28 - Created by netcan: https://github.com/netcan

#pragma once

#include <cstddef>
#include <tuple>
#include <type_traits>  // std::enable_if_t, std::decay_t
#include <utility>      // std::index_sequence, std::forward

namespace tf {
namespace dsl {
namespace detail {

// get tuple element index by f, if not exists then index >= tuple_size
template <typename TUP, template <typename> class F, typename = void>
struct TupleElementByF {
  constexpr static size_t Index = 0;
};

template <template <typename> class F, typename H, typename... Ts>
struct TupleElementByF<std::tuple<H, Ts...>, F, std::enable_if_t<F<H>::value>> {
  constexpr static size_t Index = 0;
};

template <template <typename> class F, typename H, typename... Ts>
struct TupleElementByF<std::tuple<H, Ts...>, F, std::enable_if_t<!F<H>::value>> {
  constexpr static size_t Index = 1 + TupleElementByF<std::tuple<Ts...>, F>::Index;
};

template <typename T, typename TUP, size_t... Is>
constexpr inline T AggregationByTupImpl(TUP &&tup, std::index_sequence<Is...>) {
  return T{std::get<Is>(tup)...};
}

} // namespace detail

template <typename TUP, template <typename> class F>
constexpr size_t TupleElementByF_v = detail::TupleElementByF<TUP, F>::Index;

template <typename T, typename TUP>
constexpr inline T AggregationByTup(TUP &&tup) {
  // std::tuple_size exposes ::value (not ::size); use it to build the index sequence
  return detail::AggregationByTupImpl<T>(
      std::forward<TUP>(tup),
      std::make_index_sequence<std::tuple_size<std::decay_t<TUP>>::value>{});
}

} // namespace dsl
} // namespace tf
hpp
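tuple_utils.hpp offers two helpers: TupleElementByF_v finds the index of the first tuple element whose type satisfies a predicate, and AggregationByTup forwards a tuple's elements into a braced initializer. A small sketch under the assumed include path; IsDouble and Point are illustrative names.

// Sketch of the tuple helpers above (assumed include path).
#include "taskflow/dsl/tuple_utils.hpp"
#include <tuple>
#include <type_traits>

// Predicate for TupleElementByF_v: matches the first double in a tuple.
template <typename T>
struct IsDouble {
  static constexpr bool value = std::is_same<T, double>::value;
};

struct Point { int x; double y; };

int main() {
  using Tup = std::tuple<int, double, char>;
  // Index of the first element satisfying the predicate (1 here);
  // if nothing matches, the index is >= std::tuple_size<Tup>::value.
  static_assert(tf::dsl::TupleElementByF_v<Tup, IsDouble> == 1, "double sits at index 1");

  // AggregationByTup forwards every tuple element into T's braced initialization.
  Point p = tf::dsl::AggregationByTup<Point>(std::make_tuple(1, 2.0));
  return p.x;  // 1
}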
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/task_trait.hpp
// 2020/08/28 - Created by netcan: https://github.com/netcan #pragma once #include "../core/flow_builder.hpp" #include "../core/task.hpp" #include "type_list.hpp" #include <type_traits> namespace tf { namespace dsl { struct TaskSignature {}; template <typename TASK, typename CONTEXT> struct TaskCb { using TaskType = TASK; void build(FlowBuilder &build, const CONTEXT &context) { task_ = build.emplace(TaskType{context}()); } Task task_; }; template <typename TASK> struct IsTask { template <typename TaskCb> struct apply { constexpr static bool value = std::is_same<typename TaskCb::TaskType, TASK>::value; }; }; template <typename TASK, typename = void> struct TaskTrait; template <typename... TASK> struct SomeTask { using TaskList = Unique_t<Flatten_t<TypeList<typename TaskTrait<TASK>::TaskList...>>>; }; // a task self template <typename TASK> struct TaskTrait< TASK, std::enable_if_t<std::is_base_of<TaskSignature, TASK>::value>> { using TaskList = TypeList<TASK>; }; template <typename... TASK> struct TaskTrait<SomeTask<TASK...>> { using TaskList = typename SomeTask<TASK...>::TaskList; }; } // namespace dsl } // namespace tf
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/dsl.hpp
// TaskflowDSL is an experimental project that leverages C++17 to
// provide a dedicated interface for expressive taskflow programming
//
// Created by netcan: https://github.com/netcan

#pragma once

#include "dsl/task_dsl.hpp"

namespace tf {

}  // end of namespace tf -----------------------------------------------------
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/task_analyzer.hpp
// 2020/08/28 - Created by netcan: https://github.com/netcan #pragma once #include "connection.hpp" #include "type_list.hpp" #include <type_traits> namespace tf { namespace dsl { template <typename... Links> class TaskAnalyzer { template <typename FROMs, typename TOs, typename = void> struct BuildOneToOneLink; template <typename... Fs, typename Ts> struct BuildOneToOneLink<TypeList<Fs...>, Ts> { using type = Concat_t<typename BuildOneToOneLink<Fs, Ts>::type...>; }; template <typename F, typename... Ts> struct BuildOneToOneLink<F, TypeList<Ts...>, std::enable_if_t<!IsTypeList_v<F>>> { using type = TypeList<OneToOneLink<F, Ts>...>; }; template <typename Link> class OneToOneLinkSetF { using FromTaskList = typename Link::FromTaskList; using ToTaskList = typename Link::ToTaskList; public: using type = typename BuildOneToOneLink<FromTaskList, ToTaskList>::type; }; public: using AllTasks = Unique_t< Concat_t<typename Links::FromTaskList..., typename Links::ToTaskList...>>; using OneToOneLinkSet = Unique_t<Flatten_t<Map_t<TypeList<Links...>, OneToOneLinkSetF>>>; }; } // namespace dsl } // namespace tf
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/connection.hpp
// 2020/08/28 - Created by netcan: https://github.com/netcan #pragma once #include "../core/flow_builder.hpp" #include "task_trait.hpp" #include "tuple_utils.hpp" #include "type_list.hpp" namespace tf { namespace dsl { template <typename F, typename T> class Connection { using FROMs = typename TaskTrait<F>::TaskList; using TOs = typename TaskTrait<T>::TaskList; public: using FromTaskList = Unique_t<Flatten_t<FROMs>>; using ToTaskList = Unique_t<Flatten_t<TOs>>; }; template <typename T, typename OUT = TypeList<>> struct Chain; template <typename F, typename OUT> struct Chain<auto (*)(F)->void, OUT> { using From = F; using type = OUT; }; template <typename F, typename T, typename OUT> struct Chain<auto (*)(F)->T, OUT> { private: using To = typename Chain<T, OUT>::From; public: using From = F; using type = typename Chain< T, typename OUT::template appendTo<Connection<From, To>>>::type; }; template <typename FROM, typename TO> struct OneToOneLink { template <typename TasksCB> struct InstanceType { constexpr void build(TasksCB &tasksCb) { constexpr size_t TasksCBSize = std::tuple_size<TasksCB>::value; constexpr size_t FromTaskIndex = TupleElementByF_v<TasksCB, IsTask<FROM>::template apply>; constexpr size_t ToTaskIndex = TupleElementByF_v<TasksCB, IsTask<TO>::template apply>; static_assert(FromTaskIndex < TasksCBSize && ToTaskIndex < TasksCBSize, "fatal: not find TaskCb in TasksCB"); std::get<FromTaskIndex>(tasksCb).task_.precede( std::get<ToTaskIndex>(tasksCb).task_); } }; }; } // namespace dsl }; // namespace tf
hpp
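connection.hpp turns a DSL chain type of the form auto(*)(A) -> auto(*)(B) -> void into a TypeList of Connection<From, To> links, which TaskAnalyzer then expands into one-to-one precedences. A compile-time sketch with hypothetical task types A, B, C, assuming the include paths shown.

// Compile-time sketch of how Chain decomposes a raw chain type (assumed paths).
#include "taskflow/dsl/connection.hpp"
#include <type_traits>

using namespace tf::dsl;

struct A : TaskSignature {};
struct B : TaskSignature {};
struct C : TaskSignature {};

// task(A) -> task(B) -> task(C), written out as the underlying chain type.
using MyChain = auto (*)(A) -> auto (*)(B) -> auto (*)(C) -> void;
using Links   = Chain<MyChain>::type;

static_assert(
  std::is_same<Links, TypeList<Connection<A, B>, Connection<B, C>>>::value,
  "a chain of three tasks yields two connections");

int main() { return 0; }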
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/tsq.hpp
#pragma once #include "../utility/macros.hpp" #include "../utility/traits.hpp" /** @file tsq.hpp @brief task queue include file */ namespace tf { // ---------------------------------------------------------------------------- // Task Types // ---------------------------------------------------------------------------- /** @enum TaskPriority @brief enumeration of all task priority values A priority is an enumerated value of type @c unsigned. Currently, %Taskflow defines three priority levels, @c HIGH, @c NORMAL, and @c LOW, starting from 0, 1, to 2. That is, the lower the value, the higher the priority. */ enum class TaskPriority : unsigned { /** @brief value of the highest priority (i.e., 0) */ HIGH = 0, /** @brief value of the normal priority (i.e., 1) */ NORMAL = 1, /** @brief value of the lowest priority (i.e., 2) */ LOW = 2, /** @brief conventional value for iterating priority values */ MAX = 3 }; // ---------------------------------------------------------------------------- // Task Queue // ---------------------------------------------------------------------------- /** @class: TaskQueue @tparam T data type (must be a pointer type) @tparam MAX_PRIORITY maximum level of the priority @brief class to create a lock-free unbounded single-producer multiple-consumer queue This class implements the work-stealing queue described in the paper, <a href="https://www.di.ens.fr/~zappa/readings/ppopp13.pdf">Correct and Efficient Work-Stealing for Weak Memory Models</a>, and extends it to include priority. Only the queue owner can perform pop and push operations, while others can steal data from the queue simultaneously. Priority starts from zero (highest priority) to the template value `MAX_PRIORITY-1` (lowest priority). All operations are associated with priority values to indicate the corresponding queues to which an operation is applied. The default template value, `MAX_PRIORITY`, is `TaskPriority::MAX` which applies only three priority levels to the task queue. @code{.cpp} auto [A, B, C, D, E] = taskflow.emplace( [] () { }, [&] () { std::cout << "Task B: " << counter++ << '\n'; // 0 }, [&] () { std::cout << "Task C: " << counter++ << '\n'; // 2 }, [&] () { std::cout << "Task D: " << counter++ << '\n'; // 1 }, [] () { } ); A.precede(B, C, D); E.succeed(B, C, D); B.priority(tf::TaskPriority::HIGH); C.priority(tf::TaskPriority::LOW); D.priority(tf::TaskPriority::NORMAL); executor.run(taskflow).wait(); @endcode In the above example, we have a task graph of five tasks, @c A, @c B, @c C, @c D, and @c E, in which @c B, @c C, and @c D can run in simultaneously when @c A finishes. Since we only uses one worker thread in the executor, we can deterministically run @c B first, then @c D, and @c C in order of their priority values. 
The output is as follows: @code{.shell-session} Task B: 0 Task D: 1 Task C: 2 @endcode */ template <typename T, unsigned MAX_PRIORITY = static_cast<unsigned>(TaskPriority::MAX)> class TaskQueue { static_assert(MAX_PRIORITY > 0, "MAX_PRIORITY must be at least one"); static_assert(std::is_pointer_v<T>, "T must be a pointer type"); struct Array { int64_t C; int64_t M; std::atomic<T>* S; explicit Array(int64_t c) : C {c}, M {c-1}, S {new std::atomic<T>[static_cast<size_t>(C)]} { } ~Array() { delete [] S; } int64_t capacity() const noexcept { return C; } void push(int64_t i, T o) noexcept { S[i & M].store(o, std::memory_order_relaxed); } T pop(int64_t i) noexcept { return S[i & M].load(std::memory_order_relaxed); } Array* resize(int64_t b, int64_t t) { Array* ptr = new Array {2*C}; for(int64_t i=t; i!=b; ++i) { ptr->push(i, pop(i)); } return ptr; } }; // Doubling the alignment by 2 seems to generate the most // decent performance. CachelineAligned<std::atomic<int64_t>> _top[MAX_PRIORITY]; CachelineAligned<std::atomic<int64_t>> _bottom[MAX_PRIORITY]; std::atomic<Array*> _array[MAX_PRIORITY]; std::vector<Array*> _garbage[MAX_PRIORITY]; //std::atomic<T> _cache {nullptr}; public: /** @brief constructs the queue with a given capacity @param capacity the capacity of the queue (must be power of 2) */ explicit TaskQueue(int64_t capacity = 512); /** @brief destructs the queue */ ~TaskQueue(); /** @brief queries if the queue is empty at the time of this call */ bool empty() const noexcept; /** @brief queries if the queue is empty at a specific priority value */ bool empty(unsigned priority) const noexcept; /** @brief queries the number of items at the time of this call */ size_t size() const noexcept; /** @brief queries the number of items with the given priority at the time of this call */ size_t size(unsigned priority) const noexcept; /** @brief queries the capacity of the queue */ int64_t capacity() const noexcept; /** @brief queries the capacity of the queue at a specific priority value */ int64_t capacity(unsigned priority) const noexcept; /** @brief inserts an item to the queue @param item the item to push to the queue @param priority priority value of the item to push (default = 0) Only the owner thread can insert an item to the queue. The operation can trigger the queue to resize its capacity if more space is required. */ TF_FORCE_INLINE void push(T item, unsigned priority); /** @brief pops out an item from the queue Only the owner thread can pop out an item from the queue. The return can be a @c nullptr if this operation failed (empty queue). */ T pop(); /** @brief pops out an item with a specific priority value from the queue @param priority priority of the item to pop Only the owner thread can pop out an item from the queue. The return can be a @c nullptr if this operation failed (empty queue). */ TF_FORCE_INLINE T pop(unsigned priority); /** @brief steals an item from the queue Any threads can try to steal an item from the queue. The return can be a @c nullptr if this operation failed (not necessary empty). */ T steal(); /** @brief steals an item with a specific priority value from the queue @param priority priority of the item to steal Any threads can try to steal an item from the queue. The return can be a @c nullptr if this operation failed (not necessary empty). 
*/ T steal(unsigned priority); private: TF_NO_INLINE Array* resize_array(Array* a, unsigned p, std::int64_t b, std::int64_t t); }; // Constructor template <typename T, unsigned MAX_PRIORITY> TaskQueue<T, MAX_PRIORITY>::TaskQueue(int64_t c) { assert(c && (!(c & (c-1)))); unroll<0, MAX_PRIORITY, 1>([&](auto p){ _top[p].data.store(0, std::memory_order_relaxed); _bottom[p].data.store(0, std::memory_order_relaxed); _array[p].store(new Array{c}, std::memory_order_relaxed); _garbage[p].reserve(32); }); } // Destructor template <typename T, unsigned MAX_PRIORITY> TaskQueue<T, MAX_PRIORITY>::~TaskQueue() { unroll<0, MAX_PRIORITY, 1>([&](auto p){ for(auto a : _garbage[p]) { delete a; } delete _array[p].load(); }); } // Function: empty template <typename T, unsigned MAX_PRIORITY> bool TaskQueue<T, MAX_PRIORITY>::empty() const noexcept { for(unsigned i=0; i<MAX_PRIORITY; i++) { if(!empty(i)) { return false; } } return true; } // Function: empty template <typename T, unsigned MAX_PRIORITY> bool TaskQueue<T, MAX_PRIORITY>::empty(unsigned p) const noexcept { int64_t b = _bottom[p].data.load(std::memory_order_relaxed); int64_t t = _top[p].data.load(std::memory_order_relaxed); return (b <= t); } // Function: size template <typename T, unsigned MAX_PRIORITY> size_t TaskQueue<T, MAX_PRIORITY>::size() const noexcept { size_t s; unroll<0, MAX_PRIORITY, 1>([&](auto i) { s = i ? size(i) + s : size(i); }); return s; } // Function: size template <typename T, unsigned MAX_PRIORITY> size_t TaskQueue<T, MAX_PRIORITY>::size(unsigned p) const noexcept { int64_t b = _bottom[p].data.load(std::memory_order_relaxed); int64_t t = _top[p].data.load(std::memory_order_relaxed); return static_cast<size_t>(b >= t ? b - t : 0); } // Function: push template <typename T, unsigned MAX_PRIORITY> TF_FORCE_INLINE void TaskQueue<T, MAX_PRIORITY>::push(T o, unsigned p) { int64_t b = _bottom[p].data.load(std::memory_order_relaxed); int64_t t = _top[p].data.load(std::memory_order_acquire); Array* a = _array[p].load(std::memory_order_relaxed); // queue is full if(a->capacity() - 1 < (b - t)) { a = resize_array(a, p, b, t); } a->push(b, o); std::atomic_thread_fence(std::memory_order_release); _bottom[p].data.store(b + 1, std::memory_order_relaxed); } // Function: pop template <typename T, unsigned MAX_PRIORITY> T TaskQueue<T, MAX_PRIORITY>::pop() { for(unsigned i=0; i<MAX_PRIORITY; i++) { if(auto t = pop(i); t) { return t; } } return nullptr; } // Function: pop template <typename T, unsigned MAX_PRIORITY> TF_FORCE_INLINE T TaskQueue<T, MAX_PRIORITY>::pop(unsigned p) { int64_t b = _bottom[p].data.load(std::memory_order_relaxed) - 1; Array* a = _array[p].load(std::memory_order_relaxed); _bottom[p].data.store(b, std::memory_order_relaxed); std::atomic_thread_fence(std::memory_order_seq_cst); int64_t t = _top[p].data.load(std::memory_order_relaxed); T item {nullptr}; if(t <= b) { item = a->pop(b); if(t == b) { // the last item just got stolen if(!_top[p].data.compare_exchange_strong(t, t+1, std::memory_order_seq_cst, std::memory_order_relaxed)) { item = nullptr; } _bottom[p].data.store(b + 1, std::memory_order_relaxed); } } else { _bottom[p].data.store(b + 1, std::memory_order_relaxed); } return item; } // Function: steal template <typename T, unsigned MAX_PRIORITY> T TaskQueue<T, MAX_PRIORITY>::steal() { for(unsigned i=0; i<MAX_PRIORITY; i++) { if(auto t = steal(i); t) { return t; } } return nullptr; } // Function: steal template <typename T, unsigned MAX_PRIORITY> T TaskQueue<T, MAX_PRIORITY>::steal(unsigned p) { int64_t t = 
_top[p].data.load(std::memory_order_acquire); std::atomic_thread_fence(std::memory_order_seq_cst); int64_t b = _bottom[p].data.load(std::memory_order_acquire); T item {nullptr}; if(t < b) { Array* a = _array[p].load(std::memory_order_consume); item = a->pop(t); if(!_top[p].data.compare_exchange_strong(t, t+1, std::memory_order_seq_cst, std::memory_order_relaxed)) { return nullptr; } } return item; } // Function: capacity template <typename T, unsigned MAX_PRIORITY> int64_t TaskQueue<T, MAX_PRIORITY>::capacity() const noexcept { size_t s; unroll<0, MAX_PRIORITY, 1>([&](auto i) { s = i ? capacity(i) + s : capacity(i); }); return s; } // Function: capacity template <typename T, unsigned MAX_PRIORITY> int64_t TaskQueue<T, MAX_PRIORITY>::capacity(unsigned p) const noexcept { return _array[p].load(std::memory_order_relaxed)->capacity(); } template <typename T, unsigned MAX_PRIORITY> TF_NO_INLINE typename TaskQueue<T, MAX_PRIORITY>::Array* TaskQueue<T, MAX_PRIORITY>::resize_array(Array* a, unsigned p, std::int64_t b, std::int64_t t) { Array* tmp = a->resize(b, t); _garbage[p].push_back(a); std::swap(a, tmp); _array[p].store(a, std::memory_order_release); // Note: the original paper using relaxed causes t-san to complain //_array.store(a, std::memory_order_relaxed); return a; } } // end of namespace tf -----------------------------------------------------
hpp
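tsq.hpp documents a priority-aware work-stealing queue: only the owner thread may push and pop, other threads may steal concurrently, and pop() drains priorities from HIGH to LOW. A minimal sketch of that usage pattern under the assumed include path; the queue stores pointers only.

// Sketch of the single-producer multiple-consumer TaskQueue described above.
#include "taskflow/core/tsq.hpp"
#include <iostream>
#include <thread>

int main() {
  tf::TaskQueue<int*> queue;          // T must be a pointer type
  int items[3] = {1, 2, 3};

  // Owner thread: push with a priority value (0 = HIGH is served first by pop()).
  queue.push(&items[0], static_cast<unsigned>(tf::TaskPriority::LOW));
  queue.push(&items[1], static_cast<unsigned>(tf::TaskPriority::HIGH));
  queue.push(&items[2], static_cast<unsigned>(tf::TaskPriority::NORMAL));

  // A thief thread may steal concurrently with the owner's pop().
  std::thread thief([&queue](){
    if(int* s = queue.steal()) {
      std::cout << "stolen: " << *s << '\n';
    }
  });

  // Owner pops the remaining items in priority order: HIGH, NORMAL, LOW.
  while(int* t = queue.pop()) {
    std::cout << "popped: " << *t << '\n';
  }
  thief.join();
}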
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/graph.hpp
#pragma once #include "../utility/traits.hpp" #include "../utility/iterator.hpp" #include "../utility/object_pool.hpp" #include "../utility/os.hpp" #include "../utility/math.hpp" #include "../utility/small_vector.hpp" #include "../utility/serializer.hpp" #include "error.hpp" #include "declarations.hpp" #include "semaphore.hpp" #include "environment.hpp" #include "topology.hpp" #include "tsq.hpp" /** @file graph.hpp @brief graph include file */ namespace tf { // ---------------------------------------------------------------------------- // Class: CustomGraphBase // ---------------------------------------------------------------------------- /** @private */ class CustomGraphBase { public: virtual void dump(std::ostream&, const void*, const std::string&) const = 0; virtual ~CustomGraphBase() = default; }; // ---------------------------------------------------------------------------- // Class: Graph // ---------------------------------------------------------------------------- /** @class Graph @brief class to create a graph object A graph is the ultimate storage for a task dependency graph and is the main gateway to interact with an executor. A graph manages a set of nodes in a global object pool that animates and recycles node objects efficiently without going through repetitive and expensive memory allocations and deallocations. This class is mainly used for creating an opaque graph object in a custom class to interact with the executor through taskflow composition. A graph object is move-only. */ class Graph { friend class Node; friend class FlowBuilder; friend class Subflow; friend class Taskflow; friend class Executor; public: /** @brief constructs a graph object */ Graph() = default; /** @brief disabled copy constructor */ Graph(const Graph&) = delete; /** @brief constructs a graph using move semantics */ Graph(Graph&&); /** @brief destructs the graph object */ ~Graph(); /** @brief disabled copy assignment operator */ Graph& operator = (const Graph&) = delete; /** @brief assigns a graph using move semantics */ Graph& operator = (Graph&&); /** @brief queries if the graph is empty */ bool empty() const; /** @brief queries the number of nodes in the graph */ size_t size() const; /** @brief clears the graph */ void clear(); private: std::vector<Node*> _nodes; void _clear(); void _clear_detached(); void _merge(Graph&&); void _erase(Node*); template <typename ...ArgsT> Node* _emplace_back(ArgsT&&... args); Node* _emplace_back(); }; // ---------------------------------------------------------------------------- /** @class Runtime @brief class to create a runtime object used by a runtime task A runtime object is used by a runtime task for users to interact with the scheduling runtime, such as scheduling an active task and spawning a subflow. @code{.cpp} taskflow.emplace([](tf::Runtime& rt){ rt.run([](tf::Subflow& sf){ tf::Task A = sf.emplace([](){}); tf::Task B = sf.emplace([](){}); A.precede(B); }); }); @endcode A runtime task is associated with an executor and a worker that runs the runtime task. */ class Runtime { friend class Executor; public: /** @brief obtains the running executor The running executor of a runtime task is the executor that runs the parent taskflow of that runtime task. 
@code{.cpp} tf::Executor executor; tf::Taskflow taskflow; taskflow.emplace([&](tf::Runtime& rt){ assert(&(rt.executor()) == &executor); }); executor.run(taskflow).wait(); @endcode */ Executor& executor(); /** @brief schedules an active task immediately to the worker's queue @param task the given active task to schedule immediately This member function immediately schedules an active task to the task queue of the associated worker in the runtime task. An active task is a task in a running taskflow. The task may or may not be running, and scheduling that task will immediately put the task into the task queue of the worker that is running the runtime task. Consider the following example: @code{.cpp} tf::Task A, B, C, D; std::tie(A, B, C, D) = taskflow.emplace( [] () { return 0; }, [&C] (tf::Runtime& rt) { // C must be captured by reference std::cout << "B\n"; rt.schedule(C); }, [] () { std::cout << "C\n"; }, [] () { std::cout << "D\n"; } ); A.precede(B, C, D); executor.run(taskflow).wait(); @endcode The executor will first run the condition task @c A which returns @c 0 to inform the scheduler to go to the runtime task @c B. During the execution of @c B, it directly schedules task @c C without going through the normal taskflow graph scheduling process. At this moment, task @c C is active because its parent taskflow is running. When the taskflow finishes, we will see both @c B and @c C in the output. */ void schedule(Task task); /** @brief runs the given target and waits until it completes A target can be (1) a callable to spawn a subflow or (2) a composable target with `tf::Graph& T::graph()` defined @code{.cpp} // complete a subflow synchronously taskflow.emplace([](tf::Runtime& rt){ rt.run_and_wait([](tf::Subflow& sf){ tf::Task A = sf.emplace([](){}); tf::Task B = sf.emplace([](){}); }); }); // complete a custom graph synchronously tf::Taskflow taskflow; taskflow.emplace([](){}); taskflow.emplace([&](tf::Runtime& rt){ rt.run_and_wait(taskflow); }); @endcode */ template <typename T> void run_and_wait(T&& target); private: explicit Runtime(Executor&, Worker&, Node*); Executor& _executor; Worker& _worker; Node* _parent; }; // constructor inline Runtime::Runtime(Executor& e, Worker& w, Node* p) : _executor{e}, _worker {w}, _parent {p}{ } // Function: executor inline Executor& Runtime::executor() { return _executor; } // ---------------------------------------------------------------------------- // Node // ---------------------------------------------------------------------------- /** @private */ class Node { friend class Graph; friend class Task; friend class TaskView; friend class Taskflow; friend class Executor; friend class FlowBuilder; friend class Subflow; friend class Runtime; TF_ENABLE_POOLABLE_ON_THIS; // state bit flag constexpr static int CONDITIONED = 1; constexpr static int DETACHED = 2; constexpr static int ACQUIRED = 4; constexpr static int READY = 8; constexpr static int DEFERRED = 16; // static work handle struct Static { template <typename C> Static(C&&); std::function<void()> work; }; // runtime work handle struct Runtime { template <typename C> Runtime(C&&); std::function<void(tf::Runtime&)> work; }; // dynamic work handle struct Dynamic { template <typename C> Dynamic(C&&); std::function<void(Subflow&)> work; Graph subgraph; }; // condition work handle struct Condition { template <typename C> Condition(C&&); std::function<int()> work; }; // multi-condition work handle struct MultiCondition { template <typename C> MultiCondition(C&&); std::function<SmallVector<int>()> work; 
}; // module work handle struct Module { template <typename T> Module(T&); Graph& graph; }; // Async work struct Async { template <typename T> Async(T&&, std::shared_ptr<AsyncTopology>); std::function<void(bool)> work; std::shared_ptr<AsyncTopology> topology; }; // Silent async work struct SilentAsync { template <typename C> SilentAsync(C&&); std::function<void()> work; }; // cudaFlow work handle struct cudaFlow { template <typename C, typename G> cudaFlow(C&& c, G&& g); std::function<void(Executor&, Node*)> work; std::unique_ptr<CustomGraphBase> graph; }; // syclFlow work handle struct syclFlow { template <typename C, typename G> syclFlow(C&& c, G&& g); std::function<void(Executor&, Node*)> work; std::unique_ptr<CustomGraphBase> graph; }; using handle_t = std::variant< std::monostate, // placeholder Static, // static tasking Dynamic, // dynamic tasking Condition, // conditional tasking MultiCondition, // multi-conditional tasking Module, // composable tasking Async, // async tasking SilentAsync, // async tasking (no future) cudaFlow, // cudaFlow syclFlow, // syclFlow Runtime // runtime tasking >; struct Semaphores { SmallVector<Semaphore*> to_acquire; SmallVector<Semaphore*> to_release; }; public: // variant index constexpr static auto PLACEHOLDER = get_index_v<std::monostate, handle_t>; constexpr static auto STATIC = get_index_v<Static, handle_t>; constexpr static auto DYNAMIC = get_index_v<Dynamic, handle_t>; constexpr static auto CONDITION = get_index_v<Condition, handle_t>; constexpr static auto MULTI_CONDITION = get_index_v<MultiCondition, handle_t>; constexpr static auto MODULE = get_index_v<Module, handle_t>; constexpr static auto ASYNC = get_index_v<Async, handle_t>; constexpr static auto SILENT_ASYNC = get_index_v<SilentAsync, handle_t>; constexpr static auto CUDAFLOW = get_index_v<cudaFlow, handle_t>; constexpr static auto SYCLFLOW = get_index_v<syclFlow, handle_t>; constexpr static auto RUNTIME = get_index_v<Runtime, handle_t>; template <typename... Args> Node(Args&&... 
args); ~Node(); size_t num_successors() const; size_t num_dependents() const; size_t num_strong_dependents() const; size_t num_weak_dependents() const; const std::string& name() const; private: std::string _name; unsigned _priority {0}; void* _data {nullptr}; handle_t _handle; SmallVector<Node*> _successors; SmallVector<Node*> _dependents; Topology* _topology {nullptr}; Node* _parent {nullptr}; std::atomic<int> _state {0}; std::atomic<size_t> _join_counter {0}; std::unique_ptr<Semaphores> _semaphores; void _precede(Node*); void _set_up_join_counter(); bool _is_cancelled() const; bool _is_conditioner() const; bool _acquire_all(SmallVector<Node*>&); SmallVector<Node*> _release_all(); }; // ---------------------------------------------------------------------------- // Node Object Pool // ---------------------------------------------------------------------------- /** @private */ inline ObjectPool<Node> node_pool; // ---------------------------------------------------------------------------- // Definition for Node::Static // ---------------------------------------------------------------------------- // Constructor template <typename C> Node::Static::Static(C&& c) : work {std::forward<C>(c)} { } // ---------------------------------------------------------------------------- // Definition for Node::Dynamic // ---------------------------------------------------------------------------- // Constructor template <typename C> Node::Dynamic::Dynamic(C&& c) : work {std::forward<C>(c)} { } // ---------------------------------------------------------------------------- // Definition for Node::Condition // ---------------------------------------------------------------------------- // Constructor template <typename C> Node::Condition::Condition(C&& c) : work {std::forward<C>(c)} { } // ---------------------------------------------------------------------------- // Definition for Node::MultiCondition // ---------------------------------------------------------------------------- // Constructor template <typename C> Node::MultiCondition::MultiCondition(C&& c) : work {std::forward<C>(c)} { } // ---------------------------------------------------------------------------- // Definition for Node::cudaFlow // ---------------------------------------------------------------------------- template <typename C, typename G> Node::cudaFlow::cudaFlow(C&& c, G&& g) : work {std::forward<C>(c)}, graph {std::forward<G>(g)} { } // ---------------------------------------------------------------------------- // Definition for Node::syclFlow // ---------------------------------------------------------------------------- template <typename C, typename G> Node::syclFlow::syclFlow(C&& c, G&& g) : work {std::forward<C>(c)}, graph {std::forward<G>(g)} { } // ---------------------------------------------------------------------------- // Definition for Node::Module // ---------------------------------------------------------------------------- // Constructor template <typename T> inline Node::Module::Module(T& obj) : graph{ obj.graph() } { } // ---------------------------------------------------------------------------- // Definition for Node::Async // ---------------------------------------------------------------------------- // Constructor template <typename C> Node::Async::Async(C&& c, std::shared_ptr<AsyncTopology>tpg) : work {std::forward<C>(c)}, topology {std::move(tpg)} { } // ---------------------------------------------------------------------------- // Definition for Node::SilentAsync // 
---------------------------------------------------------------------------- // Constructor template <typename C> Node::SilentAsync::SilentAsync(C&& c) : work {std::forward<C>(c)} { } // ---------------------------------------------------------------------------- // Definition for Node::Runtime // ---------------------------------------------------------------------------- // Constructor template <typename C> Node::Runtime::Runtime(C&& c) : work {std::forward<C>(c)} { } // ---------------------------------------------------------------------------- // Definition for Node // ---------------------------------------------------------------------------- // Constructor template <typename... Args> Node::Node(Args&&... args): _handle{std::forward<Args>(args)...} { } // Destructor inline Node::~Node() { // this is to avoid stack overflow if(_handle.index() == DYNAMIC) { // using std::get_if instead of std::get makes this compatible // with older macOS versions // the result of std::get_if is guaranteed to be non-null // due to the index check above auto& subgraph = std::get_if<Dynamic>(&_handle)->subgraph; std::vector<Node*> nodes; nodes.reserve(subgraph.size()); std::move( subgraph._nodes.begin(), subgraph._nodes.end(), std::back_inserter(nodes) ); subgraph._nodes.clear(); size_t i = 0; while(i < nodes.size()) { if(nodes[i]->_handle.index() == DYNAMIC) { auto& sbg = std::get_if<Dynamic>(&(nodes[i]->_handle))->subgraph; std::move( sbg._nodes.begin(), sbg._nodes.end(), std::back_inserter(nodes) ); sbg._nodes.clear(); } ++i; } //auto& np = Graph::_node_pool(); for(i=0; i<nodes.size(); ++i) { node_pool.recycle(nodes[i]); } } } // Procedure: _precede inline void Node::_precede(Node* v) { _successors.push_back(v); v->_dependents.push_back(this); } // Function: num_successors inline size_t Node::num_successors() const { return _successors.size(); } // Function: dependents inline size_t Node::num_dependents() const { return _dependents.size(); } // Function: num_weak_dependents inline size_t Node::num_weak_dependents() const { size_t n = 0; for(size_t i=0; i<_dependents.size(); i++) { //if(_dependents[i]->_handle.index() == Node::CONDITION) { if(_dependents[i]->_is_conditioner()) { n++; } } return n; } // Function: num_strong_dependents inline size_t Node::num_strong_dependents() const { size_t n = 0; for(size_t i=0; i<_dependents.size(); i++) { //if(_dependents[i]->_handle.index() != Node::CONDITION) { if(!_dependents[i]->_is_conditioner()) { n++; } } return n; } // Function: name inline const std::string& Node::name() const { return _name; } // Function: _is_conditioner inline bool Node::_is_conditioner() const { return _handle.index() == Node::CONDITION || _handle.index() == Node::MULTI_CONDITION; } // Function: _is_cancelled inline bool Node::_is_cancelled() const { if(_handle.index() == Node::ASYNC) { auto h = std::get_if<Node::Async>(&_handle); if(h->topology && h->topology->_is_cancelled.load(std::memory_order_relaxed)) { return true; } // async tasks spawned from subflow does not have topology } return _topology && _topology->_is_cancelled.load(std::memory_order_relaxed); } // Procedure: _set_up_join_counter inline void Node::_set_up_join_counter() { size_t c = 0; for(auto p : _dependents) { //if(p->_handle.index() == Node::CONDITION) { if(p->_is_conditioner()) { _state.fetch_or(Node::CONDITIONED, std::memory_order_relaxed); } else { c++; } } _join_counter.store(c, std::memory_order_release); } // Function: _acquire_all inline bool Node::_acquire_all(SmallVector<Node*>& nodes) { auto& to_acquire 
= _semaphores->to_acquire; for(size_t i = 0; i < to_acquire.size(); ++i) { if(!to_acquire[i]->_try_acquire_or_wait(this)) { for(size_t j = 1; j <= i; ++j) { auto r = to_acquire[i-j]->_release(); nodes.insert(std::end(nodes), std::begin(r), std::end(r)); } return false; } } return true; } // Function: _release_all inline SmallVector<Node*> Node::_release_all() { auto& to_release = _semaphores->to_release; SmallVector<Node*> nodes; for(const auto& sem : to_release) { auto r = sem->_release(); nodes.insert(std::end(nodes), std::begin(r), std::end(r)); } return nodes; } // ---------------------------------------------------------------------------- // Graph definition // ---------------------------------------------------------------------------- // Destructor inline Graph::~Graph() { _clear(); } // Move constructor inline Graph::Graph(Graph&& other) : _nodes {std::move(other._nodes)} { } // Move assignment inline Graph& Graph::operator = (Graph&& other) { _clear(); _nodes = std::move(other._nodes); return *this; } // Procedure: clear inline void Graph::clear() { _clear(); } // Procedure: clear inline void Graph::_clear() { for(auto node : _nodes) { node_pool.recycle(node); } _nodes.clear(); } // Procedure: clear_detached inline void Graph::_clear_detached() { auto mid = std::partition(_nodes.begin(), _nodes.end(), [] (Node* node) { return !(node->_state.load(std::memory_order_relaxed) & Node::DETACHED); }); for(auto itr = mid; itr != _nodes.end(); ++itr) { node_pool.recycle(*itr); } _nodes.resize(std::distance(_nodes.begin(), mid)); } // Procedure: merge inline void Graph::_merge(Graph&& g) { for(auto n : g._nodes) { _nodes.push_back(n); } g._nodes.clear(); } // Function: erase inline void Graph::_erase(Node* node) { if(auto I = std::find(_nodes.begin(), _nodes.end(), node); I != _nodes.end()) { _nodes.erase(I); node_pool.recycle(node); } } // Function: size inline size_t Graph::size() const { return _nodes.size(); } // Function: empty inline bool Graph::empty() const { return _nodes.empty(); } // Function: emplace_back template <typename ...ArgsT> Node* Graph::_emplace_back(ArgsT&&... args) { _nodes.push_back(node_pool.animate(std::forward<ArgsT>(args)...)); return _nodes.back(); } // Function: emplace_back inline Node* Graph::_emplace_back() { _nodes.push_back(node_pool.animate()); return _nodes.back(); } } // end of namespace tf. ---------------------------------------------------
hpp
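graph.hpp also defines tf::Runtime, whose run_and_wait lets a runtime task complete another task graph synchronously while the calling worker stays inside the scheduler. A short sketch based on the doc comments above, with illustrative task bodies and an assumed include path.

// Sketch of Runtime::run_and_wait as documented above.
#include <taskflow/taskflow.hpp>
#include <iostream>

int main() {
  tf::Executor executor;
  tf::Taskflow taskflow, work;

  work.emplace([](){ std::cout << "work item\n"; });

  taskflow.emplace([&](tf::Runtime& rt){
    std::cout << "before work\n";
    rt.run_and_wait(work);   // complete the 'work' graph synchronously, in place
    std::cout << "after work\n";
  });

  executor.run(taskflow).wait();
}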
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/executor.hpp
#pragma once #include "observer.hpp" #include "taskflow.hpp" /** @file executor.hpp @brief executor include file */ namespace tf { // ---------------------------------------------------------------------------- // Executor Definition // ---------------------------------------------------------------------------- /** @class Executor @brief class to create an executor for running a taskflow graph An executor manages a set of worker threads to run one or multiple taskflows using an efficient work-stealing scheduling algorithm. @code{.cpp} // Declare an executor and a taskflow tf::Executor executor; tf::Taskflow taskflow; // Add three tasks into the taskflow tf::Task A = taskflow.emplace([] () { std::cout << "This is TaskA\n"; }); tf::Task B = taskflow.emplace([] () { std::cout << "This is TaskB\n"; }); tf::Task C = taskflow.emplace([] () { std::cout << "This is TaskC\n"; }); // Build precedence between tasks A.precede(B, C); tf::Future<void> fu = executor.run(taskflow); fu.wait(); // block until the execution completes executor.run(taskflow, [](){ std::cout << "end of 1 run"; }).wait(); executor.run_n(taskflow, 4); executor.wait_for_all(); // block until all associated executions finish executor.run_n(taskflow, 4, [](){ std::cout << "end of 4 runs"; }).wait(); executor.run_until(taskflow, [cnt=0] () mutable { return ++cnt == 10; }); @endcode All the @c run methods are @em thread-safe. You can submit multiple taskflows at the same time to an executor from different threads. */ class Executor { friend class FlowBuilder; friend class Subflow; friend class Runtime; public: /** @brief constructs the executor with @c N worker threads @param N number of workers (default std::thread::hardware_concurrency) @param wix worker interface class to alter worker (thread) behaviors The constructor spawns @c N worker threads to run tasks in a work-stealing loop. The number of workers must be greater than zero or an exception will be thrown. By default, the number of worker threads is equal to the maximum hardware concurrency returned by std::thread::hardware_concurrency. Users can alter the worker behavior, such as changing thread affinity, via deriving an instance from tf::WorkerInterface. */ explicit Executor( size_t N = std::thread::hardware_concurrency(), std::shared_ptr<WorkerInterface> wix = nullptr ); /** @brief destructs the executor The destructor calls Executor::wait_for_all to wait for all submitted taskflows to complete and then notifies all worker threads to stop and join these threads. */ ~Executor(); /** @brief runs a taskflow once @param taskflow a tf::Taskflow object @return a tf::Future that holds the result of the execution This member function executes the given taskflow once and returns a tf::Future object that eventually holds the result of the execution. @code{.cpp} tf::Future<void> future = executor.run(taskflow); // do something else future.wait(); @endcode This member function is thread-safe. @attention The executor does not own the given taskflow. It is your responsibility to ensure the taskflow remains alive during its execution. */ tf::Future<void> run(Taskflow& taskflow); /** @brief runs a moved taskflow once @param taskflow a moved tf::Taskflow object @return a tf::Future that holds the result of the execution This member function executes a moved taskflow once and returns a tf::Future object that eventually holds the result of the execution. The executor will take care of the lifetime of the moved taskflow. 
@code{.cpp} tf::Future<void> future = executor.run(std::move(taskflow)); // do something else future.wait(); @endcode This member function is thread-safe. */ tf::Future<void> run(Taskflow&& taskflow); /** @brief runs a taskflow once and invoke a callback upon completion @param taskflow a tf::Taskflow object @param callable a callable object to be invoked after this run @return a tf::Future that holds the result of the execution This member function executes the given taskflow once and invokes the given callable when the execution completes. This member function returns a tf::Future object that eventually holds the result of the execution. @code{.cpp} tf::Future<void> future = executor.run(taskflow, [](){ std::cout << "done"; }); // do something else future.wait(); @endcode This member function is thread-safe. @attention The executor does not own the given taskflow. It is your responsibility to ensure the taskflow remains alive during its execution. */ template<typename C> tf::Future<void> run(Taskflow& taskflow, C&& callable); /** @brief runs a moved taskflow once and invoke a callback upon completion @param taskflow a moved tf::Taskflow object @param callable a callable object to be invoked after this run @return a tf::Future that holds the result of the execution This member function executes a moved taskflow once and invokes the given callable when the execution completes. This member function returns a tf::Future object that eventually holds the result of the execution. The executor will take care of the lifetime of the moved taskflow. @code{.cpp} tf::Future<void> future = executor.run( std::move(taskflow), [](){ std::cout << "done"; } ); // do something else future.wait(); @endcode This member function is thread-safe. */ template<typename C> tf::Future<void> run(Taskflow&& taskflow, C&& callable); /** @brief runs a taskflow for @c N times @param taskflow a tf::Taskflow object @param N number of runs @return a tf::Future that holds the result of the execution This member function executes the given taskflow @c N times and returns a tf::Future object that eventually holds the result of the execution. @code{.cpp} tf::Future<void> future = executor.run_n(taskflow, 2); // run taskflow 2 times // do something else future.wait(); @endcode This member function is thread-safe. @attention The executor does not own the given taskflow. It is your responsibility to ensure the taskflow remains alive during its execution. */ tf::Future<void> run_n(Taskflow& taskflow, size_t N); /** @brief runs a moved taskflow for @c N times @param taskflow a moved tf::Taskflow object @param N number of runs @return a tf::Future that holds the result of the execution This member function executes a moved taskflow @c N times and returns a tf::Future object that eventually holds the result of the execution. The executor will take care of the lifetime of the moved taskflow. @code{.cpp} tf::Future<void> future = executor.run_n( std::move(taskflow), 2 // run the moved taskflow 2 times ); // do something else future.wait(); @endcode This member function is thread-safe. */ tf::Future<void> run_n(Taskflow&& taskflow, size_t N); /** @brief runs a taskflow for @c N times and then invokes a callback @param taskflow a tf::Taskflow @param N number of runs @param callable a callable object to be invoked after this run @return a tf::Future that holds the result of the execution This member function executes the given taskflow @c N times and invokes the given callable when the execution completes. 
This member function returns a tf::Future object that eventually holds the result of the execution. @code{.cpp} tf::Future<void> future = executor.run( taskflow, 2, [](){ std::cout << "done"; } // runs taskflow 2 times and invoke // the lambda to print "done" ); // do something else future.wait(); @endcode This member function is thread-safe. @attention The executor does not own the given taskflow. It is your responsibility to ensure the taskflow remains alive during its execution. */ template<typename C> tf::Future<void> run_n(Taskflow& taskflow, size_t N, C&& callable); /** @brief runs a moved taskflow for @c N times and then invokes a callback @param taskflow a moved tf::Taskflow @param N number of runs @param callable a callable object to be invoked after this run @return a tf::Future that holds the result of the execution This member function executes a moved taskflow @c N times and invokes the given callable when the execution completes. This member function returns a tf::Future object that eventually holds the result of the execution. @code{.cpp} tf::Future<void> future = executor.run_n( // run the moved taskflow 2 times and invoke the lambda to print "done" std::move(taskflow), 2, [](){ std::cout << "done"; } ); // do something else future.wait(); @endcode This member function is thread-safe. */ template<typename C> tf::Future<void> run_n(Taskflow&& taskflow, size_t N, C&& callable); /** @brief runs a taskflow multiple times until the predicate becomes true @param taskflow a tf::Taskflow @param pred a boolean predicate to return @c true for stop @return a tf::Future that holds the result of the execution This member function executes the given taskflow multiple times until the predicate returns @c true. This member function returns a tf::Future object that eventually holds the result of the execution. @code{.cpp} tf::Future<void> future = executor.run_until( taskflow, [](){ return rand()%10 == 0 } ); // do something else future.wait(); @endcode This member function is thread-safe. @attention The executor does not own the given taskflow. It is your responsibility to ensure the taskflow remains alive during its execution. */ template<typename P> tf::Future<void> run_until(Taskflow& taskflow, P&& pred); /** @brief runs a moved taskflow and keeps running it until the predicate becomes true @param taskflow a moved tf::Taskflow object @param pred a boolean predicate to return @c true for stop @return a tf::Future that holds the result of the execution This member function executes a moved taskflow multiple times until the predicate returns @c true. This member function returns a tf::Future object that eventually holds the result of the execution. The executor will take care of the lifetime of the moved taskflow. @code{.cpp} tf::Future<void> future = executor.run_until( std::move(taskflow), [](){ return rand()%10 == 0 } ); // do something else future.wait(); @endcode This member function is thread-safe. */ template<typename P> tf::Future<void> run_until(Taskflow&& taskflow, P&& pred); /** @brief runs a taskflow multiple times until the predicate becomes true and then invokes the callback @param taskflow a tf::Taskflow @param pred a boolean predicate to return @c true for stop @param callable a callable object to be invoked after this run completes @return a tf::Future that holds the result of the execution This member function executes the given taskflow multiple times until the predicate returns @c true and then invokes the given callable when the execution completes. 
This member function returns a tf::Future object that eventually holds the result of the execution. @code{.cpp} tf::Future<void> future = executor.run_until( taskflow, [](){ return rand()%10 == 0 }, [](){ std::cout << "done"; } ); // do something else future.wait(); @endcode This member function is thread-safe. @attention The executor does not own the given taskflow. It is your responsibility to ensure the taskflow remains alive during its execution. */ template<typename P, typename C> tf::Future<void> run_until(Taskflow& taskflow, P&& pred, C&& callable); /** @brief runs a moved taskflow and keeps running it until the predicate becomes true and then invokes the callback @param taskflow a moved tf::Taskflow @param pred a boolean predicate to return @c true for stop @param callable a callable object to be invoked after this run completes @return a tf::Future that holds the result of the execution This member function executes a moved taskflow multiple times until the predicate returns @c true and then invokes the given callable when the execution completes. This member function returns a tf::Future object that eventually holds the result of the execution. The executor will take care of the lifetime of the moved taskflow. @code{.cpp} tf::Future<void> future = executor.run_until( std::move(taskflow), [](){ return rand()%10 == 0 }, [](){ std::cout << "done"; } ); // do something else future.wait(); @endcode This member function is thread-safe. */ template<typename P, typename C> tf::Future<void> run_until(Taskflow&& taskflow, P&& pred, C&& callable); /** @brief runs a target graph and waits until it completes using an internal worker of this executor @tparam T target type which has `tf::Graph& T::graph()` defined @param target the target task graph object The method runs a target graph which has `tf::Graph& T::graph()` defined and waits until the execution completes. Unlike the typical flow of calling `tf::Executor::run` series plus waiting on the result, this method must be called by an internal worker of this executor. The caller worker will participate in the work-stealing loop of the scheduler, therby avoiding potential deadlock caused by blocked waiting. @code{.cpp} tf::Executor executor(2); tf::Taskflow taskflow; std::array<tf::Taskflow, 1000> others; std::atomic<size_t> counter{0}; for(size_t n=0; n<1000; n++) { for(size_t i=0; i<1000; i++) { others[n].emplace([&](){ counter++; }); } taskflow.emplace([&executor, &tf=others[n]](){ executor.run_and_wait(tf); //executor.run(tf).wait(); <- blocking the worker without doing anything // will introduce deadlock }); } executor.run(taskflow).wait(); @endcode The method is thread-safe as long as the target is not concurrently ran by two or more threads. @attention You must call tf::Executor::run_and_wait from a worker of the calling executor or an exception will be thrown. */ template <typename T> void run_and_wait(T& target); /** @brief keeps running the work-stealing loop until the predicate becomes true @tparam P predicate type @param predicate a boolean predicate to indicate when to stop the loop The method keeps the caller worker in the work-stealing loop such that it does not block (e.g., causing deadlock with other blocking workers) until the stop predicate becomes true. 
@code{.cpp} taskflow.emplace([&](){ std::future<void> fu = std::async([](){ std::sleep(100s); }); executor.loop_until([](){ return fu.wait_for(std::chrono::seconds(0)) == future_status::ready; }); }); @endcode @attention You must call tf::Executor::loop_until from a worker of the calling executor or an exception will be thrown. */ template <typename P> void loop_until(P&& predicate); /** @brief waits for all tasks to complete This member function waits until all submitted tasks (e.g., taskflows, asynchronous tasks) to finish. @code{.cpp} executor.run(taskflow1); executor.run_n(taskflow2, 10); executor.run_n(taskflow3, 100); executor.wait_for_all(); // wait until the above submitted taskflows finish @endcode */ void wait_for_all(); /** @brief queries the number of worker threads Each worker represents one unique thread spawned by an executor upon its construction time. @code{.cpp} tf::Executor executor(4); std::cout << executor.num_workers(); // 4 @endcode */ size_t num_workers() const noexcept; /** @brief queries the number of running topologies at the time of this call When a taskflow is submitted to an executor, a topology is created to store runtime metadata of the running taskflow. When the execution of the submitted taskflow finishes, its corresponding topology will be removed from the executor. @code{.cpp} executor.run(taskflow); std::cout << executor.num_topologies(); // 0 or 1 (taskflow still running) @endcode */ size_t num_topologies() const; /** @brief queries the number of running taskflows with moved ownership @code{.cpp} executor.run(std::move(taskflow)); std::cout << executor.num_taskflows(); // 0 or 1 (taskflow still running) @endcode */ size_t num_taskflows() const; /** @brief queries the id of the caller thread in this executor Each worker has an unique id in the range of @c 0 to @c N-1 associated with its parent executor. If the caller thread does not belong to the executor, @c -1 is returned. @code{.cpp} tf::Executor executor(4); // 4 workers in the executor executor.this_worker_id(); // -1 (main thread is not a worker) taskflow.emplace([&](){ std::cout << executor.this_worker_id(); // 0, 1, 2, or 3 }); executor.run(taskflow); @endcode */ int this_worker_id() const; /** @brief runs a given function asynchronously @tparam F callable type @tparam ArgsT parameter types @param f callable object to call @param args parameters to pass to the callable @return a tf::Future that will holds the result of the execution The method creates an asynchronous task to launch the given function on the given arguments. Unlike std::async, the return here is a @em tf::Future that holds an optional object to the result. If the asynchronous task is cancelled before it runs, the return is a @c std::nullopt, or the value returned by the callable. @code{.cpp} tf::Future<std::optional<int>> future = executor.async([](){ std::cout << "create an asynchronous task and returns 1\n"; return 1; }); @endcode This member function is thread-safe. */ template <typename F, typename... ArgsT> auto async(F&& f, ArgsT&&... args); /** @brief runs a given function asynchronously and gives a name to this task @tparam F callable type @tparam ArgsT parameter types @param name name of the asynchronous task @param f callable object to call @param args parameters to pass to the callable @return a tf::Future that will holds the result of the execution The method creates a named asynchronous task to launch the given function on the given arguments. 
Naming an asynchronous task is primarily used for profiling and visualizing the task execution timeline. Unlike std::async, the return here is a tf::Future that holds an optional object to the result. If the asynchronous task is cancelled before it runs, the return is a @c std::nullopt, or the value returned by the callable. @code{.cpp} tf::Future<std::optional<int>> future = executor.named_async("name", [](){ std::cout << "create an asynchronous task with a name and returns 1\n"; return 1; }); @endcode This member function is thread-safe. */ template <typename F, typename... ArgsT> auto named_async(const std::string& name, F&& f, ArgsT&&... args); /** @brief similar to tf::Executor::async but does not return a future object This member function is more efficient than tf::Executor::async and is encouraged to use when there is no data returned. @code{.cpp} executor.silent_async([](){ std::cout << "create an asynchronous task with no return\n"; }); @endcode This member function is thread-safe. */ template <typename F, typename... ArgsT> void silent_async(F&& f, ArgsT&&... args); /** @brief similar to tf::Executor::named_async but does not return a future object This member function is more efficient than tf::Executor::named_async and is encouraged to use when there is no data returned. @code{.cpp} executor.named_silent_async("name", [](){ std::cout << "create an asynchronous task with a name and no return\n"; }); @endcode This member function is thread-safe. */ template <typename F, typename... ArgsT> void named_silent_async(const std::string& name, F&& f, ArgsT&&... args); /** @brief constructs an observer to inspect the activities of worker threads @tparam Observer observer type derived from tf::ObserverInterface @tparam ArgsT argument parameter pack @param args arguments to forward to the constructor of the observer @return a shared pointer to the created observer Each executor manages a list of observers with shared ownership with callers. For each of these observers, the two member functions, tf::ObserverInterface::on_entry and tf::ObserverInterface::on_exit will be called before and after the execution of a task. This member function is not thread-safe. */ template <typename Observer, typename... ArgsT> std::shared_ptr<Observer> make_observer(ArgsT&&... args); /** @brief removes an observer from the executor This member function is not thread-safe. 
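For example (a minimal sketch, assuming a user-defined @c MyObserver type derived from tf::ObserverInterface):

@code{.cpp}
auto obs = executor.make_observer<MyObserver>();
executor.run(taskflow).wait();
executor.remove_observer(std::move(obs));
@endcode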
*/ template <typename Observer> void remove_observer(std::shared_ptr<Observer> observer); /** @brief queries the number of observers */ size_t num_observers() const noexcept; private: const size_t _MAX_STEALS; std::condition_variable _topology_cv; std::mutex _taskflow_mutex; std::mutex _topology_mutex; std::mutex _wsq_mutex; size_t _num_topologies {0}; std::unordered_map<std::thread::id, size_t> _wids; std::vector<std::thread> _threads; std::vector<Worker> _workers; std::list<Taskflow> _taskflows; Notifier _notifier; TaskQueue<Node*> _wsq; std::atomic<bool> _done {0}; std::shared_ptr<WorkerInterface> _worker_interface; std::unordered_set<std::shared_ptr<ObserverInterface>> _observers; Worker* _this_worker(); bool _wait_for_task(Worker&, Node*&); void _observer_prologue(Worker&, Node*); void _observer_epilogue(Worker&, Node*); void _spawn(size_t); void _exploit_task(Worker&, Node*&); void _explore_task(Worker&, Node*&); void _schedule(Worker&, Node*); void _schedule(Node*); void _schedule(Worker&, const SmallVector<Node*>&); void _schedule(const SmallVector<Node*>&); void _set_up_topology(Worker*, Topology*); void _tear_down_topology(Worker&, Topology*); void _tear_down_async(Node*); void _tear_down_invoke(Worker&, Node*); void _cancel_invoke(Worker&, Node*); void _increment_topology(); void _decrement_topology(); void _decrement_topology_and_notify(); void _invoke(Worker&, Node*); void _invoke_static_task(Worker&, Node*); void _invoke_dynamic_task(Worker&, Node*); void _consume_graph(Worker&, Node*, Graph&); void _detach_dynamic_task(Worker&, Node*, Graph&); void _invoke_condition_task(Worker&, Node*, SmallVector<int>&); void _invoke_multi_condition_task(Worker&, Node*, SmallVector<int>&); void _invoke_module_task(Worker&, Node*); void _invoke_async_task(Worker&, Node*); void _invoke_silent_async_task(Worker&, Node*); void _invoke_cudaflow_task(Worker&, Node*); void _invoke_syclflow_task(Worker&, Node*); void _invoke_runtime_task(Worker&, Node*); template <typename P> void _loop_until(Worker&, P&&); template <typename C, std::enable_if_t<is_cudaflow_task_v<C>, void>* = nullptr> void _invoke_cudaflow_task_entry(Node*, C&&); template <typename C, typename Q, std::enable_if_t<is_syclflow_task_v<C>, void>* = nullptr > void _invoke_syclflow_task_entry(Node*, C&&, Q&); }; // Constructor inline Executor::Executor(size_t N, std::shared_ptr<WorkerInterface> wix) : _MAX_STEALS {((N+1) << 1)}, _threads {N}, _workers {N}, _notifier {N}, _worker_interface {std::move(wix)} { if(N == 0) { TF_THROW("no cpu workers to execute taskflows"); } _spawn(N); // instantite the default observer if requested if(has_env(TF_ENABLE_PROFILER)) { TFProfManager::get()._manage(make_observer<TFProfObserver>()); } } // Destructor inline Executor::~Executor() { // wait for all topologies to complete wait_for_all(); // shut down the scheduler _done = true; _notifier.notify(true); for(auto& t : _threads){ t.join(); } } // Function: num_workers inline size_t Executor::num_workers() const noexcept { return _workers.size(); } // Function: num_topologies inline size_t Executor::num_topologies() const { return _num_topologies; } // Function: num_taskflows inline size_t Executor::num_taskflows() const { return _taskflows.size(); } // Function: _this_worker inline Worker* Executor::_this_worker() { auto itr = _wids.find(std::this_thread::get_id()); return itr == _wids.end() ? nullptr : &_workers[itr->second]; } // Function: named_async template <typename F, typename... 
ArgsT> auto Executor::named_async(const std::string& name, F&& f, ArgsT&&... args) { _increment_topology(); using T = std::invoke_result_t<F, ArgsT...>; using R = std::conditional_t<std::is_same_v<T, void>, void, std::optional<T>>; std::promise<R> p; auto tpg = std::make_shared<AsyncTopology>(); Future<R> fu(p.get_future(), tpg); auto node = node_pool.animate( std::in_place_type_t<Node::Async>{}, [p=make_moc(std::move(p)), f=std::forward<F>(f), args...] (bool cancel) mutable { if constexpr(std::is_same_v<R, void>) { if(!cancel) { f(args...); } p.object.set_value(); } else { p.object.set_value(cancel ? std::nullopt : std::make_optional(f(args...))); } }, std::move(tpg) ); node->_name = name; if(auto w = _this_worker(); w) { _schedule(*w, node); } else{ _schedule(node); } return fu; } // Function: async template <typename F, typename... ArgsT> auto Executor::async(F&& f, ArgsT&&... args) { return named_async("", std::forward<F>(f), std::forward<ArgsT>(args)...); } // Function: named_silent_async template <typename F, typename... ArgsT> void Executor::named_silent_async( const std::string& name, F&& f, ArgsT&&... args ) { _increment_topology(); Node* node = node_pool.animate( std::in_place_type_t<Node::SilentAsync>{}, [f=std::forward<F>(f), args...] () mutable { f(args...); } ); node->_name = name; if(auto w = _this_worker(); w) { _schedule(*w, node); } else { _schedule(node); } } // Function: silent_async template <typename F, typename... ArgsT> void Executor::silent_async(F&& f, ArgsT&&... args) { named_silent_async("", std::forward<F>(f), std::forward<ArgsT>(args)...); } // Function: this_worker_id inline int Executor::this_worker_id() const { auto i = _wids.find(std::this_thread::get_id()); return i == _wids.end() ? -1 : static_cast<int>(_workers[i->second]._id); } // Procedure: _spawn inline void Executor::_spawn(size_t N) { std::mutex mutex; std::condition_variable cond; size_t n=0; for(size_t id=0; id<N; ++id) { _workers[id]._id = id; _workers[id]._vtm = id; _workers[id]._executor = this; _workers[id]._waiter = &_notifier._waiters[id]; _threads[id] = std::thread([this] ( Worker& w, std::mutex& mutex, std::condition_variable& cond, size_t& n ) -> void { // assign the thread w._thread = &_threads[w._id]; // enables the mapping { std::scoped_lock lock(mutex); _wids[std::this_thread::get_id()] = w._id; if(n++; n == num_workers()) { cond.notify_one(); } } Node* t = nullptr; // before entering the scheduler (work-stealing loop), // call the user-specified prologue function if(_worker_interface) { _worker_interface->scheduler_prologue(w); } // must use 1 as condition instead of !done because // the previous worker may stop while the following workers // are still preparing for entering the scheduling loop std::exception_ptr ptr{nullptr}; try { while(1) { // execute the tasks. _exploit_task(w, t); // wait for tasks if(_wait_for_task(w, t) == false) { break; } } } catch(...) 
{ ptr = std::current_exception(); } // call the user-specified epilogue function if(_worker_interface) { _worker_interface->scheduler_epilogue(w, ptr); } }, std::ref(_workers[id]), std::ref(mutex), std::ref(cond), std::ref(n)); // POSIX-like system can use the following to affine threads to cores //cpu_set_t cpuset; //CPU_ZERO(&cpuset); //CPU_SET(id, &cpuset); //pthread_setaffinity_np( // _threads[id].native_handle(), sizeof(cpu_set_t), &cpuset //); } std::unique_lock<std::mutex> lock(mutex); cond.wait(lock, [&](){ return n==N; }); } // Function: _loop_until template <typename P> inline void Executor::_loop_until(Worker& w, P&& stop_predicate) { std::uniform_int_distribution<size_t> rdvtm(0, _workers.size()-1); exploit: while(!stop_predicate()) { //exploit: if(auto t = w._wsq.pop(); t) { _invoke(w, t); } else { size_t num_steals = 0; explore: t = (w._id == w._vtm) ? _wsq.steal() : _workers[w._vtm]._wsq.steal(); if(t) { _invoke(w, t); goto exploit; } else if(!stop_predicate()) { if(num_steals++ > _MAX_STEALS) { std::this_thread::yield(); } w._vtm = rdvtm(w._rdgen); goto explore; } else { break; } } } } // Function: _explore_task inline void Executor::_explore_task(Worker& w, Node*& t) { //assert(_workers[w].wsq.empty()); //assert(!t); size_t num_steals = 0; size_t num_yields = 0; std::uniform_int_distribution<size_t> rdvtm(0, _workers.size()-1); // Here, we write do-while to make the worker steal at once // from the assigned victim. do { t = (w._id == w._vtm) ? _wsq.steal() : _workers[w._vtm]._wsq.steal(); if(t) { break; } if(num_steals++ > _MAX_STEALS) { std::this_thread::yield(); if(num_yields++ > 100) { break; } } w._vtm = rdvtm(w._rdgen); } while(!_done); } // Procedure: _exploit_task inline void Executor::_exploit_task(Worker& w, Node*& t) { while(t) { _invoke(w, t); t = w._wsq.pop(); } } // Function: _wait_for_task inline bool Executor::_wait_for_task(Worker& worker, Node*& t) { explore_task: _explore_task(worker, t); // The last thief who successfully stole a task will wake up // another thief worker to avoid starvation. if(t) { _notifier.notify(false); return true; } // ---- 2PC guard ---- _notifier.prepare_wait(worker._waiter); if(!_wsq.empty()) { _notifier.cancel_wait(worker._waiter); worker._vtm = worker._id; goto explore_task; } if(_done) { _notifier.cancel_wait(worker._waiter); _notifier.notify(true); return false; } // We need to use index-based scanning to avoid data race // with _spawn which may initialize a worker at the same time. for(size_t vtm=0; vtm<_workers.size(); vtm++) { if(!_workers[vtm]._wsq.empty()) { _notifier.cancel_wait(worker._waiter); worker._vtm = vtm; goto explore_task; } } //--_num_thieves; //_num_thieves.fetch_sub(1, std::memory_order_release); /*//if(auto vtm = _find_vtm(me); vtm != _workers.size()) { if(!_wsq.empty()) { _notifier.cancel_wait(worker._waiter); //t = (vtm == me) ? 
_wsq.steal() : _workers[vtm].wsq.steal(); t = _wsq.steal(); // must steal here if(t) { if(_num_thieves.fetch_sub(1) == 1) { _notifier.notify(false); } return true; } else { worker._vtm = worker._id; goto explore_task; } } if(_done) { _notifier.cancel_wait(worker._waiter); _notifier.notify(true); --_num_thieves; return false; } if(_num_thieves.fetch_sub(1) == 1) { if(_num_actives) { _notifier.cancel_wait(worker._waiter); goto wait_for_task; } // check all queues again for(auto& w : _workers) { if(!w._wsq.empty()) { worker._vtm = w._id; _notifier.cancel_wait(worker._waiter); goto wait_for_task; } } }*/ // Now I really need to relinguish my self to others _notifier.commit_wait(worker._waiter); goto explore_task; } // Function: make_observer template<typename Observer, typename... ArgsT> std::shared_ptr<Observer> Executor::make_observer(ArgsT&&... args) { static_assert( std::is_base_of_v<ObserverInterface, Observer>, "Observer must be derived from ObserverInterface" ); // use a local variable to mimic the constructor auto ptr = std::make_shared<Observer>(std::forward<ArgsT>(args)...); ptr->set_up(_workers.size()); _observers.emplace(std::static_pointer_cast<ObserverInterface>(ptr)); return ptr; } // Procedure: remove_observer template <typename Observer> void Executor::remove_observer(std::shared_ptr<Observer> ptr) { static_assert( std::is_base_of_v<ObserverInterface, Observer>, "Observer must be derived from ObserverInterface" ); _observers.erase(std::static_pointer_cast<ObserverInterface>(ptr)); } // Function: num_observers inline size_t Executor::num_observers() const noexcept { return _observers.size(); } // Procedure: _schedule inline void Executor::_schedule(Worker& worker, Node* node) { // We need to fetch p before the release such that the read // operation is synchronized properly with other thread to // void data race. auto p = node->_priority; node->_state.fetch_or(Node::READY, std::memory_order_release); // caller is a worker to this pool - starting at v3.5 we do not use // any complicated notification mechanism as the experimental result // has shown no significant advantage. if(worker._executor == this) { worker._wsq.push(node, p); _notifier.notify(false); return; } { std::lock_guard<std::mutex> lock(_wsq_mutex); _wsq.push(node, p); } _notifier.notify(false); } // Procedure: _schedule inline void Executor::_schedule(Node* node) { // We need to fetch p before the release such that the read // operation is synchronized properly with other thread to // void data race. auto p = node->_priority; node->_state.fetch_or(Node::READY, std::memory_order_release); { std::lock_guard<std::mutex> lock(_wsq_mutex); _wsq.push(node, p); } _notifier.notify(false); } // Procedure: _schedule inline void Executor::_schedule(Worker& worker, const SmallVector<Node*>& nodes) { // We need to cacth the node count to avoid accessing the nodes // vector while the parent topology is removed! const auto num_nodes = nodes.size(); if(num_nodes == 0) { return; } // caller is a worker to this pool - starting at v3.5 we do not use // any complicated notification mechanism as the experimental result // has shown no significant advantage. if(worker._executor == this) { for(size_t i=0; i<num_nodes; ++i) { // We need to fetch p before the release such that the read // operation is synchronized properly with other thread to // void data race. 
auto p = nodes[i]->_priority; nodes[i]->_state.fetch_or(Node::READY, std::memory_order_release); worker._wsq.push(nodes[i], p); _notifier.notify(false); } return; } { std::lock_guard<std::mutex> lock(_wsq_mutex); for(size_t k=0; k<num_nodes; ++k) { auto p = nodes[k]->_priority; nodes[k]->_state.fetch_or(Node::READY, std::memory_order_release); _wsq.push(nodes[k], p); } } _notifier.notify_n(num_nodes); } // Procedure: _schedule inline void Executor::_schedule(const SmallVector<Node*>& nodes) { // parent topology may be removed! const auto num_nodes = nodes.size(); if(num_nodes == 0) { return; } // We need to fetch p before the release such that the read // operation is synchronized properly with other thread to // void data race. { std::lock_guard<std::mutex> lock(_wsq_mutex); for(size_t k=0; k<num_nodes; ++k) { auto p = nodes[k]->_priority; nodes[k]->_state.fetch_or(Node::READY, std::memory_order_release); _wsq.push(nodes[k], p); } } _notifier.notify_n(num_nodes); } // Procedure: _invoke inline void Executor::_invoke(Worker& worker, Node* node) { // synchronize all outstanding memory operations caused by reordering while(!(node->_state.load(std::memory_order_acquire) & Node::READY)); begin_invoke: // no need to do other things if the topology is cancelled if(node->_is_cancelled()) { _cancel_invoke(worker, node); return; } // if acquiring semaphore(s) exists, acquire them first if(node->_semaphores && !node->_semaphores->to_acquire.empty()) { SmallVector<Node*> nodes; if(!node->_acquire_all(nodes)) { _schedule(worker, nodes); return; } node->_state.fetch_or(Node::ACQUIRED, std::memory_order_release); } // condition task //int cond = -1; SmallVector<int> conds; // switch is faster than nested if-else due to jump table switch(node->_handle.index()) { // static task case Node::STATIC:{ _invoke_static_task(worker, node); } break; // dynamic task case Node::DYNAMIC: { _invoke_dynamic_task(worker, node); } break; // condition task case Node::CONDITION: { _invoke_condition_task(worker, node, conds); } break; // multi-condition task case Node::MULTI_CONDITION: { _invoke_multi_condition_task(worker, node, conds); } break; // module task case Node::MODULE: { _invoke_module_task(worker, node); } break; // async task case Node::ASYNC: { _invoke_async_task(worker, node); _tear_down_async(node); return ; } break; // silent async task case Node::SILENT_ASYNC: { _invoke_silent_async_task(worker, node); _tear_down_async(node); return ; } break; // cudaflow task case Node::CUDAFLOW: { _invoke_cudaflow_task(worker, node); } break; // syclflow task case Node::SYCLFLOW: { _invoke_syclflow_task(worker, node); } break; // runtime task case Node::RUNTIME: { _invoke_runtime_task(worker, node); } break; // monostate (placeholder) default: break; } // if releasing semaphores exist, release them if(node->_semaphores && !node->_semaphores->to_release.empty()) { _schedule(worker, node->_release_all()); } // Reset the join counter to support the cyclic control flow. // + We must do this before scheduling the successors to avoid race // condition on _dependents. // + We must use fetch_add instead of direct assigning // because the user-space call on "invoke" may explicitly schedule // this task again (e.g., pipeline) which can access the join_counter. if((node->_state.load(std::memory_order_relaxed) & Node::CONDITIONED)) { node->_join_counter.fetch_add(node->num_strong_dependents()); } else { node->_join_counter.fetch_add(node->num_dependents()); } // acquire the parent flow counter auto& j = (node->_parent) ? 
node->_parent->_join_counter : node->_topology->_join_counter; // Here, we want to cache the latest successor with the highest priority Node* cache {nullptr}; auto max_p = static_cast<unsigned>(TaskPriority::MAX); // Invoke the task based on the corresponding type switch(node->_handle.index()) { // condition and multi-condition tasks case Node::CONDITION: case Node::MULTI_CONDITION: { for(auto cond : conds) { if(cond >= 0 && static_cast<size_t>(cond) < node->_successors.size()) { auto s = node->_successors[cond]; // zeroing the join counter for invariant s->_join_counter.store(0, std::memory_order_relaxed); j.fetch_add(1); if(s->_priority <= max_p) { if(cache) { _schedule(worker, cache); } cache = s; max_p = s->_priority; } else { _schedule(worker, s); } } } } break; // non-condition task default: { for(size_t i=0; i<node->_successors.size(); ++i) { if(auto s = node->_successors[i]; --(s->_join_counter) == 0) { j.fetch_add(1); if(s->_priority <= max_p) { if(cache) { _schedule(worker, cache); } cache = s; max_p = s->_priority; } else { _schedule(worker, s); } } } } break; } // tear_down the invoke _tear_down_invoke(worker, node); // perform tail recursion elimination for the right-most child to reduce // the number of expensive pop/push operations through the task queue if(cache) { node = cache; //node->_state.fetch_or(Node::READY, std::memory_order_release); goto begin_invoke; } } // Procedure: _tear_down_async inline void Executor::_tear_down_async(Node* node) { if(node->_parent) { node->_parent->_join_counter.fetch_sub(1); } else { _decrement_topology_and_notify(); } node_pool.recycle(node); } // Proecdure: _tear_down_invoke inline void Executor::_tear_down_invoke(Worker& worker, Node* node) { // we must check parent first before substracting the join counter, // or it can introduce data race if(node->_parent == nullptr) { if(node->_topology->_join_counter.fetch_sub(1) == 1) { _tear_down_topology(worker, node->_topology); } } // joined subflow else { node->_parent->_join_counter.fetch_sub(1); } } // Procedure: _cancel_invoke inline void Executor::_cancel_invoke(Worker& worker, Node* node) { switch(node->_handle.index()) { // async task needs to carry out the promise case Node::ASYNC: std::get_if<Node::Async>(&(node->_handle))->work(true); _tear_down_async(node); break; // silent async doesn't need to carry out the promise case Node::SILENT_ASYNC: _tear_down_async(node); break; // tear down topology if the node is the last leaf default: { _tear_down_invoke(worker, node); } break; } } // Procedure: _observer_prologue inline void Executor::_observer_prologue(Worker& worker, Node* node) { for(auto& observer : _observers) { observer->on_entry(WorkerView(worker), TaskView(*node)); } } // Procedure: _observer_epilogue inline void Executor::_observer_epilogue(Worker& worker, Node* node) { for(auto& observer : _observers) { observer->on_exit(WorkerView(worker), TaskView(*node)); } } // Procedure: _invoke_static_task inline void Executor::_invoke_static_task(Worker& worker, Node* node) { _observer_prologue(worker, node); std::get_if<Node::Static>(&node->_handle)->work(); _observer_epilogue(worker, node); } // Procedure: _invoke_dynamic_task inline void Executor::_invoke_dynamic_task(Worker& w, Node* node) { _observer_prologue(w, node); auto handle = std::get_if<Node::Dynamic>(&node->_handle); handle->subgraph._clear(); Subflow sf(*this, w, node, handle->subgraph); handle->work(sf); if(sf._joinable) { _consume_graph(w, node, handle->subgraph); } _observer_epilogue(w, node); } // Procedure: 
_detach_dynamic_task inline void Executor::_detach_dynamic_task( Worker& w, Node* p, Graph& g ) { // graph is empty and has no async tasks if(g.empty() && p->_join_counter == 0) { return; } SmallVector<Node*> src; for(auto n : g._nodes) { n->_state.store(Node::DETACHED, std::memory_order_relaxed); n->_set_up_join_counter(); n->_topology = p->_topology; n->_parent = nullptr; if(n->num_dependents() == 0) { src.push_back(n); } } { std::lock_guard<std::mutex> lock(p->_topology->_taskflow._mutex); p->_topology->_taskflow._graph._merge(std::move(g)); } p->_topology->_join_counter.fetch_add(src.size()); _schedule(w, src); } // Procedure: _consume_graph inline void Executor::_consume_graph(Worker& w, Node* p, Graph& g) { // graph is empty and has no async tasks if(g.empty() && p->_join_counter == 0) { return; } SmallVector<Node*> src; for(auto n : g._nodes) { n->_state.store(0, std::memory_order_relaxed); n->_set_up_join_counter(); n->_topology = p->_topology; n->_parent = p; if(n->num_dependents() == 0) { src.push_back(n); } } p->_join_counter.fetch_add(src.size()); _schedule(w, src); _loop_until(w, [p] () -> bool { return p->_join_counter == 0; }); } // Procedure: _invoke_condition_task inline void Executor::_invoke_condition_task( Worker& worker, Node* node, SmallVector<int>& conds ) { _observer_prologue(worker, node); conds = { std::get_if<Node::Condition>(&node->_handle)->work() }; _observer_epilogue(worker, node); } // Procedure: _invoke_multi_condition_task inline void Executor::_invoke_multi_condition_task( Worker& worker, Node* node, SmallVector<int>& conds ) { _observer_prologue(worker, node); conds = std::get_if<Node::MultiCondition>(&node->_handle)->work(); _observer_epilogue(worker, node); } // Procedure: _invoke_cudaflow_task inline void Executor::_invoke_cudaflow_task(Worker& worker, Node* node) { _observer_prologue(worker, node); std::get_if<Node::cudaFlow>(&node->_handle)->work(*this, node); _observer_epilogue(worker, node); } // Procedure: _invoke_syclflow_task inline void Executor::_invoke_syclflow_task(Worker& worker, Node* node) { _observer_prologue(worker, node); std::get_if<Node::syclFlow>(&node->_handle)->work(*this, node); _observer_epilogue(worker, node); } // Procedure: _invoke_module_task inline void Executor::_invoke_module_task(Worker& w, Node* node) { _observer_prologue(w, node); _consume_graph( w, node, std::get_if<Node::Module>(&node->_handle)->graph ); _observer_epilogue(w, node); } // Procedure: _invoke_async_task inline void Executor::_invoke_async_task(Worker& w, Node* node) { _observer_prologue(w, node); std::get_if<Node::Async>(&node->_handle)->work(false); _observer_epilogue(w, node); } // Procedure: _invoke_silent_async_task inline void Executor::_invoke_silent_async_task(Worker& w, Node* node) { _observer_prologue(w, node); std::get_if<Node::SilentAsync>(&node->_handle)->work(); _observer_epilogue(w, node); } // Procedure: _invoke_runtime_task inline void Executor::_invoke_runtime_task(Worker& w, Node* node) { _observer_prologue(w, node); Runtime rt(*this, w, node); std::get_if<Node::Runtime>(&node->_handle)->work(rt); _observer_epilogue(w, node); } // Function: run inline tf::Future<void> Executor::run(Taskflow& f) { return run_n(f, 1, [](){}); } // Function: run inline tf::Future<void> Executor::run(Taskflow&& f) { return run_n(std::move(f), 1, [](){}); } // Function: run template <typename C> tf::Future<void> Executor::run(Taskflow& f, C&& c) { return run_n(f, 1, std::forward<C>(c)); } // Function: run template <typename C> tf::Future<void> 
Executor::run(Taskflow&& f, C&& c) { return run_n(std::move(f), 1, std::forward<C>(c)); } // Function: run_n inline tf::Future<void> Executor::run_n(Taskflow& f, size_t repeat) { return run_n(f, repeat, [](){}); } // Function: run_n inline tf::Future<void> Executor::run_n(Taskflow&& f, size_t repeat) { return run_n(std::move(f), repeat, [](){}); } // Function: run_n template <typename C> tf::Future<void> Executor::run_n(Taskflow& f, size_t repeat, C&& c) { return run_until( f, [repeat]() mutable { return repeat-- == 0; }, std::forward<C>(c) ); } // Function: run_n template <typename C> tf::Future<void> Executor::run_n(Taskflow&& f, size_t repeat, C&& c) { return run_until( std::move(f), [repeat]() mutable { return repeat-- == 0; }, std::forward<C>(c) ); } // Function: run_until template<typename P> tf::Future<void> Executor::run_until(Taskflow& f, P&& pred) { return run_until(f, std::forward<P>(pred), [](){}); } // Function: run_until template<typename P> tf::Future<void> Executor::run_until(Taskflow&& f, P&& pred) { return run_until(std::move(f), std::forward<P>(pred), [](){}); } // Function: run_until template <typename P, typename C> tf::Future<void> Executor::run_until(Taskflow& f, P&& p, C&& c) { _increment_topology(); // Need to check the empty under the lock since dynamic task may // define detached blocks that modify the taskflow at the same time bool empty; { std::lock_guard<std::mutex> lock(f._mutex); empty = f.empty(); } // No need to create a real topology but returns an dummy future if(empty || p()) { c(); std::promise<void> promise; promise.set_value(); _decrement_topology_and_notify(); return tf::Future<void>(promise.get_future(), std::monostate{}); } // create a topology for this run auto t = std::make_shared<Topology>(f, std::forward<P>(p), std::forward<C>(c)); // need to create future before the topology got torn down quickly tf::Future<void> future(t->_promise.get_future(), t); // modifying topology needs to be protected under the lock { std::lock_guard<std::mutex> lock(f._mutex); f._topologies.push(t); if(f._topologies.size() == 1) { _set_up_topology(_this_worker(), t.get()); } } return future; } // Function: run_until template <typename P, typename C> tf::Future<void> Executor::run_until(Taskflow&& f, P&& pred, C&& c) { std::list<Taskflow>::iterator itr; { std::scoped_lock<std::mutex> lock(_taskflow_mutex); itr = _taskflows.emplace(_taskflows.end(), std::move(f)); itr->_satellite = itr; } return run_until(*itr, std::forward<P>(pred), std::forward<C>(c)); } // Function: run_and_wait template <typename T> void Executor::run_and_wait(T& target) { auto w = _this_worker(); if(w == nullptr) { TF_THROW("run_and_wait must be called by a worker of the executor"); } Node parent; // dummy parent _consume_graph(*w, &parent, target.graph()); } // Function: loop_until template <typename P> void Executor::loop_until(P&& predicate) { auto w = _this_worker(); if(w == nullptr) { TF_THROW("loop_until must be called by a worker of the executor"); } _loop_until(*w, std::forward<P>(predicate)); } // Procedure: _increment_topology inline void Executor::_increment_topology() { std::lock_guard<std::mutex> lock(_topology_mutex); ++_num_topologies; } // Procedure: _decrement_topology_and_notify inline void Executor::_decrement_topology_and_notify() { std::lock_guard<std::mutex> lock(_topology_mutex); if(--_num_topologies == 0) { _topology_cv.notify_all(); } } // Procedure: _decrement_topology inline void Executor::_decrement_topology() { std::lock_guard<std::mutex> lock(_topology_mutex); 
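// the lock keeps _num_topologies consistent with wait_for_all(), which waits on _topology_cv under the same mutex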
--_num_topologies; } // Procedure: wait_for_all inline void Executor::wait_for_all() { std::unique_lock<std::mutex> lock(_topology_mutex); _topology_cv.wait(lock, [&](){ return _num_topologies == 0; }); } // Function: _set_up_topology inline void Executor::_set_up_topology(Worker* worker, Topology* tpg) { // ---- under taskflow lock ---- tpg->_sources.clear(); tpg->_taskflow._graph._clear_detached(); // scan each node in the graph and build up the links for(auto node : tpg->_taskflow._graph._nodes) { node->_topology = tpg; node->_parent = nullptr; node->_state.store(0, std::memory_order_relaxed); if(node->num_dependents() == 0) { tpg->_sources.push_back(node); } node->_set_up_join_counter(); } tpg->_join_counter = tpg->_sources.size(); if(worker) { _schedule(*worker, tpg->_sources); } else { _schedule(tpg->_sources); } } // Function: _tear_down_topology inline void Executor::_tear_down_topology(Worker& worker, Topology* tpg) { auto &f = tpg->_taskflow; //assert(&tpg == &(f._topologies.front())); // case 1: we still need to run the topology again if(!tpg->_is_cancelled && !tpg->_pred()) { //assert(tpg->_join_counter == 0); std::lock_guard<std::mutex> lock(f._mutex); tpg->_join_counter = tpg->_sources.size(); _schedule(worker, tpg->_sources); } // case 2: the final run of this topology else { // TODO: if the topology is cancelled, need to release all semaphores if(tpg->_call != nullptr) { tpg->_call(); } // If there is another run (interleave between lock) if(std::unique_lock<std::mutex> lock(f._mutex); f._topologies.size()>1) { //assert(tpg->_join_counter == 0); // Set the promise tpg->_promise.set_value(); f._topologies.pop(); tpg = f._topologies.front().get(); // decrement the topology but since this is not the last we don't notify _decrement_topology(); // set up topology needs to be under the lock or it can // introduce memory order error with pop _set_up_topology(&worker, tpg); } else { //assert(f._topologies.size() == 1); // Need to back up the promise first here becuz taskflow might be // destroy soon after calling get auto p {std::move(tpg->_promise)}; // Back up lambda capture in case it has the topology pointer, // to avoid it releasing on pop_front ahead of _mutex.unlock & // _promise.set_value. Released safely when leaving scope. auto c {std::move(tpg->_call)}; // Get the satellite if any auto s {f._satellite}; // Now we remove the topology from this taskflow f._topologies.pop(); //f._mutex.unlock(); lock.unlock(); // We set the promise in the end in case taskflow leaves the scope. 
// After set_value, the caller will return from wait p.set_value(); _decrement_topology_and_notify(); // remove the taskflow if it is managed by the executor // TODO: in the future, we may need to synchronize on wait // (which means the following code should the moved before set_value) if(s) { std::scoped_lock<std::mutex> lock(_taskflow_mutex); _taskflows.erase(*s); } } } } // ############################################################################ // Forward Declaration: Subflow // ############################################################################ inline void Subflow::join() { // assert(this_worker().worker == &_worker); if(!_joinable) { TF_THROW("subflow not joinable"); } // only the parent worker can join the subflow _executor._consume_graph(_worker, _parent, _graph); _joinable = false; } inline void Subflow::detach() { // assert(this_worker().worker == &_worker); if(!_joinable) { TF_THROW("subflow already joined or detached"); } // only the parent worker can detach the subflow _executor._detach_dynamic_task(_worker, _parent, _graph); _joinable = false; } // Function: named_async template <typename F, typename... ArgsT> auto Subflow::named_async(const std::string& name, F&& f, ArgsT&&... args) { return _named_async( *_executor._this_worker(), name, std::forward<F>(f), std::forward<ArgsT>(args)... ); } // Function: _named_async template <typename F, typename... ArgsT> auto Subflow::_named_async( Worker& w, const std::string& name, F&& f, ArgsT&&... args ) { _parent->_join_counter.fetch_add(1); using T = std::invoke_result_t<F, ArgsT...>; using R = std::conditional_t<std::is_same_v<T, void>, void, std::optional<T>>; std::promise<R> p; auto tpg = std::make_shared<AsyncTopology>(); Future<R> fu(p.get_future(), tpg); auto node = node_pool.animate( std::in_place_type_t<Node::Async>{}, [p=make_moc(std::move(p)), f=std::forward<F>(f), args...] (bool cancel) mutable { if constexpr(std::is_same_v<R, void>) { if(!cancel) { f(args...); } p.object.set_value(); } else { p.object.set_value(cancel ? std::nullopt : std::make_optional(f(args...))); } }, std::move(tpg) ); node->_name = name; node->_topology = _parent->_topology; node->_parent = _parent; _executor._schedule(w, node); return fu; } // Function: async template <typename F, typename... ArgsT> auto Subflow::async(F&& f, ArgsT&&... args) { return named_async("", std::forward<F>(f), std::forward<ArgsT>(args)...); } // Function: _named_silent_async template <typename F, typename... ArgsT> void Subflow::_named_silent_async( Worker& w, const std::string& name, F&& f, ArgsT&&... args ) { _parent->_join_counter.fetch_add(1); auto node = node_pool.animate( std::in_place_type_t<Node::SilentAsync>{}, [f=std::forward<F>(f), args...] () mutable { f(args...); } ); node->_name = name; node->_topology = _parent->_topology; node->_parent = _parent; _executor._schedule(w, node); } // Function: silent_async template <typename F, typename... ArgsT> void Subflow::named_silent_async(const std::string& name, F&& f, ArgsT&&... args) { _named_silent_async( *_executor._this_worker(), name, std::forward<F>(f), std::forward<ArgsT>(args)... ); } // Function: named_silent_async template <typename F, typename... ArgsT> void Subflow::silent_async(F&& f, ArgsT&&... 
args) { named_silent_async("", std::forward<F>(f), std::forward<ArgsT>(args)...); } // ############################################################################ // Forward Declaration: Runtime // ############################################################################ // Procedure: schedule inline void Runtime::schedule(Task task) { auto node = task._node; auto& j = node->_parent ? node->_parent->_join_counter : node->_topology->_join_counter; j.fetch_add(1); _executor._schedule(_worker, node); } // Procedure: emplace template <typename T> void Runtime::run_and_wait(T&& target) { // dynamic task (subflow) if constexpr(is_dynamic_task_v<T>) { Graph graph; Subflow sf(_executor, _worker, _parent, graph); target(sf); if(sf._joinable) { _executor._consume_graph(_worker, _parent, graph); } } // graph object else { _executor._consume_graph(_worker, _parent, target.graph()); } } } // end of namespace tf -----------------------------------------------------
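// ----------------------------------------------------------------------------
// Usage sketch (illustrative only, based on the interfaces defined above):
// a runtime task can spawn and join a subflow inline through
// tf::Runtime::run_and_wait, which keeps the calling worker in the
// work-stealing loop instead of blocking it.
//
//   tf::Executor executor;
//   tf::Taskflow taskflow;
//   taskflow.emplace([](tf::Runtime& rt){
//     rt.run_and_wait([](tf::Subflow& sf){
//       sf.emplace([](){ /* inline subflow work */ });
//     });
//   });
//   executor.run(taskflow).wait();
// ----------------------------------------------------------------------------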
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/task.hpp
#pragma once #include "graph.hpp" /** @file task.hpp @brief task include file */ namespace tf { // ---------------------------------------------------------------------------- // Task Types // ---------------------------------------------------------------------------- /** @enum TaskType @brief enumeration of all task types */ enum class TaskType : int { /** @brief placeholder task type */ PLACEHOLDER = 0, /** @brief cudaFlow task type */ CUDAFLOW, /** @brief syclFlow task type */ SYCLFLOW, /** @brief static task type */ STATIC, /** @brief dynamic (subflow) task type */ DYNAMIC, /** @brief condition task type */ CONDITION, /** @brief module task type */ MODULE, /** @brief asynchronous task type */ ASYNC, /** @brief runtime task type */ RUNTIME, /** @brief undefined task type (for internal use only) */ UNDEFINED }; /** @private @brief array of all task types (used for iterating task types) */ inline constexpr std::array<TaskType, 9> TASK_TYPES = { TaskType::PLACEHOLDER, TaskType::CUDAFLOW, TaskType::SYCLFLOW, TaskType::STATIC, TaskType::DYNAMIC, TaskType::CONDITION, TaskType::MODULE, TaskType::ASYNC, TaskType::RUNTIME }; /** @brief convert a task type to a human-readable string The name of each task type is the litte-case string of its characters. @code{.cpp} TaskType::PLACEHOLDER -> "placeholder" TaskType::CUDAFLOW -> "cudaflow" TaskType::SYCLFLOW -> "syclflow" TaskType::STATIC -> "static" TaskType::DYNAMIC -> "subflow" TaskType::CONDITION -> "condition" TaskType::MODULE -> "module" TaskType::ASYNC -> "async" TaskType::RUNTIME -> "runtime" @endcode */ inline const char* to_string(TaskType type) { const char* val; switch(type) { case TaskType::PLACEHOLDER: val = "placeholder"; break; case TaskType::CUDAFLOW: val = "cudaflow"; break; case TaskType::SYCLFLOW: val = "syclflow"; break; case TaskType::STATIC: val = "static"; break; case TaskType::DYNAMIC: val = "subflow"; break; case TaskType::CONDITION: val = "condition"; break; case TaskType::MODULE: val = "module"; break; case TaskType::ASYNC: val = "async"; break; case TaskType::RUNTIME: val = "runtime"; break; default: val = "undefined"; break; } return val; } // ---------------------------------------------------------------------------- // Task Traits // ---------------------------------------------------------------------------- /** @brief determines if a callable is a static task A static task is a callable object constructible from std::function<void()>. */ template <typename C> constexpr bool is_static_task_v = std::is_invocable_r_v<void, C> && !std::is_invocable_r_v<int, C> && !std::is_invocable_r_v<tf::SmallVector<int>, C>; /** @brief determines if a callable is a dynamic task A dynamic task is a callable object constructible from std::function<void(Subflow&)>. */ template <typename C> constexpr bool is_dynamic_task_v = std::is_invocable_r_v<void, C, Subflow&>; /** @brief determines if a callable is a condition task A condition task is a callable object constructible from std::function<int()>. */ template <typename C> constexpr bool is_condition_task_v = std::is_invocable_r_v<int, C>; /** @brief determines if a callable is a multi-condition task A multi-condition task is a callable object constructible from std::function<tf::SmallVector<int>()>. 
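For example (a minimal sketch):

@code{.cpp}
auto mc = [](){ return tf::SmallVector<int>{0, 2}; };  // branch to successors 0 and 2
static_assert(tf::is_multi_condition_task_v<decltype(mc)>);
@endcode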
*/ template <typename C> constexpr bool is_multi_condition_task_v = std::is_invocable_r_v<SmallVector<int>, C>; /** @brief determines if a callable is a %cudaFlow task A cudaFlow task is a callable object constructible from std::function<void(tf::cudaFlow&)> or std::function<void(tf::cudaFlowCapturer&)>. */ template <typename C> constexpr bool is_cudaflow_task_v = std::is_invocable_r_v<void, C, cudaFlow&> || std::is_invocable_r_v<void, C, cudaFlowCapturer&>; /** @brief determines if a callable is a %syclFlow task A syclFlow task is a callable object constructible from std::function<void(tf::syclFlow&)>. */ template <typename C> constexpr bool is_syclflow_task_v = std::is_invocable_r_v<void, C, syclFlow&>; /** @brief determines if a callable is a runtime task A runtime task is a callable object constructible from std::function<void(tf::Runtime&)>. */ template <typename C> constexpr bool is_runtime_task_v = std::is_invocable_r_v<void, C, Runtime&>; // ---------------------------------------------------------------------------- // Task // ---------------------------------------------------------------------------- /** @class Task @brief class to create a task handle over a node in a taskflow graph A task is a wrapper over a node in a taskflow graph. It provides a set of methods for users to access and modify the attributes of the associated node in the taskflow graph. A task is very lightweight object (i.e., only storing a node pointer) that can be trivially copied around, and it does not own the lifetime of the associated node. */ class Task { friend class FlowBuilder; friend class Runtime; friend class Taskflow; friend class TaskView; friend class Executor; public: /** @brief constructs an empty task */ Task() = default; /** @brief constructs the task with the copy of the other task */ Task(const Task& other); /** @brief replaces the contents with a copy of the other task */ Task& operator = (const Task&); /** @brief replaces the contents with a null pointer */ Task& operator = (std::nullptr_t); /** @brief compares if two tasks are associated with the same graph node */ bool operator == (const Task& rhs) const; /** @brief compares if two tasks are not associated with the same graph node */ bool operator != (const Task& rhs) const; /** @brief queries the name of the task */ const std::string& name() const; /** @brief queries the number of successors of the task */ size_t num_successors() const; /** @brief queries the number of predecessors of the task */ size_t num_dependents() const; /** @brief queries the number of strong dependents of the task */ size_t num_strong_dependents() const; /** @brief queries the number of weak dependents of the task */ size_t num_weak_dependents() const; /** @brief assigns a name to the task @param name a @std_string acceptable string @return @c *this */ Task& name(const std::string& name); /** @brief assigns a callable @tparam C callable type @param callable callable to construct one of the static, dynamic, condition, and cudaFlow tasks @return @c *this */ template <typename C> Task& work(C&& callable); /** @brief creates a module task from a taskflow @tparam T object type @param object a custom object that defines @c T::graph() method @return @c *this */ template <typename T> Task& composed_of(T& object); /** @brief adds precedence links from this to other tasks @tparam Ts parameter pack @param tasks one or multiple tasks @return @c *this */ template <typename... Ts> Task& precede(Ts&&... 
tasks); /** @brief adds precedence links from other tasks to this @tparam Ts parameter pack @param tasks one or multiple tasks @return @c *this */ template <typename... Ts> Task& succeed(Ts&&... tasks); /** @brief makes the task release this semaphore */ Task& release(Semaphore& semaphore); /** @brief makes the task acquire this semaphore */ Task& acquire(Semaphore& semaphore); /** @brief assigns pointer to user data @param data pointer to user data The following example shows how to attach user data to a task and run the task iteratively while changing the data value: @code{.cpp} tf::Executor executor; tf::Taskflow taskflow("attach data to a task"); int data; // create a task and attach it the data auto A = taskflow.placeholder(); A.data(&data).work([A](){ auto d = *static_cast<int*>(A.data()); std::cout << "data is " << d << std::endl; }); // run the taskflow iteratively with changing data for(data = 0; data<10; data++){ executor.run(taskflow).wait(); } @endcode @return @c *this */ Task& data(void* data); /** @brief assigns a priority value to the task A priority value can be one of the following three levels, tf::TaskPriority::HIGH (numerically equivalent to 0), tf::TaskPriority::NORMAL (numerically equivalent to 1), and tf::TaskPriority::LOW (numerically equivalent to 2). The smaller the priority value, the higher the priority. */ Task& priority(TaskPriority p); /** @brief queries the priority value of the task */ TaskPriority priority() const; /** @brief resets the task handle to null */ void reset(); /** @brief resets the associated work to a placeholder */ void reset_work(); /** @brief queries if the task handle points to a task node */ bool empty() const; /** @brief queries if the task has a work assigned */ bool has_work() const; /** @brief applies an visitor callable to each successor of the task */ template <typename V> void for_each_successor(V&& visitor) const; /** @brief applies an visitor callable to each dependents of the task */ template <typename V> void for_each_dependent(V&& visitor) const; /** @brief obtains a hash value of the underlying node */ size_t hash_value() const; /** @brief returns the task type */ TaskType type() const; /** @brief dumps the task through an output stream */ void dump(std::ostream& ostream) const; /** @brief queries pointer to user data */ void* data() const; private: Task(Node*); Node* _node {nullptr}; }; // Constructor inline Task::Task(Node* node) : _node {node} { } // Constructor inline Task::Task(const Task& rhs) : _node {rhs._node} { } // Function: precede template <typename... Ts> Task& Task::precede(Ts&&... tasks) { (_node->_precede(tasks._node), ...); //_precede(std::forward<Ts>(tasks)...); return *this; } // Function: succeed template <typename... Ts> Task& Task::succeed(Ts&&... 
tasks) { (tasks._node->_precede(_node), ...); //_succeed(std::forward<Ts>(tasks)...); return *this; } // Function: composed_of template <typename T> Task& Task::composed_of(T& object) { _node->_handle.emplace<Node::Module>(object); return *this; } // Operator = inline Task& Task::operator = (const Task& rhs) { _node = rhs._node; return *this; } // Operator = inline Task& Task::operator = (std::nullptr_t ptr) { _node = ptr; return *this; } // Operator == inline bool Task::operator == (const Task& rhs) const { return _node == rhs._node; } // Operator != inline bool Task::operator != (const Task& rhs) const { return _node != rhs._node; } // Function: name inline Task& Task::name(const std::string& name) { _node->_name = name; return *this; } // Function: acquire inline Task& Task::acquire(Semaphore& s) { if(!_node->_semaphores) { _node->_semaphores = std::make_unique<Node::Semaphores>(); } _node->_semaphores->to_acquire.push_back(&s); return *this; } // Function: release inline Task& Task::release(Semaphore& s) { if(!_node->_semaphores) { //_node->_semaphores.emplace(); _node->_semaphores = std::make_unique<Node::Semaphores>(); } _node->_semaphores->to_release.push_back(&s); return *this; } // Procedure: reset inline void Task::reset() { _node = nullptr; } // Procedure: reset_work inline void Task::reset_work() { _node->_handle.emplace<std::monostate>(); } // Function: name inline const std::string& Task::name() const { return _node->_name; } // Function: num_dependents inline size_t Task::num_dependents() const { return _node->num_dependents(); } // Function: num_strong_dependents inline size_t Task::num_strong_dependents() const { return _node->num_strong_dependents(); } // Function: num_weak_dependents inline size_t Task::num_weak_dependents() const { return _node->num_weak_dependents(); } // Function: num_successors inline size_t Task::num_successors() const { return _node->num_successors(); } // Function: empty inline bool Task::empty() const { return _node == nullptr; } // Function: has_work inline bool Task::has_work() const { return _node ? 
_node->_handle.index() != 0 : false; } // Function: task_type inline TaskType Task::type() const { switch(_node->_handle.index()) { case Node::PLACEHOLDER: return TaskType::PLACEHOLDER; case Node::STATIC: return TaskType::STATIC; case Node::DYNAMIC: return TaskType::DYNAMIC; case Node::CONDITION: return TaskType::CONDITION; case Node::MULTI_CONDITION: return TaskType::CONDITION; case Node::MODULE: return TaskType::MODULE; case Node::ASYNC: return TaskType::ASYNC; case Node::SILENT_ASYNC: return TaskType::ASYNC; case Node::CUDAFLOW: return TaskType::CUDAFLOW; case Node::SYCLFLOW: return TaskType::SYCLFLOW; case Node::RUNTIME: return TaskType::RUNTIME; default: return TaskType::UNDEFINED; } } // Function: for_each_successor template <typename V> void Task::for_each_successor(V&& visitor) const { for(size_t i=0; i<_node->_successors.size(); ++i) { visitor(Task(_node->_successors[i])); } } // Function: for_each_dependent template <typename V> void Task::for_each_dependent(V&& visitor) const { for(size_t i=0; i<_node->_dependents.size(); ++i) { visitor(Task(_node->_dependents[i])); } } // Function: hash_value inline size_t Task::hash_value() const { return std::hash<Node*>{}(_node); } // Procedure: dump inline void Task::dump(std::ostream& os) const { os << "task "; if(name().empty()) os << _node; else os << name(); os << " [type=" << to_string(type()) << ']'; } // Function: work template <typename C> Task& Task::work(C&& c) { if constexpr(is_static_task_v<C>) { _node->_handle.emplace<Node::Static>(std::forward<C>(c)); } else if constexpr(is_dynamic_task_v<C>) { _node->_handle.emplace<Node::Dynamic>(std::forward<C>(c)); } else if constexpr(is_condition_task_v<C>) { _node->_handle.emplace<Node::Condition>(std::forward<C>(c)); } else if constexpr(is_multi_condition_task_v<C>) { _node->_handle.emplace<Node::MultiCondition>(std::forward<C>(c)); } else if constexpr(is_cudaflow_task_v<C>) { _node->_handle.emplace<Node::cudaFlow>(std::forward<C>(c)); } else if constexpr(is_runtime_task_v<C>) { _node->_handle.emplace<Node::Runtime>(std::forward<C>(c)); } else { static_assert(dependent_false_v<C>, "invalid task callable"); } return *this; } // Function: data inline void* Task::data() const { return _node->_data; } // Function: data inline Task& Task::data(void* data) { _node->_data = data; return *this; } // Function: priority inline Task& Task::priority(TaskPriority p) { _node->_priority = static_cast<unsigned>(p); return *this; } // Function: priority inline TaskPriority Task::priority() const { return static_cast<TaskPriority>(_node->_priority); } // ---------------------------------------------------------------------------- // global ostream // ---------------------------------------------------------------------------- /** @brief overload of ostream inserter operator for cudaTask */ inline std::ostream& operator << (std::ostream& os, const Task& task) { task.dump(os); return os; } // ---------------------------------------------------------------------------- /** @class TaskView @brief class to access task information from the observer interface */ class TaskView { friend class Executor; public: /** @brief queries the name of the task */ const std::string& name() const; /** @brief queries the number of successors of the task */ size_t num_successors() const; /** @brief queries the number of predecessors of the task */ size_t num_dependents() const; /** @brief queries the number of strong dependents of the task */ size_t num_strong_dependents() const; /** @brief queries the number of weak 
dependents of the task */ size_t num_weak_dependents() const; /** @brief applies an visitor callable to each successor of the task */ template <typename V> void for_each_successor(V&& visitor) const; /** @brief applies an visitor callable to each dependents of the task */ template <typename V> void for_each_dependent(V&& visitor) const; /** @brief queries the task type */ TaskType type() const; /** @brief obtains a hash value of the underlying node */ size_t hash_value() const; private: TaskView(const Node&); TaskView(const TaskView&) = default; const Node& _node; }; // Constructor inline TaskView::TaskView(const Node& node) : _node {node} { } // Function: name inline const std::string& TaskView::name() const { return _node._name; } // Function: num_dependents inline size_t TaskView::num_dependents() const { return _node.num_dependents(); } // Function: num_strong_dependents inline size_t TaskView::num_strong_dependents() const { return _node.num_strong_dependents(); } // Function: num_weak_dependents inline size_t TaskView::num_weak_dependents() const { return _node.num_weak_dependents(); } // Function: num_successors inline size_t TaskView::num_successors() const { return _node.num_successors(); } // Function: type inline TaskType TaskView::type() const { switch(_node._handle.index()) { case Node::PLACEHOLDER: return TaskType::PLACEHOLDER; case Node::STATIC: return TaskType::STATIC; case Node::DYNAMIC: return TaskType::DYNAMIC; case Node::CONDITION: return TaskType::CONDITION; case Node::MULTI_CONDITION: return TaskType::CONDITION; case Node::MODULE: return TaskType::MODULE; case Node::ASYNC: return TaskType::ASYNC; case Node::SILENT_ASYNC: return TaskType::ASYNC; case Node::CUDAFLOW: return TaskType::CUDAFLOW; case Node::SYCLFLOW: return TaskType::SYCLFLOW; case Node::RUNTIME: return TaskType::RUNTIME; default: return TaskType::UNDEFINED; } } // Function: hash_value inline size_t TaskView::hash_value() const { return std::hash<const Node*>{}(&_node); } // Function: for_each_successor template <typename V> void TaskView::for_each_successor(V&& visitor) const { for(size_t i=0; i<_node._successors.size(); ++i) { visitor(TaskView(_node._successors[i])); } } // Function: for_each_dependent template <typename V> void TaskView::for_each_dependent(V&& visitor) const { for(size_t i=0; i<_node._dependents.size(); ++i) { visitor(TaskView(_node._dependents[i])); } } } // end of namespace tf. --------------------------------------------------- namespace std { /** @struct hash @brief hash specialization for std::hash<tf::Task> */ template <> struct hash<tf::Task> { auto operator() (const tf::Task& task) const noexcept { return task.hash_value(); } }; /** @struct hash @brief hash specialization for std::hash<tf::TaskView> */ template <> struct hash<tf::TaskView> { auto operator() (const tf::TaskView& task_view) const noexcept { return task_view.hash_value(); } }; } // end of namespace std ----------------------------------------------------
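// ----------------------------------------------------------------------------
// Usage sketch (illustrative only, based on the interfaces defined above):
//
//   tf::Taskflow taskflow;
//   tf::Task A = taskflow.emplace([](){ std::cout << "A\n"; });
//   tf::Task B = taskflow.emplace([](){ std::cout << "B\n"; });
//   A.name("A").priority(tf::TaskPriority::HIGH);  // smaller value = higher priority
//   B.name("B").priority(tf::TaskPriority::LOW);
//   A.precede(B);                                  // A runs before B
// ----------------------------------------------------------------------------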
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/error.hpp
#pragma once #include <iostream> #include <sstream> #include <exception> #include "../utility/stream.hpp" namespace tf { // Procedure: throw_re // Throws a std::runtime_error with the file name and line number prepended to the message. template <typename... ArgsT> //void throw_se(const char* fname, const size_t line, Error::Code c, ArgsT&&... args) { void throw_re(const char* fname, const size_t line, ArgsT&&... args) { std::ostringstream oss; oss << "[" << fname << ":" << line << "] "; //ostreamize(oss, std::forward<ArgsT>(args)...); (oss << ... << args); throw std::runtime_error(oss.str()); } } // ------------------------------------------------------------------------ #define TF_THROW(...) tf::throw_re(__FILE__, __LINE__, __VA_ARGS__);
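A minimal sketch of how the TF_THROW macro above can be used; the validation helper, message, and include path below are illustrative assumptions.

#include <iostream>
#include <stdexcept>
#include <taskflow/core/error.hpp>   // include path assumed

// hypothetical validation helper
void set_worker_count(int n) {
  if(n <= 0) {
    // expands to tf::throw_re(__FILE__, __LINE__, ...) and throws std::runtime_error
    TF_THROW("invalid worker count: ", n);
  }
}

int main() {
  try {
    set_worker_count(0);
  }
  catch(const std::runtime_error& e) {
    // message looks like "[<file>:<line>] invalid worker count: 0"
    std::cout << e.what() << '\n';
  }
  return 0;
}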
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/async_task.hpp
#pragma once #include "graph.hpp" /** @file async_task.hpp @brief asynchronous task include file */ namespace tf { // ---------------------------------------------------------------------------- // AsyncTask // ---------------------------------------------------------------------------- /** @brief class to create a dependent asynchronous task A tf::AsyncTask is a lightweight handle that retains @em shared ownership of a dependent async task created by an executor. This shared ownership ensures that the async task remains alive when adding it to the dependency list of another async task, thus avoiding the classical [ABA problem](https://en.wikipedia.org/wiki/ABA_problem). @code{.cpp} // main thread retains shared ownership of async task A tf::AsyncTask A = executor.silent_dependent_async([](){}); // task A remains alive (i.e., at least one ref count by the main thread) // when being added to the dependency list of async task B tf::AsyncTask B = executor.silent_dependent_async([](){}, A); @endcode Currently, tf::AsyncTask is implemented based on the logic of C++ smart pointer std::shared_ptr and is considered cheap to copy or move as long as only a handful of objects own it. When a worker completes an async task, it will remove the task from the executor, decrementing the number of shared owners by one. If that counter reaches zero, the task is destroyed. */ class AsyncTask { friend class Executor; public: /** @brief constructs an empty task handle */ AsyncTask() = default; /** @brief destroys the managed asynchronous task if this is the last owner */ ~AsyncTask(); /** @brief constructs an asynchronous task that shares ownership of @c rhs */ AsyncTask(const AsyncTask& rhs); /** @brief move-constructs an asynchronous task from @c rhs */ AsyncTask(AsyncTask&& rhs); /** @brief copy-assigns the asynchronous task from @c rhs Releases the managed object of @c this and retains a new shared ownership of @c rhs. */ AsyncTask& operator = (const AsyncTask& rhs); /** @brief move-assigns the asynchronous task from @c rhs Releases the managed object of @c this and takes over the ownership of @c rhs. 
*/ AsyncTask& operator = (AsyncTask&& rhs); /** @brief checks if the asynchronous task stores nothing */ bool empty() const; /** @brief release the managed object of @c this */ void reset(); /** @brief obtains a hash value of this asynchronous task */ size_t hash_value() const; /** @brief returns the number of shared owners that are currently managing this asynchronous task */ size_t use_count() const; /** @brief returns the boolean indicating whether the async task is done */ bool is_done() const; private: explicit AsyncTask(Node*); Node* _node {nullptr}; void _incref(); void _decref(); }; // Constructor inline AsyncTask::AsyncTask(Node* ptr) : _node{ptr} { _incref(); } // Function: _incref inline void AsyncTask::_incref() { if(_node) { std::get_if<Node::DependentAsync>(&(_node->_handle))->use_count.fetch_add( 1, std::memory_order_relaxed ); } } // Function: _decref inline void AsyncTask::_decref() { if(_node && std::get_if<Node::DependentAsync>(&(_node->_handle))->use_count.fetch_sub( 1, std::memory_order_acq_rel ) == 1) { node_pool.recycle(_node); } } // Copy Constructor inline AsyncTask::AsyncTask(const AsyncTask& rhs) : _node{rhs._node} { _incref(); } // Move Constructor inline AsyncTask::AsyncTask(AsyncTask&& rhs) : _node {rhs._node} { rhs._node = nullptr; } // Destructor inline AsyncTask::~AsyncTask() { _decref(); } // Copy assignment inline AsyncTask& AsyncTask::operator = (const AsyncTask& rhs) { _decref(); _node = rhs._node; _incref(); return *this; } // Move assignment inline AsyncTask& AsyncTask::operator = (AsyncTask&& rhs) { _decref(); _node = rhs._node; rhs._node = nullptr; return *this; } // Function: empty inline bool AsyncTask::empty() const { return _node == nullptr; } // Function: reset inline void AsyncTask::reset() { _decref(); _node = nullptr; } // Function: hash_value inline size_t AsyncTask::hash_value() const { return std::hash<Node*>{}(_node); } // Function: use_count inline size_t AsyncTask::use_count() const { return _node == nullptr ? size_t{0} : std::get_if<Node::DependentAsync>(&(_node->_handle))->use_count.load( std::memory_order_relaxed ); } // Function: is_done inline bool AsyncTask::is_done() const { return std::get_if<Node::DependentAsync>(&(_node->_handle))->state.load( std::memory_order_acquire ) == Node::AsyncState::FINISHED; } } // end of namespace tf ----------------------------------------------------
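A small usage sketch of tf::AsyncTask together with tf::Executor::silent_dependent_async (defined in async.hpp below); the include path is assumed and the task bodies are placeholders.

#include <taskflow/taskflow.hpp>   // umbrella header path assumed
#include <iostream>

int main() {
  tf::Executor executor;

  // A and B must finish before C; the AsyncTask handles keep the nodes
  // alive while they sit on C's dependency list
  tf::AsyncTask A = executor.silent_dependent_async([](){ std::cout << "A\n"; });
  tf::AsyncTask B = executor.silent_dependent_async([](){ std::cout << "B\n"; });
  tf::AsyncTask C = executor.silent_dependent_async([](){ std::cout << "C\n"; }, A, B);

  executor.wait_for_all();

  std::cout << std::boolalpha
            << "C done: "   << C.is_done()      // true after wait_for_all()
            << ", owners: " << C.use_count() << '\n';
  return 0;
}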
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/async.hpp
#pragma once #include "executor.hpp" // https://hackmd.io/@sysprog/concurrency-atomics namespace tf { // ---------------------------------------------------------------------------- // Async // ---------------------------------------------------------------------------- // Function: async template <typename F> auto Executor::async(const std::string& name, F&& f) { _increment_topology(); using R = std::invoke_result_t<std::decay_t<F>>; std::promise<R> p; auto fu{p.get_future()}; auto node = node_pool.animate( name, 0, nullptr, nullptr, 0, std::in_place_type_t<Node::Async>{}, _make_promised_async(std::move(p), std::forward<F>(f)) ); _schedule_async_task(node); return fu; } // Function: async template <typename F> auto Executor::async(F&& f) { return async("", std::forward<F>(f)); } // ---------------------------------------------------------------------------- // Silent Async // ---------------------------------------------------------------------------- // Function: silent_async template <typename F> void Executor::silent_async(const std::string& name, F&& f) { _increment_topology(); auto node = node_pool.animate( name, 0, nullptr, nullptr, 0, std::in_place_type_t<Node::Async>{}, std::forward<F>(f) ); _schedule_async_task(node); } // Function: silent_async template <typename F> void Executor::silent_async(F&& f) { silent_async("", std::forward<F>(f)); } // ---------------------------------------------------------------------------- // Async Helper Methods // ---------------------------------------------------------------------------- // Function: _make_promised_async template <typename R, typename F> auto Executor::_make_promised_async(std::promise<R>&& p, F&& func) { return [p=make_moc(std::move(p)), func=std::forward<F>(func)]() mutable { if constexpr(std::is_same_v<R, void>) { func(); p.object.set_value(); } else { p.object.set_value(func()); } }; } // Procedure: _schedule_async_task inline void Executor::_schedule_async_task(Node* node) { if(auto w = _this_worker(); w) { _schedule(*w, node); } else{ _schedule(node); } } // Procedure: _tear_down_async inline void Executor::_tear_down_async(Node* node) { // from runtime if(node->_parent) { node->_parent->_join_counter.fetch_sub(1, std::memory_order_release); } // from executor else { _decrement_topology(); } node_pool.recycle(node); } // ---------------------------------------------------------------------------- // Silent Dependent Async // ---------------------------------------------------------------------------- // Function: silent_dependent_async template <typename F, typename... Tasks, std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>* > tf::AsyncTask Executor::silent_dependent_async(F&& func, Tasks&&... tasks) { return silent_dependent_async("", std::forward<F>(func), std::forward<Tasks>(tasks)...); } // Function: silent_dependent_async template <typename F, typename... Tasks, std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>* > tf::AsyncTask Executor::silent_dependent_async( const std::string& name, F&& func, Tasks&&... 
tasks ){ _increment_topology(); size_t num_dependents = sizeof...(Tasks); // create a task before scheduling the node to retain a shared ownership first AsyncTask task(node_pool.animate( name, 0, nullptr, nullptr, num_dependents, std::in_place_type_t<Node::DependentAsync>{}, std::forward<F>(func) )); if constexpr(sizeof...(Tasks) > 0) { (_process_async_dependent(task._node, tasks, num_dependents), ...); } if(num_dependents == 0) { _schedule_async_task(task._node); } return task; } // Function: silent_dependent_async template <typename F, typename I, std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>* > tf::AsyncTask Executor::silent_dependent_async(F&& func, I first, I last) { return silent_dependent_async("", std::forward<F>(func), first, last); } // Function: silent_dependent_async template <typename F, typename I, std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>* > tf::AsyncTask Executor::silent_dependent_async( const std::string& name, F&& func, I first, I last ) { _increment_topology(); size_t num_dependents = std::distance(first, last); AsyncTask task(node_pool.animate( name, 0, nullptr, nullptr, num_dependents, std::in_place_type_t<Node::DependentAsync>{}, std::forward<F>(func) )); for(; first != last; first++){ _process_async_dependent(task._node, *first, num_dependents); } if(num_dependents == 0) { _schedule_async_task(task._node); } return task; } // ---------------------------------------------------------------------------- // Dependent Async // ---------------------------------------------------------------------------- // Function: dependent_async template <typename F, typename... Tasks, std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>* > auto Executor::dependent_async(F&& func, Tasks&&... tasks) { return dependent_async("", std::forward<F>(func), std::forward<Tasks>(tasks)...); } // Function: dependent_async template <typename F, typename... Tasks, std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>* > auto Executor::dependent_async( const std::string& name, F&& func, Tasks&&... 
tasks ) { _increment_topology(); using R = std::invoke_result_t<std::decay_t<F>>; std::promise<R> p; auto fu{p.get_future()}; size_t num_dependents = sizeof...(tasks); AsyncTask task(node_pool.animate( name, 0, nullptr, nullptr, num_dependents, std::in_place_type_t<Node::DependentAsync>{}, _make_promised_async(std::move(p), std::forward<F>(func)) )); if constexpr(sizeof...(Tasks) > 0) { (_process_async_dependent(task._node, tasks, num_dependents), ...); } if(num_dependents == 0) { _schedule_async_task(task._node); } return std::make_pair(std::move(task), std::move(fu)); } // Function: dependent_async template <typename F, typename I, std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>* > auto Executor::dependent_async(F&& func, I first, I last) { return dependent_async("", std::forward<F>(func), first, last); } // Function: dependent_async template <typename F, typename I, std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>* > auto Executor::dependent_async( const std::string& name, F&& func, I first, I last ) { _increment_topology(); using R = std::invoke_result_t<std::decay_t<F>>; std::promise<R> p; auto fu{p.get_future()}; size_t num_dependents = std::distance(first, last); AsyncTask task(node_pool.animate( name, 0, nullptr, nullptr, num_dependents, std::in_place_type_t<Node::DependentAsync>{}, _make_promised_async(std::move(p), std::forward<F>(func)) )); for(; first != last; first++) { _process_async_dependent(task._node, *first, num_dependents); } if(num_dependents == 0) { _schedule_async_task(task._node); } return std::make_pair(std::move(task), std::move(fu)); } // ---------------------------------------------------------------------------- // Dependent Async Helper Functions // ---------------------------------------------------------------------------- // Procedure: _process_async_dependent inline void Executor::_process_async_dependent( Node* node, tf::AsyncTask& task, size_t& num_dependents ) { auto& state = std::get_if<Node::DependentAsync>(&(task._node->_handle))->state; add_successor: auto target = Node::AsyncState::UNFINISHED; // acquires the lock if(state.compare_exchange_weak(target, Node::AsyncState::LOCKED, std::memory_order_acq_rel, std::memory_order_acquire)) { task._node->_successors.push_back(node); state.store(Node::AsyncState::UNFINISHED, std::memory_order_release); } // dep's state is FINISHED, which means dep finished its callable already // thus decrement the node's join counter by 1 else if (target == Node::AsyncState::FINISHED) { num_dependents = node->_join_counter.fetch_sub(1, std::memory_order_acq_rel) - 1; } // another worker adding its async task to the same successors of this node else { goto add_successor; } } // Procedure: _tear_down_dependent_async inline void Executor::_tear_down_dependent_async(Worker& worker, Node* node) { auto handle = std::get_if<Node::DependentAsync>(&(node->_handle)); // this async task comes from Executor auto target = Node::AsyncState::UNFINISHED; while(!handle->state.compare_exchange_weak(target, Node::AsyncState::FINISHED, std::memory_order_acq_rel, std::memory_order_relaxed)) { target = Node::AsyncState::UNFINISHED; } // spaw successors whenever their dependencies are resolved worker._cache = nullptr; for(size_t i=0; i<node->_successors.size(); ++i) { if(auto s = node->_successors[i]; s->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1 ) { if(worker._cache) { _schedule(worker, worker._cache); } worker._cache = s; } } // now the executor no longer needs to retain ownership 
if(handle->use_count.fetch_sub(1, std::memory_order_acq_rel) == 1) { node_pool.recycle(node); } _decrement_topology(); } } // end of namespace tf -----------------------------------------------------
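A short sketch of the async interfaces defined above: fire-and-forget silent_async, future-returning async, and dependent_async, which returns a handle/future pair. The include path, task names, and values are assumptions.

#include <taskflow/taskflow.hpp>   // umbrella header path assumed
#include <future>
#include <iostream>

int main() {
  tf::Executor executor;

  // fire-and-forget task (no future)
  executor.silent_async("log", [](){ std::cout << "logging\n"; });

  // asynchronous task whose result is retrieved through std::future
  std::future<int> fu = executor.async("answer", [](){ return 42; });

  // dependent_async returns a (handle, future) pair; B runs after A
  tf::AsyncTask A = executor.silent_dependent_async([](){ std::cout << "A\n"; });
  auto [B, fuB]   = executor.dependent_async([](){ return 1; }, A);

  std::cout << fu.get() + fuB.get() << '\n';   // 43
  executor.wait_for_all();
  return 0;
}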
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/environment.hpp
#pragma once #define TF_ENABLE_PROFILER "TF_ENABLE_PROFILER" namespace tf { } // end of namespace tf -----------------------------------------------------
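TF_ENABLE_PROFILER above only names the environment variable consulted at executor construction (the registration itself lives in executor.hpp, which is not shown here); a hedged sketch of how a run might be profiled with it, assuming the upstream Taskflow behavior and include path:

// Run the program with the variable pointing at an output file, e.g.
//   TF_ENABLE_PROFILER=trace.json ./my_app
// TFProfManager (see observer.hpp below) then writes the recorded
// timelines to trace.json at process exit (.tfp selects the binary format).
#include <taskflow/taskflow.hpp>   // umbrella header path assumed

int main() {
  tf::Executor executor;
  tf::Taskflow taskflow;
  taskflow.emplace([](){ /* some work */ });
  executor.run(taskflow).wait();
  return 0;   // profile data is flushed when TFProfManager is destroyed
}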
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/observer.hpp
#pragma once #include "task.hpp" #include "worker.hpp" /** @file observer.hpp @brief observer include file */ namespace tf { // ---------------------------------------------------------------------------- // timeline data structure // ---------------------------------------------------------------------------- /** @brief default time point type of observers */ using observer_stamp_t = std::chrono::time_point<std::chrono::steady_clock>; /** @private */ struct Segment { std::string name; TaskType type; observer_stamp_t beg; observer_stamp_t end; template <typename Archiver> auto save(Archiver& ar) const { return ar(name, type, beg, end); } template <typename Archiver> auto load(Archiver& ar) { return ar(name, type, beg, end); } Segment() = default; Segment( const std::string& n, TaskType t, observer_stamp_t b, observer_stamp_t e ) : name {n}, type {t}, beg {b}, end {e} { } auto span() const { return end-beg; } }; /** @private */ struct Timeline { size_t uid; observer_stamp_t origin; std::vector<std::vector<std::vector<Segment>>> segments; Timeline() = default; Timeline(const Timeline& rhs) = delete; Timeline(Timeline&& rhs) = default; Timeline& operator = (const Timeline& rhs) = delete; Timeline& operator = (Timeline&& rhs) = default; template <typename Archiver> auto save(Archiver& ar) const { return ar(uid, origin, segments); } template <typename Archiver> auto load(Archiver& ar) { return ar(uid, origin, segments); } }; /** @private */ struct ProfileData { std::vector<Timeline> timelines; ProfileData() = default; ProfileData(const ProfileData& rhs) = delete; ProfileData(ProfileData&& rhs) = default; ProfileData& operator = (const ProfileData& rhs) = delete; ProfileData& operator = (ProfileData&&) = default; template <typename Archiver> auto save(Archiver& ar) const { return ar(timelines); } template <typename Archiver> auto load(Archiver& ar) { return ar(timelines); } }; // ---------------------------------------------------------------------------- // observer interface // ---------------------------------------------------------------------------- /** @class: ObserverInterface @brief class to derive an executor observer The tf::ObserverInterface class allows users to define custom methods to monitor the behaviors of an executor. This is particularly useful when you want to inspect the performance of an executor and visualize when each thread participates in the execution of a task. To prevent users from direct access to the internal threads and tasks, tf::ObserverInterface provides immutable wrappers, tf::WorkerView and tf::TaskView, over workers and tasks. Please refer to tf::WorkerView and tf::TaskView for details. Example usage: @code{.cpp} struct MyObserver : public tf::ObserverInterface { MyObserver(const std::string& name) { std::cout << "constructing observer " << name << '\n'; } void set_up(size_t num_workers) override final { std::cout << "setting up observer with " << num_workers << " workers\n"; } void on_entry(WorkerView w, tf::TaskView tv) override final { std::ostringstream oss; oss << "worker " << w.id() << " ready to run " << tv.name() << '\n'; std::cout << oss.str(); } void on_exit(WorkerView w, tf::TaskView tv) override final { std::ostringstream oss; oss << "worker " << w.id() << " finished running " << tv.name() << '\n'; std::cout << oss.str(); } }; tf::Taskflow taskflow; tf::Executor executor; // insert tasks into taskflow // ... 
// create a custom observer std::shared_ptr<MyObserver> observer = executor.make_observer<MyObserver>("MyObserver"); // run the taskflow executor.run(taskflow).wait(); @endcode */ class ObserverInterface { public: /** @brief virtual destructor */ virtual ~ObserverInterface() = default; /** @brief constructor-like method to call when the executor observer is fully created @param num_workers the number of the worker threads in the executor */ virtual void set_up(size_t num_workers) = 0; /** @brief method to call before a worker thread executes a closure @param wv an immutable view of this worker thread @param task_view a constant wrapper object to the task */ virtual void on_entry(WorkerView wv, TaskView task_view) = 0; /** @brief method to call after a worker thread executed a closure @param wv an immutable view of this worker thread @param task_view a constant wrapper object to the task */ virtual void on_exit(WorkerView wv, TaskView task_view) = 0; }; // ---------------------------------------------------------------------------- // ChromeObserver definition // ---------------------------------------------------------------------------- /** @class: ChromeObserver @brief class to create an observer based on Chrome tracing format A tf::ChromeObserver inherits tf::ObserverInterface and defines methods to dump the observed thread activities into a format that can be visualized through @ChromeTracing. @code{.cpp} tf::Taskflow taskflow; tf::Executor executor; // insert tasks into taskflow // ... // create a custom observer std::shared_ptr<tf::ChromeObserver> observer = executor.make_observer<tf::ChromeObserver>(); // run the taskflow executor.run(taskflow).wait(); // dump the thread activities to a chrome-tracing format. observer->dump(std::cout); @endcode */ class ChromeObserver : public ObserverInterface { friend class Executor; // data structure to record each task execution struct Segment { std::string name; observer_stamp_t beg; observer_stamp_t end; Segment( const std::string& n, observer_stamp_t b, observer_stamp_t e ); }; // data structure to store the entire execution timeline struct Timeline { observer_stamp_t origin; std::vector<std::vector<Segment>> segments; std::vector<std::stack<observer_stamp_t>> stacks; }; public: /** @brief dumps the timelines into a @ChromeTracing format through an output stream */ void dump(std::ostream& ostream) const; /** @brief dumps the timelines into a @ChromeTracing format */ inline std::string dump() const; /** @brief clears the timeline data */ inline void clear(); /** @brief queries the number of tasks observed */ inline size_t num_tasks() const; private: inline void set_up(size_t num_workers) override final; inline void on_entry(WorkerView w, TaskView task_view) override final; inline void on_exit(WorkerView w, TaskView task_view) override final; Timeline _timeline; }; // constructor inline ChromeObserver::Segment::Segment( const std::string& n, observer_stamp_t b, observer_stamp_t e ) : name {n}, beg {b}, end {e} { } // Procedure: set_up inline void ChromeObserver::set_up(size_t num_workers) { _timeline.segments.resize(num_workers); _timeline.stacks.resize(num_workers); for(size_t w=0; w<num_workers; ++w) { _timeline.segments[w].reserve(32); } _timeline.origin = observer_stamp_t::clock::now(); } // Procedure: on_entry inline void ChromeObserver::on_entry(WorkerView wv, TaskView) { _timeline.stacks[wv.id()].push(observer_stamp_t::clock::now()); } // Procedure: on_exit inline void ChromeObserver::on_exit(WorkerView wv, TaskView tv) { size_t w = 
wv.id(); assert(!_timeline.stacks[w].empty()); auto beg = _timeline.stacks[w].top(); _timeline.stacks[w].pop(); _timeline.segments[w].emplace_back( tv.name(), beg, observer_stamp_t::clock::now() ); } // Function: clear inline void ChromeObserver::clear() { for(size_t w=0; w<_timeline.segments.size(); ++w) { _timeline.segments[w].clear(); while(!_timeline.stacks[w].empty()) { _timeline.stacks[w].pop(); } } } // Procedure: dump inline void ChromeObserver::dump(std::ostream& os) const { using namespace std::chrono; size_t first; for(first = 0; first<_timeline.segments.size(); ++first) { if(_timeline.segments[first].size() > 0) { break; } } os << '['; for(size_t w=first; w<_timeline.segments.size(); w++) { if(w != first && _timeline.segments[w].size() > 0) { os << ','; } for(size_t i=0; i<_timeline.segments[w].size(); i++) { os << '{'<< "\"cat\":\"ChromeObserver\","; // name field os << "\"name\":\""; if(_timeline.segments[w][i].name.empty()) { os << w << '_' << i; } else { os << _timeline.segments[w][i].name; } os << "\","; // segment field os << "\"ph\":\"X\"," << "\"pid\":1," << "\"tid\":" << w << ',' << "\"ts\":" << duration_cast<microseconds>( _timeline.segments[w][i].beg - _timeline.origin ).count() << ',' << "\"dur\":" << duration_cast<microseconds>( _timeline.segments[w][i].end - _timeline.segments[w][i].beg ).count(); if(i != _timeline.segments[w].size() - 1) { os << "},"; } else { os << '}'; } } } os << "]\n"; } // Function: dump inline std::string ChromeObserver::dump() const { std::ostringstream oss; dump(oss); return oss.str(); } // Function: num_tasks inline size_t ChromeObserver::num_tasks() const { return std::accumulate( _timeline.segments.begin(), _timeline.segments.end(), size_t{0}, [](size_t sum, const auto& exe){ return sum + exe.size(); } ); } // ---------------------------------------------------------------------------- // TFProfObserver definition // ---------------------------------------------------------------------------- /** @class TFProfObserver @brief class to create an observer based on the built-in taskflow profiler format A tf::TFProfObserver inherits tf::ObserverInterface and defines methods to dump the observed thread activities into a format that can be visualized through @TFProf. @code{.cpp} tf::Taskflow taskflow; tf::Executor executor; // insert tasks into taskflow // ... // create a custom observer std::shared_ptr<tf::TFProfObserver> observer = executor.make_observer<tf::TFProfObserver>(); // run the taskflow executor.run(taskflow).wait(); // dump the thread activities to Taskflow Profiler format. observer->dump(std::cout); @endcode */ class TFProfObserver : public ObserverInterface { friend class Executor; friend class TFProfManager; /** @private overall task summary */ struct TaskSummary { size_t count {0}; size_t total_span {0}; size_t min_span; size_t max_span; float avg_span() const { return total_span * 1.0f / count; } }; /** @private worker summary at a level */ struct WorkerSummary { size_t id; size_t level; size_t count {0}; size_t total_span {0}; size_t min_span{0}; size_t max_span{0}; std::array<TaskSummary, TASK_TYPES.size()> tsum; float avg_span() const { return total_span * 1.0f / count; } //return count < 2 ? 
0.0f : total_delay * 1.0f / (count-1); }; /** @private */ struct Summary { std::array<TaskSummary, TASK_TYPES.size()> tsum; std::vector<WorkerSummary> wsum; void dump_tsum(std::ostream&) const; void dump_wsum(std::ostream&) const; void dump(std::ostream&) const; }; public: /** @brief dumps the timelines into a @TFProf format through an output stream */ void dump(std::ostream& ostream) const; /** @brief dumps the timelines into a JSON string */ std::string dump() const; /** @brief shows the summary report through an output stream */ void summary(std::ostream& ostream) const; /** @brief returns the summary report in a string */ std::string summary() const; /** @brief clears the timeline data */ void clear(); /** @brief queries the number of tasks observed */ size_t num_tasks() const; /** @brief queries the number of observed workers */ size_t num_workers() const; private: Timeline _timeline; std::vector<std::stack<observer_stamp_t>> _stacks; inline void set_up(size_t num_workers) override final; inline void on_entry(WorkerView, TaskView) override final; inline void on_exit(WorkerView, TaskView) override final; }; // dump the task summary inline void TFProfObserver::Summary::dump_tsum(std::ostream& os) const { // task summary size_t type_w{10}, count_w{5}, time_w{9}, avg_w{8}, min_w{8}, max_w{8}; std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){ if(i.count == 0) return; count_w = std::max(count_w, std::to_string(i.count).size()); }); std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){ if(i.count == 0) return; time_w = std::max(time_w, std::to_string(i.total_span).size()); }); std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){ if(i.count == 0) return; avg_w = std::max(time_w, std::to_string(i.avg_span()).size()); }); std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){ if(i.count == 0) return; min_w = std::max(min_w, std::to_string(i.min_span).size()); }); std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){ if(i.count == 0) return; max_w = std::max(max_w, std::to_string(i.max_span).size()); }); os << std::setw(type_w) << "-Task-" << std::setw(count_w+2) << "Count" << std::setw(time_w+2) << "Time (us)" << std::setw(avg_w+2) << "Avg (us)" << std::setw(min_w+2) << "Min (us)" << std::setw(max_w+2) << "Max (us)" << '\n'; for(size_t i=0; i<TASK_TYPES.size(); i++) { if(tsum[i].count == 0) { continue; } os << std::setw(type_w) << to_string(TASK_TYPES[i]) << std::setw(count_w+2) << tsum[i].count << std::setw(time_w+2) << tsum[i].total_span << std::setw(avg_w+2) << std::to_string(tsum[i].avg_span()) << std::setw(min_w+2) << tsum[i].min_span << std::setw(max_w+2) << tsum[i].max_span << '\n'; } } // dump the worker summary inline void TFProfObserver::Summary::dump_wsum(std::ostream& os) const { // task summary size_t w_w{10}, t_w{10}, l_w{5}, c_w{5}, d_w{9}, avg_w{8}, min_w{8}, max_w{8}; std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){ if(i.count == 0) return; l_w = std::max(l_w, std::to_string(i.level).size()); }); std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){ if(i.count == 0) return; c_w = std::max(c_w, std::to_string(i.count).size()); }); std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){ if(i.count == 0) return; d_w = std::max(d_w, std::to_string(i.total_span).size()); }); std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){ if(i.count == 0) return; avg_w = std::max(avg_w, std::to_string(i.avg_span()).size()); }); std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){ if(i.count == 0) return; min_w = 
std::max(min_w, std::to_string(i.min_span).size()); }); std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){ if(i.count == 0) return; max_w = std::max(max_w, std::to_string(i.max_span).size()); }); os << std::setw(w_w) << "-Worker-" << std::setw(l_w+2) << "Level" << std::setw(t_w) << "Task" << std::setw(c_w+2) << "Count" << std::setw(d_w+2) << "Time (us)" << std::setw(avg_w+2) << "Avg (us)" << std::setw(min_w+2) << "Min (us)" << std::setw(max_w+2) << "Max (us)" << '\n'; for(const auto& ws : wsum) { if(ws.count == 0) { continue; } os << std::setw(w_w) << ws.id << std::setw(l_w+2) << ws.level; bool first = true; for(size_t i=0; i<TASK_TYPES.size(); i++) { if(ws.tsum[i].count == 0) { continue; } os << (first ? std::setw(t_w) : std::setw(w_w + l_w + 2 + t_w)); first = false; os << to_string(TASK_TYPES[i]) << std::setw(c_w+2) << ws.tsum[i].count << std::setw(d_w+2) << ws.tsum[i].total_span << std::setw(avg_w+2) << std::to_string(ws.tsum[i].avg_span()) << std::setw(min_w+2) << ws.tsum[i].min_span << std::setw(max_w+2) << ws.tsum[i].max_span << '\n'; } // per-worker summary os << std::setw(w_w + l_w + t_w + c_w + 4) << ws.count << std::setw(d_w+2) << ws.total_span << std::setw(avg_w+2) << std::to_string(ws.avg_span()) << std::setw(min_w+2) << ws.min_span << std::setw(max_w+2) << ws.max_span << '\n'; //for(size_t j=0; j<w_w+l_w+t_w+4; j++) os << ' '; //for(size_t j=0; j<c_w+d_w+avg_w+min_w+max_w+8; j++) os << '-'; //os <<'\n'; } } // dump the summary report through an ostream inline void TFProfObserver::Summary::dump(std::ostream& os) const { dump_tsum(os); os << '\n'; dump_wsum(os); } // Procedure: set_up inline void TFProfObserver::set_up(size_t num_workers) { _timeline.uid = unique_id<size_t>(); _timeline.origin = observer_stamp_t::clock::now(); _timeline.segments.resize(num_workers); _stacks.resize(num_workers); } // Procedure: on_entry inline void TFProfObserver::on_entry(WorkerView wv, TaskView) { _stacks[wv.id()].push(observer_stamp_t::clock::now()); } // Procedure: on_exit inline void TFProfObserver::on_exit(WorkerView wv, TaskView tv) { size_t w = wv.id(); assert(!_stacks[w].empty()); if(_stacks[w].size() > _timeline.segments[w].size()) { _timeline.segments[w].resize(_stacks[w].size()); } auto beg = _stacks[w].top(); _stacks[w].pop(); _timeline.segments[w][_stacks[w].size()].emplace_back( tv.name(), tv.type(), beg, observer_stamp_t::clock::now() ); } // Function: clear inline void TFProfObserver::clear() { for(size_t w=0; w<_timeline.segments.size(); ++w) { for(size_t l=0; l<_timeline.segments[w].size(); ++l) { _timeline.segments[w][l].clear(); } while(!_stacks[w].empty()) { _stacks[w].pop(); } } } // Procedure: dump inline void TFProfObserver::dump(std::ostream& os) const { using namespace std::chrono; size_t first; for(first = 0; first<_timeline.segments.size(); ++first) { if(_timeline.segments[first].size() > 0) { break; } } // not timeline data to dump if(first == _timeline.segments.size()) { os << "{}\n"; return; } os << "{\"executor\":\"" << _timeline.uid << "\",\"data\":["; bool comma = false; for(size_t w=first; w<_timeline.segments.size(); w++) { for(size_t l=0; l<_timeline.segments[w].size(); l++) { if(_timeline.segments[w][l].empty()) { continue; } if(comma) { os << ','; } else { comma = true; } os << "{\"worker\":" << w << ",\"level\":" << l << ",\"data\":["; for(size_t i=0; i<_timeline.segments[w][l].size(); ++i) { const auto& s = _timeline.segments[w][l][i]; if(i) os << ','; // span os << "{\"span\":[" << duration_cast<microseconds>(s.beg - _timeline.origin).count() 
<< "," << duration_cast<microseconds>(s.end - _timeline.origin).count() << "],"; // name os << "\"name\":\""; if(s.name.empty()) { os << w << '_' << i; } else { os << s.name; } os << "\","; // e.g., category "type": "Condition Task" os << "\"type\":\"" << to_string(s.type) << "\""; os << "}"; } os << "]}"; } } os << "]}\n"; } // Function: dump inline std::string TFProfObserver::dump() const { std::ostringstream oss; dump(oss); return oss.str(); } // Procedure: summary inline void TFProfObserver::summary(std::ostream& os) const { using namespace std::chrono; Summary summary; std::optional<observer_stamp_t> view_beg, view_end; // find the first non-empty worker size_t first; for(first = 0; first<_timeline.segments.size(); ++first) { if(_timeline.segments[first].size() > 0) { break; } } // not timeline data to dump if(first == _timeline.segments.size()) { goto end_of_summary; } for(size_t w=first; w<_timeline.segments.size(); w++) { for(size_t l=0; l<_timeline.segments[w].size(); l++) { if(_timeline.segments[w][l].empty()) { continue; } // worker w at level l WorkerSummary ws; ws.id = w; ws.level = l; ws.count = _timeline.segments[w][l].size(); // scan all tasks at level l for(size_t i=0; i<_timeline.segments[w][l].size(); ++i) { // update the entire span auto& s = _timeline.segments[w][l][i]; view_beg = view_beg ? std::min(*view_beg, s.beg) : s.beg; view_end = view_end ? std::max(*view_end, s.end) : s.end; // update the task summary size_t t = duration_cast<microseconds>(s.end - s.beg).count(); auto& x = summary.tsum[static_cast<int>(s.type)]; x.count += 1; x.total_span += t; x.min_span = (x.count == 1) ? t : std::min(t, x.min_span); x.max_span = (x.count == 1) ? t : std::max(t, x.max_span); // update the worker summary ws.total_span += t; ws.min_span = (i == 0) ? t : std::min(t, ws.min_span); ws.max_span = (i == 0) ? t : std::max(t, ws.max_span); auto&y = ws.tsum[static_cast<int>(s.type)]; y.count += 1; y.total_span += t; y.min_span = (y.count == 1) ? t : std::min(t, y.min_span); y.max_span = (y.count == 1) ? t : std::max(t, y.max_span); // update the delay //if(i) { // size_t d = duration_cast<nanoseconds>( // s.beg - _timeline.segments[w][l][i-1].end // ).count(); // ws.total_delay += d; // ws.min_delay = (i == 1) ? d : std::min(ws.min_delay, d); // ws.max_delay = (i == 1) ? 
d : std::max(ws.max_delay, d); //} } summary.wsum.push_back(ws); } } end_of_summary: size_t view = 0; if(view_beg && view_end) { view = duration_cast<microseconds>(*view_end - *view_beg).count(); } os << "==Observer " << _timeline.uid << ": " << num_workers() << " workers completed " << num_tasks() << " tasks in " << view << " us\n"; summary.dump(os); } // Procedure: summary inline std::string TFProfObserver::summary() const { std::ostringstream oss; summary(oss); return oss.str(); } // Function: num_tasks inline size_t TFProfObserver::num_tasks() const { size_t s = 0; for(size_t w=0; w<_timeline.segments.size(); ++w) { for(size_t l=0; l<_timeline.segments[w].size(); ++l) { s += _timeline.segments[w][l].size(); } } return s; } // Function: num_workers inline size_t TFProfObserver::num_workers() const { size_t w = 0; for(size_t i=0; i<_timeline.segments.size(); ++i) { w += (!_timeline.segments[i].empty()); } return w; } // ---------------------------------------------------------------------------- // TFProfManager // ---------------------------------------------------------------------------- /** @private */ class TFProfManager { friend class Executor; public: ~TFProfManager(); TFProfManager(const TFProfManager&) = delete; TFProfManager& operator=(const TFProfManager&) = delete; static TFProfManager& get(); void dump(std::ostream& ostream) const; private: const std::string _fpath; std::mutex _mutex; std::vector<std::shared_ptr<TFProfObserver>> _observers; TFProfManager(); void _manage(std::shared_ptr<TFProfObserver> observer); }; // constructor inline TFProfManager::TFProfManager() : _fpath {get_env(TF_ENABLE_PROFILER)} { } // Procedure: manage inline void TFProfManager::_manage(std::shared_ptr<TFProfObserver> observer) { std::lock_guard lock(_mutex); _observers.push_back(std::move(observer)); } // Procedure: dump inline void TFProfManager::dump(std::ostream& os) const { for(size_t i=0; i<_observers.size(); ++i) { if(i) os << ','; _observers[i]->dump(os); } } // Destructor inline TFProfManager::~TFProfManager() { std::ofstream ofs(_fpath); if(ofs) { // .tfp if(_fpath.rfind(".tfp") != std::string::npos) { ProfileData data; data.timelines.reserve(_observers.size()); for(size_t i=0; i<_observers.size(); ++i) { data.timelines.push_back(std::move(_observers[i]->_timeline)); } Serializer<std::ofstream> serializer(ofs); serializer(data); } // .json else { // if(_fpath.rfind(".json") != std::string::npos) { ofs << "[\n"; for(size_t i=0; i<_observers.size(); ++i) { if(i) ofs << ','; _observers[i]->dump(ofs); } ofs << "]\n"; } } // do a summary report in stderr for each observer else { std::ostringstream oss; for(size_t i=0; i<_observers.size(); ++i) { _observers[i]->summary(oss); } fprintf(stderr, "%s", oss.str().c_str()); } } // Function: get inline TFProfManager& TFProfManager::get() { static TFProfManager mgr; return mgr; } // ---------------------------------------------------------------------------- // Identifier for Each Built-in Observer // ---------------------------------------------------------------------------- /** @enum ObserverType @brief enumeration of all observer types */ enum class ObserverType : int { TFPROF = 0, CHROME, UNDEFINED }; /** @brief convert an observer type to a human-readable string */ inline const char* to_string(ObserverType type) { switch(type) { case ObserverType::TFPROF: return "tfprof"; case ObserverType::CHROME: return "chrome"; default: return "undefined"; } } } // end of namespace tf -----------------------------------------------------
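A compact usage sketch of the observer interface above, attaching the built-in tf::TFProfObserver to an executor; the include path and task body are assumptions.

#include <taskflow/taskflow.hpp>   // umbrella header path assumed
#include <iostream>
#include <memory>

int main() {
  tf::Executor executor;
  tf::Taskflow taskflow;
  taskflow.emplace([](){ std::cout << "task\n"; });

  // attach a profiler observer to the executor
  std::shared_ptr<tf::TFProfObserver> observer =
    executor.make_observer<tf::TFProfObserver>();

  executor.run(taskflow).wait();

  observer->dump(std::cout);         // JSON timeline consumable by the Taskflow profiler
  std::cout << observer->summary();  // human-readable per-task/per-worker summary
  return 0;
}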
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/notifier.hpp
// 2019/02/09 - created by Tsung-Wei Huang // - modified the event count from Eigen #pragma once #include <iostream> #include <vector> #include <cstdlib> #include <cstdio> #include <atomic> #include <memory> #include <deque> #include <mutex> #include <condition_variable> #include <thread> #include <algorithm> #include <numeric> #include <cassert> // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Dmitry Vyukov <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. namespace tf { // Notifier allows to wait for arbitrary predicates in non-blocking // algorithms. Think of condition variable, but wait predicate does not need to // be protected by a mutex. Usage: // Waiting thread does: // // if (predicate) // return act(); // Notifier::Waiter& w = waiters[my_index]; // ec.prepare_wait(&w); // if (predicate) { // ec.cancel_wait(&w); // return act(); // } // ec.commit_wait(&w); // // Notifying thread does: // // predicate = true; // ec.notify(true); // // notify is cheap if there are no waiting threads. prepare_wait/commit_wait are not // cheap, but they are executed only if the preceeding predicate check has // failed. // // Algorihtm outline: // There are two main variables: predicate (managed by user) and _state. // Operation closely resembles Dekker mutual algorithm: // https://en.wikipedia.org/wiki/Dekker%27s_algorithm // Waiting thread sets _state then checks predicate, Notifying thread sets // predicate then checks _state. Due to seq_cst fences in between these // operations it is guaranteed than either waiter will see predicate change // and won't block, or notifying thread will see _state change and will unblock // the waiter, or both. But it can't happen that both threads don't see each // other changes, which would lead to deadlock. class Notifier { friend class Executor; public: struct Waiter { std::atomic<Waiter*> next; std::mutex mu; std::condition_variable cv; uint64_t epoch; unsigned state; enum { kNotSignaled, kWaiting, kSignaled, }; }; explicit Notifier(size_t N) : _waiters{N} { assert(_waiters.size() < (1 << kWaiterBits) - 1); // Initialize epoch to something close to overflow to test overflow. _state = kStackMask | (kEpochMask - kEpochInc * _waiters.size() * 2); } ~Notifier() { // Ensure there are no waiters. assert((_state.load() & (kStackMask | kWaiterMask)) == kStackMask); } // prepare_wait prepares for waiting. // After calling this function the thread must re-check the wait predicate // and call either cancel_wait or commit_wait passing the same Waiter object. void prepare_wait(Waiter* w) { w->epoch = _state.fetch_add(kWaiterInc, std::memory_order_relaxed); std::atomic_thread_fence(std::memory_order_seq_cst); } // commit_wait commits waiting. void commit_wait(Waiter* w) { w->state = Waiter::kNotSignaled; // Modification epoch of this waiter. uint64_t epoch = (w->epoch & kEpochMask) + (((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift); uint64_t state = _state.load(std::memory_order_seq_cst); for (;;) { if (int64_t((state & kEpochMask) - epoch) < 0) { // The preceeding waiter has not decided on its fate. Wait until it // calls either cancel_wait or commit_wait, or is notified. std::this_thread::yield(); state = _state.load(std::memory_order_seq_cst); continue; } // We've already been notified. 
if (int64_t((state & kEpochMask) - epoch) > 0) return; // Remove this thread from prewait counter and add it to the waiter list. assert((state & kWaiterMask) != 0); uint64_t newstate = state - kWaiterInc + kEpochInc; //newstate = (newstate & ~kStackMask) | (w - &_waiters[0]); newstate = static_cast<uint64_t>((newstate & ~kStackMask) | static_cast<uint64_t>(w - &_waiters[0])); if ((state & kStackMask) == kStackMask) w->next.store(nullptr, std::memory_order_relaxed); else w->next.store(&_waiters[state & kStackMask], std::memory_order_relaxed); if (_state.compare_exchange_weak(state, newstate, std::memory_order_release)) break; } _park(w); } // cancel_wait cancels effects of the previous prepare_wait call. void cancel_wait(Waiter* w) { uint64_t epoch = (w->epoch & kEpochMask) + (((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift); uint64_t state = _state.load(std::memory_order_relaxed); for (;;) { if (int64_t((state & kEpochMask) - epoch) < 0) { // The preceeding waiter has not decided on its fate. Wait until it // calls either cancel_wait or commit_wait, or is notified. std::this_thread::yield(); state = _state.load(std::memory_order_relaxed); continue; } // We've already been notified. if (int64_t((state & kEpochMask) - epoch) > 0) return; // Remove this thread from prewait counter. assert((state & kWaiterMask) != 0); if (_state.compare_exchange_weak(state, state - kWaiterInc + kEpochInc, std::memory_order_relaxed)) return; } } // notify wakes one or all waiting threads. // Must be called after changing the associated wait predicate. void notify(bool all) { std::atomic_thread_fence(std::memory_order_seq_cst); uint64_t state = _state.load(std::memory_order_acquire); for (;;) { // Easy case: no waiters. if ((state & kStackMask) == kStackMask && (state & kWaiterMask) == 0) return; uint64_t waiters = (state & kWaiterMask) >> kWaiterShift; uint64_t newstate; if (all) { // Reset prewait counter and empty wait list. newstate = (state & kEpochMask) + (kEpochInc * waiters) + kStackMask; } else if (waiters) { // There is a thread in pre-wait state, unblock it. newstate = state + kEpochInc - kWaiterInc; } else { // Pop a waiter from list and unpark it. Waiter* w = &_waiters[state & kStackMask]; Waiter* wnext = w->next.load(std::memory_order_relaxed); uint64_t next = kStackMask; //if (wnext != nullptr) next = wnext - &_waiters[0]; if (wnext != nullptr) next = static_cast<uint64_t>(wnext - &_waiters[0]); // Note: we don't add kEpochInc here. ABA problem on the lock-free stack // can't happen because a waiter is re-pushed onto the stack only after // it was in the pre-wait state which inevitably leads to epoch // increment. newstate = (state & kEpochMask) + next; } if (_state.compare_exchange_weak(state, newstate, std::memory_order_acquire)) { if (!all && waiters) return; // unblocked pre-wait thread if ((state & kStackMask) == kStackMask) return; Waiter* w = &_waiters[state & kStackMask]; if (!all) w->next.store(nullptr, std::memory_order_relaxed); _unpark(w); return; } } } // notify n workers void notify_n(size_t n) { if(n >= _waiters.size()) { notify(true); } else { for(size_t k=0; k<n; ++k) { notify(false); } } } size_t size() const { return _waiters.size(); } private: // State_ layout: // - low kStackBits is a stack of waiters committed wait. // - next kWaiterBits is count of waiters in prewait state. // - next kEpochBits is modification counter. 
static const uint64_t kStackBits = 16; static const uint64_t kStackMask = (1ull << kStackBits) - 1; static const uint64_t kWaiterBits = 16; static const uint64_t kWaiterShift = 16; static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1) << kWaiterShift; static const uint64_t kWaiterInc = 1ull << kWaiterBits; static const uint64_t kEpochBits = 32; static const uint64_t kEpochShift = 32; static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift; static const uint64_t kEpochInc = 1ull << kEpochShift; std::atomic<uint64_t> _state; std::vector<Waiter> _waiters; void _park(Waiter* w) { std::unique_lock<std::mutex> lock(w->mu); while (w->state != Waiter::kSignaled) { w->state = Waiter::kWaiting; w->cv.wait(lock); } } void _unpark(Waiter* waiters) { Waiter* next = nullptr; for (Waiter* w = waiters; w; w = next) { next = w->next.load(std::memory_order_relaxed); unsigned state; { std::unique_lock<std::mutex> lock(w->mu); state = w->state; w->state = Waiter::kSignaled; } // Avoid notifying if it wasn't waiting. if (state == Waiter::kWaiting) w->cv.notify_one(); } } }; } // namespace tf ------------------------------------------------------------
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/topology.hpp
#pragma once namespace tf { // ---------------------------------------------------------------------------- // class: TopologyBase class TopologyBase { friend class Executor; friend class Node; template <typename T> friend class Future; protected: std::atomic<bool> _is_cancelled { false }; }; // ---------------------------------------------------------------------------- // class: AsyncTopology class AsyncTopology : public TopologyBase { }; // ---------------------------------------------------------------------------- // class: Topology class Topology : public TopologyBase { friend class Executor; friend class Runtime; public: template <typename P, typename C> Topology(Taskflow&, P&&, C&&); private: Taskflow& _taskflow; std::promise<void> _promise; SmallVector<Node*> _sources; std::function<bool()> _pred; std::function<void()> _call; std::atomic<size_t> _join_counter {0}; }; // Constructor template <typename P, typename C> Topology::Topology(Taskflow& tf, P&& p, C&& c): _taskflow(tf), _pred {std::forward<P>(p)}, _call {std::forward<C>(c)} { } } // end of namespace tf. ----------------------------------------------------
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/semaphore.hpp
#pragma once #include <vector> #include <mutex> #include "declarations.hpp" /** @file semaphore.hpp @brief semaphore include file */ namespace tf { // ---------------------------------------------------------------------------- // Semaphore // ---------------------------------------------------------------------------- /** @class Semaphore @brief class to create a semophore object for building a concurrency constraint A semaphore creates a constraint that limits the maximum concurrency, i.e., the number of workers, in a set of tasks. You can let a task acquire/release one or multiple semaphores before/after executing its work. A task can acquire and release a semaphore, or just acquire or just release it. A tf::Semaphore object starts with an initial count. As long as that count is above 0, tasks can acquire the semaphore and do their work. If the count is 0 or less, a task trying to acquire the semaphore will not run but goes to a waiting list of that semaphore. When the semaphore is released by another task, it reschedules all tasks on that waiting list. @code{.cpp} tf::Executor executor(8); // create an executor of 8 workers tf::Taskflow taskflow; tf::Semaphore semaphore(1); // create a semaphore with initial count 1 std::vector<tf::Task> tasks { taskflow.emplace([](){ std::cout << "A" << std::endl; }), taskflow.emplace([](){ std::cout << "B" << std::endl; }), taskflow.emplace([](){ std::cout << "C" << std::endl; }), taskflow.emplace([](){ std::cout << "D" << std::endl; }), taskflow.emplace([](){ std::cout << "E" << std::endl; }) }; for(auto & task : tasks) { // each task acquires and release the semaphore task.acquire(semaphore); task.release(semaphore); } executor.run(taskflow).wait(); @endcode The above example creates five tasks with no dependencies between them. Under normal circumstances, the five tasks would be executed concurrently. However, this example has a semaphore with initial count 1, and all tasks need to acquire that semaphore before running and release that semaphore after they are done. This arrangement limits the number of concurrently running tasks to only one. */ class Semaphore { friend class Node; public: /** @brief constructs a semaphore with the given counter A semaphore creates a constraint that limits the maximum concurrency, i.e., the number of workers, in a set of tasks. @code{.cpp} tf::Semaphore semaphore(4); // concurrency constraint of 4 workers @endcode */ explicit Semaphore(size_t max_workers); /** @brief queries the counter value (not thread-safe during the run) */ size_t count() const; private: std::mutex _mtx; size_t _counter; std::vector<Node*> _waiters; bool _try_acquire_or_wait(Node*); std::vector<Node*> _release(); }; inline Semaphore::Semaphore(size_t max_workers) : _counter(max_workers) { } inline bool Semaphore::_try_acquire_or_wait(Node* me) { std::lock_guard<std::mutex> lock(_mtx); if(_counter > 0) { --_counter; return true; } else { _waiters.push_back(me); return false; } } inline std::vector<Node*> Semaphore::_release() { std::lock_guard<std::mutex> lock(_mtx); ++_counter; std::vector<Node*> r{std::move(_waiters)}; return r; } inline size_t Semaphore::count() const { return _counter; } } // end of namespace tf. ---------------------------------------------------
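The class comment above notes that a task may acquire a semaphore without releasing it, or vice versa; a sketch of that split pattern, limiting the number of in-flight produce/consume pairs (the names, sizes, and include path are illustrative assumptions):

#include <taskflow/taskflow.hpp>   // umbrella header path assumed
#include <iostream>

int main() {
  tf::Executor executor(4);
  tf::Taskflow taskflow;
  tf::Semaphore slots(2);   // at most two produce/consume pairs in flight

  for(int i = 0; i < 8; ++i) {
    tf::Task produce = taskflow.emplace([i](){ std::cout << "produce " << i << '\n'; });
    tf::Task consume = taskflow.emplace([i](){ std::cout << "consume " << i << '\n'; });
    produce.precede(consume);

    produce.acquire(slots);   // the producer only acquires a slot ...
    consume.release(slots);   // ... and the matching consumer releases it
  }

  executor.run(taskflow).wait();
  return 0;
}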
hpp
oneAPI-samples
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_simpleCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/taskflow.hpp
#pragma once #include "flow_builder.hpp" /** @file taskflow/core/taskflow.hpp @brief taskflow include file */ namespace tf { // ---------------------------------------------------------------------------- /** @class Taskflow @brief class to create a taskflow object A %taskflow manages a task dependency graph where each task represents a callable object (e.g., @std_lambda, @std_function) and an edge represents a dependency between two tasks. A task is one of the following types: 1. static task : the callable constructible from @c std::function<void()> 2. dynamic task : the callable constructible from @c std::function<void(tf::Subflow&)> 3. condition task : the callable constructible from @c std::function<int()> 4. multi-condition task: the callable constructible from @c %std::function<tf::SmallVector<int>()> 5. module task : the task constructed from tf::Taskflow::composed_of 6. runtime task : the callable constructible from @c std::function<void(tf::Runtime&)> 7. %cudaFlow task : the callable constructible from @c std::function<void(tf::cudaFlow&)> or @c std::function<void(tf::cudaFlowCapturer&)> 8. %syclFlow task : the callable constructible from @c std::function<void(tf::syclFlow&)> Each task is a basic computation unit and is run by one worker thread from an executor. The following example creates a simple taskflow graph of four static tasks, @c A, @c B, @c C, and @c D, where @c A runs before @c B and @c C and @c D runs after @c B and @c C. @code{.cpp} tf::Executor executor; tf::Taskflow taskflow("simple"); tf::Task A = taskflow.emplace([](){ std::cout << "TaskA\n"; }); tf::Task B = taskflow.emplace([](){ std::cout << "TaskB\n"; }); tf::Task C = taskflow.emplace([](){ std::cout << "TaskC\n"; }); tf::Task D = taskflow.emplace([](){ std::cout << "TaskD\n"; }); A.precede(B, C); // A runs before B and C D.succeed(B, C); // D runs after B and C executor.run(taskflow).wait(); @endcode The taskflow object itself is NOT thread-safe. You should not modifying the graph while it is running, such as adding new tasks, adding new dependencies, and moving the taskflow to another. To minimize the overhead of task creation, our runtime leverages a global object pool to recycle tasks in a thread-safe manner. Please refer to @ref Cookbook to learn more about each task type and how to submit a taskflow to an executor. */ class Taskflow : public FlowBuilder { friend class Topology; friend class Executor; friend class FlowBuilder; struct Dumper { size_t id; std::stack<std::pair<const Node*, const Graph*>> stack; std::unordered_map<const Graph*, size_t> visited; }; public: /** @brief constructs a taskflow with the given name @code{.cpp} tf::Taskflow taskflow("My Taskflow"); std::cout << taskflow.name(); // "My Taskflow" @endcode */ Taskflow(const std::string& name); /** @brief constructs a taskflow */ Taskflow(); /** @brief constructs a taskflow from a moved taskflow Constructing a taskflow @c taskflow1 from a moved taskflow @c taskflow2 will migrate the graph of @c taskflow2 to @c taskflow1. After the move, @c taskflow2 will become empty. @code{.cpp} tf::Taskflow taskflow1(std::move(taskflow2)); assert(taskflow2.empty()); @endcode Notice that @c taskflow2 should not be running in an executor during the move operation, or the behavior is undefined. */ Taskflow(Taskflow&& rhs); /** @brief move assignment operator Moving a taskflow @c taskflow2 to another taskflow @c taskflow1 will destroy the existing graph of @c taskflow1 and assign it the graph of @c taskflow2. After the move, @c taskflow2 will become empty. 
@code{.cpp} taskflow1 = std::move(taskflow2); assert(taskflow2.empty()); @endcode Notice that both @c taskflow1 and @c taskflow2 should not be running in an executor during the move operation, or the behavior is undefined. */ Taskflow& operator = (Taskflow&& rhs); /** @brief default destructor When the destructor is called, all tasks and their associated data (e.g., captured data) will be destroyed. It is your responsibility to ensure all submitted execution of this taskflow have completed before destroying it. For instance, the following code results in undefined behavior since the executor may still be running the taskflow while it is destroyed after the block. @code{.cpp} { tf::Taskflow taskflow; executor.run(taskflow); } @endcode To fix the problem, we must wait for the execution to complete before destroying the taskflow. @code{.cpp} { tf::Taskflow taskflow; executor.run(taskflow).wait(); } @endcode */ ~Taskflow() = default; /** @brief dumps the taskflow to a DOT format through a std::ostream target @code{.cpp} taskflow.dump(std::cout); // dump the graph to the standard output std::ofstream ofs("output.dot"); taskflow.dump(ofs); // dump the graph to the file output.dot @endcode For dynamically spawned tasks, such as module tasks, subflow tasks, and GPU tasks, you need to run the taskflow first before you can dump the entire graph. @code{.cpp} tf::Task parent = taskflow.emplace([](tf::Subflow sf){ sf.emplace([](){ std::cout << "child\n"; }); }); taskflow.dump(std::cout); // this dumps only the parent tasks executor.run(taskflow).wait(); taskflow.dump(std::cout); // this dumps both parent and child tasks @endcode */ void dump(std::ostream& ostream) const; /** @brief dumps the taskflow to a std::string of DOT format This method is similar to tf::Taskflow::dump(std::ostream& ostream), but returning a string of the graph in DOT format. */ std::string dump() const; /** @brief queries the number of tasks */ size_t num_tasks() const; /** @brief queries the emptiness of the taskflow An empty taskflow has no tasks. That is the return of tf::Taskflow::num_tasks is zero. */ bool empty() const; /** @brief assigns a name to the taskflow @code{.cpp} taskflow.name("assign another name"); @endcode */ void name(const std::string&); /** @brief queries the name of the taskflow @code{.cpp} std::cout << "my name is: " << taskflow.name(); @endcode */ const std::string& name() const; /** @brief clears the associated task dependency graph When you clear a taskflow, all tasks and their associated data (e.g., captured data in task callables) will be destroyed. The behavior of clearing a running taskflow is undefined. */ void clear(); /** @brief applies a visitor to each task in the taskflow A visitor is a callable that takes an argument of type tf::Task and returns nothing. The following example iterates each task in a taskflow and prints its name: @code{.cpp} taskflow.for_each_task([](tf::Task task){ std::cout << task.name() << '\n'; }); @endcode */ template <typename V> void for_each_task(V&& visitor) const; /** @brief returns a reference to the underlying graph object A graph object (of type tf::Graph) is the ultimate storage for the task dependency graph and should only be used as an opaque data structure to interact with the executor (e.g., composition). 
    */
    Graph& graph();

  private:

    mutable std::mutex _mutex;

    std::string _name;

    Graph _graph;

    std::queue<std::shared_ptr<Topology>> _topologies;

    std::optional<std::list<Taskflow>::iterator> _satellite;

    void _dump(std::ostream&, const Graph*) const;
    void _dump(std::ostream&, const Node*, Dumper&) const;
    void _dump(std::ostream&, const Graph*, Dumper&) const;
};

// Constructor
inline Taskflow::Taskflow(const std::string& name) :
  FlowBuilder {_graph},
  _name {name} {
}

// Constructor
inline Taskflow::Taskflow() : FlowBuilder{_graph} {
}

// Move constructor
inline Taskflow::Taskflow(Taskflow&& rhs) : FlowBuilder{_graph} {

  std::scoped_lock<std::mutex> lock(rhs._mutex);

  _name = std::move(rhs._name);
  _graph = std::move(rhs._graph);
  _topologies = std::move(rhs._topologies);
  _satellite = rhs._satellite;

  rhs._satellite.reset();
}

// Move assignment
inline Taskflow& Taskflow::operator = (Taskflow&& rhs) {
  if(this != &rhs) {
    std::scoped_lock<std::mutex, std::mutex> lock(_mutex, rhs._mutex);
    _name = std::move(rhs._name);
    _graph = std::move(rhs._graph);
    _topologies = std::move(rhs._topologies);
    _satellite = rhs._satellite;
    rhs._satellite.reset();
  }
  return *this;
}

// Procedure: clear
inline void Taskflow::clear() {
  _graph._clear();
}

// Function: num_tasks
inline size_t Taskflow::num_tasks() const {
  return _graph.size();
}

// Function: empty
inline bool Taskflow::empty() const {
  return _graph.empty();
}

// Function: name
inline void Taskflow::name(const std::string &name) {
  _name = name;
}

// Function: name
inline const std::string& Taskflow::name() const {
  return _name;
}

// Function: graph
inline Graph& Taskflow::graph() {
  return _graph;
}

// Function: for_each_task
template <typename V>
void Taskflow::for_each_task(V&& visitor) const {
  for(size_t i=0; i<_graph._nodes.size(); ++i) {
    visitor(Task(_graph._nodes[i]));
  }
}

// Function: dump
inline std::string Taskflow::dump() const {
  std::ostringstream oss;
  dump(oss);
  return oss.str();
}

// Procedure: dump
inline void Taskflow::dump(std::ostream& os) const {
  os << "digraph Taskflow {\n";
  _dump(os, &_graph);
  os << "}\n";
}

// Procedure: _dump
inline void Taskflow::_dump(std::ostream& os, const Graph* top) const {

  Dumper dumper;

  dumper.id = 0;
  dumper.stack.push({nullptr, top});
  dumper.visited[top] = dumper.id++;

  while(!dumper.stack.empty()) {

    auto [p, f] = dumper.stack.top();
    dumper.stack.pop();

    os << "subgraph cluster_p" << f << " {\nlabel=\"";

    // n-level module
    if(p) {
      os << 'm' << dumper.visited[f];
    }
    // top-level taskflow graph
    else {
      os << "Taskflow: ";
      if(_name.empty()) os << 'p' << this;
      else os << _name;
    }

    os << "\";\n";

    _dump(os, f, dumper);
    os << "}\n";
  }
}

// Procedure: _dump
inline void Taskflow::_dump(
  std::ostream& os, const Node* node, Dumper& dumper
) const {

  os << 'p' << node << "[label=\"";
  if(node->_name.empty()) os << 'p' << node;
  else os << node->_name;
  os << "\" ";

  // shape for node
  switch(node->_handle.index()) {

    case Node::CONDITION:
    case Node::MULTI_CONDITION:
      os << "shape=diamond color=black fillcolor=aquamarine style=filled";
    break;

    case Node::RUNTIME:
      os << "shape=component";
    break;

    case Node::CUDAFLOW:
      os << " style=\"filled\""
         << " color=\"black\" fillcolor=\"purple\""
         << " fontcolor=\"white\""
         << " shape=\"folder\"";
    break;

    case Node::SYCLFLOW:
      os << " style=\"filled\""
         << " color=\"black\" fillcolor=\"red\""
         << " fontcolor=\"white\""
         << " shape=\"folder\"";
    break;

    default:
    break;
  }

  os << "];\n";

  for(size_t s=0; s<node->_successors.size(); ++s) {
    if(node->_is_conditioner()) {
      // case edge is dashed
      os << 'p' << node << " -> p" << node->_successors[s]
         << " [style=dashed label=\"" << s << "\"];\n";
    }
    else {
      os << 'p' << node << " -> p" << node->_successors[s] << ";\n";
    }
  }

  // subflow join node
  if(node->_parent && node->_parent->_handle.index() == Node::DYNAMIC &&
     node->_successors.size() == 0
    ) {
    os << 'p' << node << " -> p" << node->_parent << ";\n";
  }

  // node info
  switch(node->_handle.index()) {

    case Node::DYNAMIC: {
      auto& sbg = std::get_if<Node::Dynamic>(&node->_handle)->subgraph;
      if(!sbg.empty()) {
        os << "subgraph cluster_p" << node << " {\nlabel=\"Subflow: ";
        if(node->_name.empty()) os << 'p' << node;
        else os << node->_name;

        os << "\";\n" << "color=blue\n";
        _dump(os, &sbg, dumper);
        os << "}\n";
      }
    }
    break;

    case Node::CUDAFLOW: {
      std::get_if<Node::cudaFlow>(&node->_handle)->graph->dump(
        os, node, node->_name
      );
    }
    break;

    case Node::SYCLFLOW: {
      std::get_if<Node::syclFlow>(&node->_handle)->graph->dump(
        os, node, node->_name
      );
    }
    break;

    default:
    break;
  }
}

// Procedure: _dump
inline void Taskflow::_dump(
  std::ostream& os, const Graph* graph, Dumper& dumper
) const {

  for(const auto& n : graph->_nodes) {

    // regular task
    if(n->_handle.index() != Node::MODULE) {
      _dump(os, n, dumper);
    }
    // module task
    else {
      //auto module = &(std::get_if<Node::Module>(&n->_handle)->module);
      auto module = &(std::get_if<Node::Module>(&n->_handle)->graph);

      os << 'p' << n << "[shape=box3d, color=blue, label=\"";
      if(n->_name.empty()) os << 'p' << n;
      else os << n->_name;

      if(dumper.visited.find(module) == dumper.visited.end()) {
        dumper.visited[module] = dumper.id++;
        dumper.stack.push({n, module});
      }

      os << " [m" << dumper.visited[module] << "]\"];\n";

      for(const auto s : n->_successors) {
        os << 'p' << n << "->" << 'p' << s << ";\n";
      }
    }
  }
}

// ----------------------------------------------------------------------------
// class definition: Future
// ----------------------------------------------------------------------------

/**
@class Future

@brief class to access the result of an execution

tf::Future is a derived class from std::future that will eventually hold the
execution result of a submitted taskflow (tf::Executor::run)
or an asynchronous task (tf::Executor::async, tf::Executor::silent_async).
In addition to the base methods inherited from std::future,
you can call tf::Future::cancel to cancel the execution of the running
taskflow associated with this future object.
The following example cancels a submission of a taskflow that contains
1000 tasks, each running for one second.

@code{.cpp}
tf::Executor executor;
tf::Taskflow taskflow;

for(int i=0; i<1000; i++) {
  taskflow.emplace([](){
    std::this_thread::sleep_for(std::chrono::seconds(1));
  });
}

// submit the taskflow
tf::Future fu = executor.run(taskflow);

// request to cancel the submitted execution above
fu.cancel();

// wait until the cancellation finishes
fu.get();
@endcode
*/
template <typename T>
class Future : public std::future<T> {

  friend class Executor;
  friend class Subflow;

  using handle_t = std::variant<
    std::monostate, std::weak_ptr<Topology>, std::weak_ptr<AsyncTopology>
  >;

  // variant index
  constexpr static auto ASYNC = get_index_v<std::weak_ptr<AsyncTopology>, handle_t>;
  constexpr static auto TASKFLOW = get_index_v<std::weak_ptr<Topology>, handle_t>;

  public:

    /**
    @brief default constructor
    */
    Future() = default;

    /**
    @brief disabled copy constructor
    */
    Future(const Future&) = delete;

    /**
    @brief default move constructor
    */
    Future(Future&&) = default;

    /**
    @brief disabled copy assignment
    */
    Future& operator = (const Future&) = delete;

    /**
    @brief default move assignment
    */
    Future& operator = (Future&&) = default;

    /**
    @brief cancels the execution of the running taskflow associated with
           this future object

    @return @c true if the execution can be cancelled or
            @c false if the execution has already completed

    When you request a cancellation, the executor will stop scheduling
    any tasks onwards. Tasks that are already running will continue to
    finish (non-preemptive).
    You can call tf::Future::wait to wait for the cancellation to complete.
    */
    bool cancel();

  private:

    handle_t _handle;

    template <typename P>
    Future(std::future<T>&&, P&&);
};

template <typename T>
template <typename P>
Future<T>::Future(std::future<T>&& fu, P&& p) :
  std::future<T> {std::move(fu)},
  _handle {std::forward<P>(p)} {
}

// Function: cancel
template <typename T>
bool Future<T>::cancel() {
  return std::visit([](auto&& arg){
    using P = std::decay_t<decltype(arg)>;
    if constexpr(std::is_same_v<P, std::monostate>) {
      return false;
    }
    else {
      auto ptr = arg.lock();
      if(ptr) {
        ptr->_is_cancelled.store(true, std::memory_order_relaxed);
        return true;
      }
      return false;
    }
  }, _handle);
}

}  // end of namespace tf. ---------------------------------------------------
hpp