repo_name
stringclasses
10 values
file_path
stringlengths
29
222
content
stringlengths
24
926k
extention
stringclasses
5 values
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/src/common/gui/gdivideo.cpp
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= // common Windows parts #include "winvideo.h" // include GDI+ headers #include <gdiplus.h> // and another headers #include <stdio.h> // tag linking library #pragma comment(lib, "gdiplus.lib") // global specific variables Gdiplus::Bitmap * g_pBitmap; // main drawing bitmap ULONG_PTR gdiplusToken; Gdiplus::GdiplusStartupInput gdiplusStartupInput;// GDI+ //! display system error bool DisplayError(LPSTR lpstrErr, HRESULT hres) { static bool InError = false; int retval = 0; if (!InError) { InError = true; LPCSTR lpMsgBuf; if(!hres) hres = GetLastError(); FormatMessage( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, hres, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR) &lpMsgBuf, 0, NULL ); retval = MessageBox(g_hAppWnd, lpstrErr, lpMsgBuf, MB_OK|MB_ICONERROR); LocalFree( (HLOCAL)lpMsgBuf ); InError = false; } return false; } //! Win event processing function LRESULT CALLBACK InternalWndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam) { switch (iMsg) { case WM_MOVE: // Check to make sure our window exists before we tell it to repaint. // This will fail the first time (while the window is being created). if (hwnd) { InvalidateRect(hwnd, NULL, FALSE); UpdateWindow(hwnd); } return 0L; case WM_PAINT: { PAINTSTRUCT ps; Gdiplus::Graphics graphics( BeginPaint(hwnd, &ps) ); // redraw just requested area. 
This call is as fast as simple DrawImage() call. if(g_video->updating) graphics.DrawImage(g_pBitmap, ps.rcPaint.left, ps.rcPaint.top, ps.rcPaint.left, ps.rcPaint.top, ps.rcPaint.right, ps.rcPaint.bottom, Gdiplus::UnitPixel); EndPaint(hwnd, &ps); } return 0L; // Proccess all mouse and keyboard events case WM_LBUTTONDOWN: g_video->on_mouse( (int)LOWORD(lParam), (int)HIWORD(lParam), 1); break; case WM_LBUTTONUP: g_video->on_mouse( (int)LOWORD(lParam), (int)HIWORD(lParam), -1); break; case WM_RBUTTONDOWN: g_video->on_mouse( (int)LOWORD(lParam), (int)HIWORD(lParam), 2); break; case WM_RBUTTONUP: g_video->on_mouse( (int)LOWORD(lParam), (int)HIWORD(lParam), -2); break; case WM_MBUTTONDOWN: g_video->on_mouse( (int)LOWORD(lParam), (int)HIWORD(lParam), 3); break; case WM_MBUTTONUP: g_video->on_mouse( (int)LOWORD(lParam), (int)HIWORD(lParam), -3); break; case WM_CHAR: g_video->on_key( (int)wParam); break; // some useless stuff case WM_ERASEBKGND: return 1; // keeps erase-background events from happening, reduces chop case WM_DISPLAYCHANGE: return 0; // Now, shut down the window... case WM_DESTROY: PostQuitMessage(0); return 0; } // call user defined proc, if exists return g_pUserProc? 
g_pUserProc(hwnd, iMsg, wParam, lParam) : DefWindowProc(hwnd, iMsg, wParam, lParam); } ///////////// video functions //////////////// bool video::init_window(int sizex, int sizey) { assert(win_hInstance != 0); g_sizex = sizex; g_sizey = sizey; if (!WinInit(win_hInstance, win_iCmdShow, gWndClass, title, true)) { DisplayError("Unable to initialize the program's window."); return false; } ShowWindow(g_hAppWnd, SW_SHOW); Gdiplus::GdiplusStartup(&gdiplusToken, &gdiplusStartupInput, NULL); g_pImg = new unsigned int[sizex*sizey]; g_pBitmap = new Gdiplus::Bitmap(g_sizex, g_sizey, 4*g_sizex, PixelFormat32bppRGB, (BYTE*)g_pImg ); running = true; return true; } void video::terminate() { if(g_pBitmap) { delete g_pBitmap; g_pBitmap = 0; } Gdiplus::GdiplusShutdown(gdiplusToken); g_video = 0; running = false; if(g_pImg) { delete[] g_pImg; g_pImg = 0; } } //////////// drawing area constructor & destructor ///////////// drawing_area::drawing_area(int x, int y, int sizex, int sizey) : start_x(x), start_y(y), size_x(sizex), size_y(sizey), pixel_depth(24), base_index(y*g_sizex + x), max_index(g_sizex*g_sizey), index_stride(g_sizex), ptr32(g_pImg) { assert(x < g_sizex); assert(y < g_sizey); assert(x+sizex <= g_sizex); assert(y+sizey <= g_sizey); index = base_index; // current index } void drawing_area::update() { if(g_video->updating) { RECT r; r.left = start_x; r.right = start_x + size_x; r.top = start_y; r.bottom = start_y + size_y; InvalidateRect(g_hAppWnd, &r, false); } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/src/common/gui/xvideo.cpp
//==============================================================
//
// SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT,
// http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/
//
// Copyright (C) Intel Corporation
//
// THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT
// NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS.
//
// =============================================================

// X11 implementation of the sample's video-output layer. It tries, in order:
// MIT-SHM shared-memory pixmap (vidtype 2), shared-memory XImage (vidtype 1),
// a plain heap-allocated XImage (vidtype 0), and finally a windowless
// "console" buffer (vidtype 3).
// NOTE(review): this row was stored with newlines stripped; line structure is
// restored below, with code tokens unchanged.

// Uncomment next line to disable shared memory features if you do not have libXext
// (http://www.xfree86.org/current/mit-shm.html)
//#define X_NOSHMEM

// Note that it may happen that the build environment supports the shared-memory extension
// (so there's no build-time reason to disable the relevant code by defining X_NOSHMEM),
// but that using shared memory still fails at run time.
// This situation will (ultimately) cause the error handler set by XSetErrorHandler()
// to be invoked with XErrorEvent::minor_code==X_ShmAttach. The code below tries to make
// such a determination at XShmAttach() time, which seems plausible, but unfortunately
// it has also been observed in a specific environment that the error may be reported
// at a later time instead, even after video::init_window() has returned.
// It is not clear whether this may happen in that way in any environment where it might
// depend on the kind of display, e.g., local vs. over "ssh -X", so #define'ing X_NOSHMEM
// may not always be the appropriate solution, therefore an environment variable
// has been introduced to disable shared memory at run time.
// A diagnostic has been added to advise the user about possible workarounds.
// X_ShmAttach macro was changed to 1 due to recent changes to X11/extensions/XShm.h header.

#include "video.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/keysym.h>
#include <sys/time.h>
#include <signal.h>
#include <pthread.h>

#ifndef X_NOSHMEM
#include <errno.h>
#include <X11/extensions/XShm.h>
#include <sys/ipc.h>
#include <sys/shm.h>

static XShmSegmentInfo shmseginfo;                           // shared-memory segment descriptor
static Pixmap pixmap = 0;                                    // backing pixmap for vidtype 2
static bool already_called_X_ShmAttach = false;              // lets xerr_handler attribute late errors
static bool already_advised_about_NOSHMEM_workarounds = false;
static const char* NOSHMEM_env_var_name = "TBB_EXAMPLES_X_NOSHMEM";
#endif

static char *display_name = NULL;
static Display *dpy = NULL;
static Screen *scrn;
static Visual *vis;
static Colormap cmap;
static GC gc;
static Window win, rootW;
static int dispdepth = 0;     // bits per pixel of the chosen visual
static XGCValues xgcv;
static XImage *ximage;
static int x_error = 0;       // last X error code captured by xerr_handler
static int vidtype = 3;       // 0 generic, 1 shm image, 2 shm pixmap, 3 console/stopped
int g_sizex, g_sizey;
static video *g_video = 0;
unsigned int *g_pImg = 0;     // the frame buffer the renderer draws into
static int g_fps = 0;
struct timeval g_time;
// serializes X calls between the render thread and next_frame()
static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;
Atom _XA_WM_DELETE_WINDOW = 0;// like in Xatom.h

///////////////////////////////////////////// public methods of video class ///////////////////////

//! Constructor: registers this as the singleton video object with defaults.
video::video()
{
    assert(g_video == 0);
    g_video = this;
    title = "Video";
    calc_fps = running = false;
    updating = true;
}

//! Derive the left-shift needed to place an 8-bit channel under 'mask'.
//! A zero mask falls back to an even split of the display depth.
inline void mask2bits(unsigned int mask, unsigned int &save, depth_t &shift)
{
    save = mask;
    if(!mask) { shift = dispdepth/3; return; }
    shift = 0;
    while(!(mask&1)) ++shift, mask >>= 1;   // count trailing zero bits
    int bits = 0;
    while(mask&1) ++bits, mask >>= 1;       // count the mask's width
    shift += bits - 8;                      // adjust so an 8-bit value fills the mask
}

//! X error handler: records the code, stops the loop, and advises about
//! shared-memory workarounds if the failure came from XShmAttach.
int xerr_handler(Display* dpy_, XErrorEvent *error)
{
    x_error = error->error_code;
    if(g_video) g_video->running = false;
#ifndef X_NOSHMEM
    if (error->minor_code==1/*X_ShmAttach*/ && already_called_X_ShmAttach
        && !already_advised_about_NOSHMEM_workarounds) {
        char err[256];
        XGetErrorText(dpy_, x_error, err, 255);
        fprintf(stderr, "Warning: Can't attach shared memory to display: %s (%d)\n", err, x_error);
        fprintf(stderr, "If you are seeing a black output window, try setting %s environment variable to 1"
                " to disable shared memory extensions (0 to re-enable, other values undefined),"
                " or rebuilding with X_NOSHMEM defined in " __FILE__ "\n", NOSHMEM_env_var_name);
        already_advised_about_NOSHMEM_workarounds = true;
    }
#else
    (void) dpy_; // warning prevention
#endif
    return 0;
}

//! Open the display, pick a TrueColor visual, create the window, and set up
//! the fastest available frame-buffer transport (see vidtype above).
//! Falls back to init_console() on any failure.
bool video::init_window(int xsize, int ysize)
{
    { //enclose local variables before fail label
        g_sizex = xsize; g_sizey = ysize;

        // Open the display
        if (!dpy) {
            dpy = XOpenDisplay(display_name);
            if (!dpy) {
                fprintf(stderr, "Can't open X11 display %s\n", XDisplayName(display_name));
                goto fail;
            }
        }
        int theScreen = DefaultScreen(dpy);
        scrn = ScreenOfDisplay(dpy, theScreen);
        dispdepth = DefaultDepth(dpy, theScreen);
        XVisualInfo vinfo;
        // Prefer the default depth, then try common TrueColor depths in turn.
        if (!( (dispdepth >= 15 && dispdepth <= 32 &&
                XMatchVisualInfo(dpy, theScreen, dispdepth, TrueColor, &vinfo) )
            || XMatchVisualInfo(dpy, theScreen, 24, TrueColor, &vinfo)
            || XMatchVisualInfo(dpy, theScreen, 32, TrueColor, &vinfo)
            || XMatchVisualInfo(dpy, theScreen, 16, TrueColor, &vinfo)
            || XMatchVisualInfo(dpy, theScreen, 15, TrueColor, &vinfo) )) {
            fprintf(stderr, "Display has no appropriate True Color visual\n");
            goto fail;
        }
        vis = vinfo.visual;
        depth = dispdepth = vinfo.depth;
        mask2bits(vinfo.red_mask, red_mask, red_shift);
        mask2bits(vinfo.green_mask, green_mask, green_shift);
        mask2bits(vinfo.blue_mask, blue_mask, blue_shift);
        rootW = RootWindow(dpy, theScreen);
        cmap = XCreateColormap(dpy, rootW, vis, AllocNone);

        XSetWindowAttributes attrs;
        attrs.backing_store = Always;
        attrs.colormap = cmap;
        attrs.event_mask = StructureNotifyMask|KeyPressMask|ButtonPressMask|ButtonReleaseMask;
        attrs.background_pixel = BlackPixelOfScreen(scrn);
        attrs.border_pixel = WhitePixelOfScreen(scrn);
        win = XCreateWindow(dpy, rootW, 0, 0, xsize, ysize, 2, dispdepth, InputOutput, vis,
                            CWBackingStore | CWColormap | CWEventMask | CWBackPixel | CWBorderPixel,
                            &attrs);
        if(!win) {
            fprintf(stderr, "Can't create the window\n");
            goto fail;
        }
        // Fix the window size: min == max == requested.
        XSizeHints sh;
        sh.flags = PSize | PMinSize | PMaxSize;
        sh.width = sh.min_width = sh.max_width = xsize;
        sh.height = sh.min_height = sh.max_height = ysize;
        XSetStandardProperties( dpy, win, g_video->title, g_video->title, None, NULL, 0, &sh );
        _XA_WM_DELETE_WINDOW = XInternAtom(dpy, "WM_DELETE_WINDOW", false);
        XSetWMProtocols(dpy, win, &_XA_WM_DELETE_WINDOW, 1);
        gc = XCreateGC(dpy, win, 0L, &xgcv);
        XMapRaised(dpy, win);
        XFlush(dpy);
#ifdef X_FULLSYNC
        XSynchronize(dpy, true);
#endif
        XSetErrorHandler(xerr_handler);

        // 16-bpp-or-less visuals use 2 bytes per pixel, otherwise 4.
        int imgbytes = xsize*ysize*(dispdepth<=16?2:4);
        const char *vidstr;
#ifndef X_NOSHMEM
        int major, minor, pixmaps;
        if(XShmQueryExtension(dpy) && XShmQueryVersion(dpy, &major, &minor, &pixmaps))
        { // Shared memory
            // Run-time opt-out: any value other than "0" disables shm.
            if(NULL!=getenv(NOSHMEM_env_var_name) && 0!=strcmp("0",getenv(NOSHMEM_env_var_name))) {
                goto generic;
            }
            shmseginfo.shmid = shmget(IPC_PRIVATE, imgbytes, IPC_CREAT|0777);
            if(shmseginfo.shmid < 0) {
                fprintf(stderr, "Warning: Can't get shared memory: %s\n", strerror(errno));
                goto generic;
            }
            g_pImg = (unsigned int*)(shmseginfo.shmaddr = (char*)shmat(shmseginfo.shmid, 0, 0));
            if(g_pImg == (unsigned int*)-1) {
                fprintf(stderr, "Warning: Can't attach to shared memory: %s\n", strerror(errno));
                shmctl(shmseginfo.shmid, IPC_RMID, NULL);
                goto generic;
            }
            shmseginfo.readOnly = false;
            // x_error may be set asynchronously by xerr_handler; see file header.
            if(!XShmAttach(dpy, &shmseginfo) || x_error) {
                char err[256];
                XGetErrorText(dpy, x_error, err, 255);
                fprintf(stderr, "Warning: Can't attach shared memory to display: %s (%d)\n", err, x_error);
                shmdt(shmseginfo.shmaddr);
                shmctl(shmseginfo.shmid, IPC_RMID, NULL);
                goto generic;
            }
            already_called_X_ShmAttach = true;
#ifndef X_NOSHMPIX
            if(pixmaps && XShmPixmapFormat(dpy) == ZPixmap)
            { // Pixmaps
                vidtype = 2;
                vidstr = "X11 shared memory pixmap";
                pixmap = XShmCreatePixmap(dpy, win, (char*)g_pImg, &shmseginfo, xsize, ysize, dispdepth);
                // The window background IS the pixmap; next_frame() just clears.
                XSetWindowBackgroundPixmap(dpy, win, pixmap);
            } else
#endif//!X_NOSHMPIX
            { // Standard
                vidtype = 1;
                vidstr = "X11 shared memory";
                ximage = XShmCreateImage(dpy, vis, dispdepth, ZPixmap, 0, &shmseginfo, xsize, ysize);
                if(!ximage) {
                    fprintf(stderr, "Can't create the shared image\n");
                    goto fail;
                }
                assert(ximage->bytes_per_line == xsize*(dispdepth<=16?2:4));
                ximage->data = shmseginfo.shmaddr;
            }
        } else
#endif
        {
#ifndef X_NOSHMEM
generic:
#endif
            vidtype = 0;
            vidstr = "generic X11";
            g_pImg = new unsigned int[imgbytes/sizeof(int)];
            ximage = XCreateImage(dpy, vis, dispdepth, ZPixmap, 0, (char*)g_pImg,
                                  xsize, ysize, 32, imgbytes/ysize);
            if(!ximage) {
                fprintf(stderr, "Can't create the image\n");
                goto fail;
            }
        }
        // Note: It may be more efficient to adopt the server's byte order
        // and swap once per get_color() call instead of once per pixel.
        const uint32_t probe = 0x03020100;
        const bool big_endian = (((const char*)(&probe))[0]==0x03);
        ximage->byte_order = big_endian ? MSBFirst : LSBFirst;
        printf("Note: using %s with %s visual for %d-bit color depth\n",
               vidstr, vis==DefaultVisual(dpy, theScreen)?"default":"non-default", dispdepth);
        running = true;
        return true;
    } // end of enclosing local variables
fail:
    terminate();
    init_console();
    return false;
}

//! Headless fallback: allocate a frame buffer with no display attached.
bool video::init_console()
{
    if(!g_pImg && g_sizex && g_sizey) {
        dispdepth = 24; red_shift = 16; vidtype = 3; // fake video
        g_pImg = new unsigned int[g_sizex*g_sizey];
        running = true;
    }
    return true;
}

//! Release all X11/shm resources; safe to call on a partially built state.
void video::terminate()
{
    running = false;
    if(dpy) {
        vidtype = 3; // stop video
        // Lock/unlock pair: wait until the render thread leaves its critical
        // section before tearing X structures down.
        if(threaded) { pthread_mutex_lock(&g_mutex); pthread_mutex_unlock(&g_mutex); }
        if(ximage) { XDestroyImage(ximage); ximage = 0; g_pImg = 0; } // it frees g_pImg for vidtype == 0
#ifndef X_NOSHMEM
        if(pixmap) XFreePixmap(dpy, pixmap);
        if(shmseginfo.shmaddr) {
            XShmDetach(dpy, &shmseginfo);
            shmdt(shmseginfo.shmaddr);
            g_pImg = 0;
        }
        if(shmseginfo.shmid >= 0) shmctl(shmseginfo.shmid, IPC_RMID, NULL);
#endif
        if(gc) XFreeGC(dpy, gc);
        if(win) XDestroyWindow(dpy, win);
        XCloseDisplay(dpy); dpy = 0;
    }
    if(g_pImg) { delete[] g_pImg; g_pImg = 0; } // if was allocated for console mode
}

video::~video()
{
    if(g_video) terminate();
    g_video = 0;
}

//! Do standard event loop
void video::main_loop()
{
    struct timezone tz;
    gettimeofday(&g_time, &tz); // start the fps clock
    on_process();
}

//! Check for pending events once
//! Returns false once the window has been closed; also refreshes the shm
//! pixmap and updates the fps title roughly once per second.
bool video::next_frame()
{
    if(!running) return false;
    //! try acquire mutex if threaded code, returns on failure
    if(vidtype == 3 || threaded && pthread_mutex_trylock(&g_mutex))
        return running;
    //! Refresh screen picture
    g_fps++;
#ifndef X_NOSHMPIX
    if(vidtype == 2 && updating) XClearWindow(dpy, win); // repaint from the background pixmap
#endif
    while( XPending(dpy) ) {
        XEvent report;
        XNextEvent(dpy, &report);
        switch( report.type ) {
        case ClientMessage:
            // Only WM_DELETE_WINDOW falls through to the shutdown path below.
            if(report.xclient.format != 32 || report.xclient.data.l[0] != _XA_WM_DELETE_WINDOW)
                break;
            // deliberate fall-through
        case DestroyNotify:
            running = false;
            // NOTE(review): fall-through into KeyPress is in the original;
            // it calls on_key() with a keysym decoded from a non-key event --
            // presumably harmless, but verify against on_key() consumers.
        case KeyPress:
            on_key( XLookupKeysym(&report.xkey, 0) );
            break;
        case ButtonPress:
            on_mouse( report.xbutton.x, report.xbutton.y, report.xbutton.button );
            break;
        case ButtonRelease:
            on_mouse( report.xbutton.x, report.xbutton.y, -report.xbutton.button );
            break;
        }
    }
    struct timezone tz;
    struct timeval now_time;
    gettimeofday(&now_time, &tz);
    double sec = (now_time.tv_sec+1.0*now_time.tv_usec/1000000.0)
               - (g_time.tv_sec+1.0*g_time.tv_usec/1000000.0);
    if(sec > 1) { // once a second: update fps title and sync with the server
        memcpy(&g_time, &now_time, sizeof(g_time));
        if(calc_fps) {
            double fps = g_fps; g_fps = 0;
            char buffer[256];
            snprintf(buffer, 256, "%s%s: %d fps", title, updating?"":" (no updating)", int(fps/sec));
            XStoreName(dpy, win, buffer);
        }
#ifndef X_FULLSYNC
        XSync(dpy, false); // It is often better then using XSynchronize(dpy, true)
#endif//X_FULLSYNC
    }
    if(threaded) pthread_mutex_unlock(&g_mutex);
    return true;
}

//! Change window title
void video::show_title()
{
    if(vidtype < 3) // only when a real window exists
        XStoreName(dpy, win, title);
}

//! Bind a sub-rectangle of the global frame buffer for a worker to draw into.
drawing_area::drawing_area(int x, int y, int sizex, int sizey)
    : start_x(x), start_y(y), size_x(sizex), size_y(sizey), pixel_depth(dispdepth),
      base_index(y*g_sizex + x), max_index(g_sizex*g_sizey), index_stride(g_sizex),
      ptr32(g_pImg)
{
    assert(x < g_sizex);        assert(y < g_sizey);
    assert(x+sizex <= g_sizex); assert(y+sizey <= g_sizey);
    index = base_index; // current index
}

//! Push this area to the window. vidtype is re-checked under the mutex since
//! terminate() may switch it to 3 concurrently; vidtype 2 needs no push here.
void drawing_area::update()
{
    if(!g_video->updating) return;
#ifndef X_NOSHMEM
    switch(vidtype) {
    case 0:
#endif
        pthread_mutex_lock(&g_mutex);
        if(vidtype == 0)
            XPutImage(dpy, win, gc, ximage, start_x, start_y, start_x, start_y, size_x, size_y);
        pthread_mutex_unlock(&g_mutex);
#ifndef X_NOSHMEM
        break;
    case 1:
        pthread_mutex_lock(&g_mutex);
        if(vidtype == 1)
            XShmPutImage(dpy, win, gc, ximage, start_x, start_y, start_x, start_y, size_x, size_y, false);
        pthread_mutex_unlock(&g_mutex);
        break;
    /*case 2: make it in next_frame(); break;*/
    }
#endif
}
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/src/common/utility/fast_random.h
//==============================================================
//
// SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT,
// http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/
//
// Copyright (C) Intel Corporation
//
// THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT
// NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS.
//
// =============================================================

#ifndef FAST_RANDOM_H_
#define FAST_RANDOM_H_

#include <cstddef> // size_t (was relied on transitively before)

namespace utility{

//------------------------------------------------------------------------
// FastRandom
//------------------------------------------------------------------------

namespace internal{
//! Map an arbitrary seed onto one of a fixed table of prime multipliers.
// FIX(review): GetPrime was a non-inline function defined in this header,
// which breaks the one-definition rule (duplicate-symbol link errors) as
// soon as two translation units include the file. It is now 'inline', and
// the prime table is a function-local static so exactly one copy exists.
inline size_t GetPrime ( size_t seed );
}

//! A fast random number generator.
/** Uses linear congruential method. */
class FastRandom {
    size_t x, a; // x: current LCG state; a: per-instance prime multiplier
public:
    //! Get a random number.
    unsigned short get() {
        return get(x);
    }
    //! Get a random number for the given seed; update the seed for next use.
    /** Returns bits 16..31 of the state *before* advancing it, so equal
        seeds always produce equal sequences. */
    unsigned short get( size_t& seed ) {
        unsigned short r = (unsigned short)(seed>>16);
        seed = seed*a+1; // LCG step; increment of 1 keeps the recurrence well-formed for any 'a'
        return r;
    }
    //! Construct a random number generator.
    /** The seed is mixed through the prime table twice so that nearby seeds
        select different multipliers. */
    FastRandom( size_t seed ) {
        x = seed*internal::GetPrime(seed);
        a = internal::GetPrime(x);
    }
};

}

namespace utility {
namespace internal{

inline size_t GetPrime ( size_t seed ) {
    //! Table of primes used by fast random-number generator (FastRandom).
    static const unsigned Primes[] = {
        0x9e3779b1, 0xffe6cc59, 0x2109f6dd, 0x43977ab5,
        0xba5703f5, 0xb495a877, 0xe1626741, 0x79695e6b,
        0xbc98c09f, 0xd5bee2b3, 0x287488f9, 0x3af18231,
        0x9677cd4d, 0xbe3a6929, 0xadc6a877, 0xdcf0674b,
        0xbe4d6fe9, 0x5f15e201, 0x99afc3fd, 0xf3f16801,
        0xe222cfff, 0x24ba5fdb, 0x0620452d, 0x79f149e3,
        0xc8b93f49, 0x972702cd, 0xb07dd827, 0x6c97d5ed,
        0x085a3d61, 0x46eb5ea7, 0x3d9910ed, 0x2e687b5b,
        0x29609227, 0x6eb081f1, 0x0954c4e1, 0x9d114db9,
        0x542acfa9, 0xb3e6bd7b, 0x0742d917, 0xe9f3ffa7,
        0x54581edb, 0xf2480f45, 0x0bb9288f, 0xef1affc7,
        0x85fa0ca7, 0x3ccc14db, 0xe6baf34b, 0x343377f7,
        0x5ca19031, 0xe6d9293b, 0xf0a9f391, 0x5d2e980b,
        0xfc411073, 0xc3749363, 0xb892d829, 0x3549366b,
        0x629750ad, 0xb98294e5, 0x892d9483, 0xc235baf3,
        0x3d2402a3, 0x6bdef3c9, 0xbec333cd, 0x40c9520f
    };
    return Primes[seed%(sizeof(Primes)/sizeof(Primes[0]))];
}

}
}

#endif /* FAST_RANDOM_H_ */
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/src/common/utility/utility.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= #ifndef UTILITY_H_ #define UTILITY_H_ #if __TBB_MIC_OFFLOAD #pragma offload_attribute (push,target(mic)) #include <exception> #include <cstdio> #pragma offload_attribute (pop) #endif // __TBB_MIC_OFFLOAD #include <string> #include <cstring> #include <vector> #include <map> #include <set> #include <algorithm> #include <sstream> #include <numeric> #include <stdexcept> #include <memory> #include <cassert> #include <iostream> #include <cstdlib> // TBB headers should not be used, as some examples may need to be built without TBB. 
namespace utility{ namespace internal{ #if ((__GNUC__*100+__GNUC_MINOR__>=404 && __GXX_EXPERIMENTAL_CXX0X__) || _MSC_VER >= 1600) && (!__INTEL_COMPILER || __INTEL_COMPILER >= 1200) || __INTEL_LLVM_COMPILER // std::unique_ptr is available, and compiler can use it #define smart_ptr std::unique_ptr using std::swap; #else #if __INTEL_COMPILER && __GXX_EXPERIMENTAL_CXX0X__ // std::unique_ptr is unavailable, so suppress std::auto_prt<> deprecation warning #pragma warning(disable: 1478) #endif #define smart_ptr std::auto_ptr // in some C++ libraries, std::swap does not work with std::auto_ptr template<typename T> void swap( smart_ptr<T>& ptr1, smart_ptr<T>& ptr2 ) { smart_ptr<T> tmp; tmp = ptr2; ptr2 = ptr1; ptr1 = tmp; } #endif //TODO: add tcs template<class dest_type> dest_type& string_to(std::string const& s, dest_type& result){ std::stringstream stream(s); stream>>result; if ((!stream)||(stream.fail())){ throw std::invalid_argument("error converting string '"+std::string(s)+"'"); } return result; } template<class dest_type> dest_type string_to(std::string const& s){ dest_type result; return string_to(s,result); } template<typename> struct is_bool { static bool value(){return false;}}; template<> struct is_bool<bool> { static bool value(){return true;}}; class type_base { type_base& operator=(const type_base&); public: const std::string name; const std::string description; type_base (std::string a_name, std::string a_description) : name(a_name), description(a_description) {} virtual void parse_and_store (const std::string & s)=0; virtual std::string value() const =0; virtual smart_ptr<type_base> clone()const =0; virtual ~type_base(){} }; template <typename type> class type_impl : public type_base { private: type_impl& operator=(const type_impl&); typedef bool(*validating_function_type)(const type&); private: type & target; validating_function_type validating_function; public: type_impl(std::string a_name, std::string a_description, type & a_target, 
validating_function_type a_validating_function = NULL) : type_base (a_name,a_description), target(a_target),validating_function(a_validating_function) {}; void parse_and_store (const std::string & s){ try{ const bool is_bool = internal::is_bool<type>::value(); if (is_bool && s.empty()){ //to avoid directly assigning true //(as it will impose additional layer of indirection) //so, simply pass it as string internal::string_to("1",target); }else { internal::string_to(s,target); } }catch(std::invalid_argument& e){ std::stringstream str; str <<"'"<<s<<"' is incorrect input for argument '"<<name<<"'" <<" ("<<e.what()<<")"; throw std::invalid_argument(str.str()); } if (validating_function){ if (!((validating_function)(target))){ std::stringstream str; str <<"'"<<target<<"' is invalid value for argument '"<<name<<"'"; throw std::invalid_argument(str.str()); } } } virtual std::string value()const{ std::stringstream str; str<<target; return str.str(); } virtual smart_ptr<type_base> clone() const { return smart_ptr<type_base>(new type_impl(*this)); } }; class argument{ private: smart_ptr<type_base> p_type; bool matched_; public: argument(argument const& other) : p_type(other.p_type.get() ? 
(other.p_type->clone()).release() : NULL) ,matched_(other.matched_) {} argument& operator=(argument a){ this->swap(a); return *this; } void swap(argument& other){ internal::swap(p_type, other.p_type); std::swap(matched_,other.matched_); } template<class type> argument(std::string a_name, std::string a_description, type& dest, bool(*a_validating_function)(const type&)= NULL) :p_type(new type_impl<type>(a_name,a_description,dest,a_validating_function)) ,matched_(false) {} std::string value()const{ return p_type->value(); } std::string name()const{ return p_type->name; } std::string description() const{ return p_type->description; } void parse_and_store(const std::string & s){ p_type->parse_and_store(s); matched_=true; } bool is_matched() const{return matched_;} }; } // namespace internal class cli_argument_pack{ typedef std::map<std::string,internal::argument> args_map_type; typedef std::vector<std::string> args_display_order_type; typedef std::vector<std::string> positional_arg_names_type; private: args_map_type args_map; args_display_order_type args_display_order; positional_arg_names_type positional_arg_names; std::set<std::string> bool_args_names; private: void add_arg(internal::argument const& a){ std::pair<args_map_type::iterator, bool> result = args_map.insert(std::make_pair(a.name(),a)); if (!result.second){ throw std::invalid_argument("argument with name: '"+a.name()+"' already registered"); } args_display_order.push_back(a.name()); } public: template<typename type> cli_argument_pack& arg(type& dest,std::string const& name, std::string const& description, bool(*validate)(const type &)= NULL){ internal::argument a(name,description,dest,validate); add_arg(a); if (internal::is_bool<type>::value()){ bool_args_names.insert(name); } return *this; } //Positional means that argument name can be omitted in actual CL //only key to match values for parameters with template<typename type> cli_argument_pack& positional_arg(type& dest,std::string const& name, std::string 
const& description, bool(*validate)(const type &)= NULL){ internal::argument a(name,description,dest,validate); add_arg(a); if (internal::is_bool<type>::value()){ bool_args_names.insert(name); } positional_arg_names.push_back(name); return *this; } void parse(std::size_t argc, char const* argv[]){ { std::size_t current_positional_index=0; for (std::size_t j=1;j<argc;j++){ internal::argument* pa = NULL; std::string argument_value; const char * const begin=argv[j]; const char * const end=begin+std::strlen(argv[j]); const char * const assign_sign = std::find(begin,end,'='); struct throw_unknown_parameter{ static void _(std::string const& location){ throw std::invalid_argument(std::string("unknown parameter starting at:'")+location+"'"); }}; //first try to interpret it like parameter=value string if (assign_sign!=end){ std::string name_found = std::string(begin,assign_sign); args_map_type::iterator it = args_map.find(name_found ); if(it!=args_map.end()){ pa= &((*it).second); argument_value = std::string(assign_sign+1,end); }else { throw_unknown_parameter::_(argv[j]); } } //then see is it a named flag else{ args_map_type::iterator it = args_map.find(argv[j] ); if(it!=args_map.end()){ pa= &((*it).second); argument_value = ""; } //then try it as positional argument without name specified else if (current_positional_index < positional_arg_names.size()){ std::stringstream str(argv[j]); args_map_type::iterator found_positional_arg = args_map.find(positional_arg_names.at(current_positional_index)); //TODO: probably use of smarter assert would help here assert(found_positional_arg!=args_map.end()/*&&"positional_arg_names and args_map are out of sync"*/); if (found_positional_arg==args_map.end()){ throw std::logic_error("positional_arg_names and args_map are out of sync"); } pa= &((*found_positional_arg).second); argument_value = argv[j]; current_positional_index++; }else { //TODO: add tc to check throw_unknown_parameter::_(argv[j]); } } assert(pa); if (pa->is_matched()){ throw 
std::invalid_argument(std::string("several values specified for: '")+pa->name()+"' argument"); } pa->parse_and_store(argument_value); } } } std::string usage_string(const std::string& binary_name)const{ std::string command_line_params; std::string summary_description; for (args_display_order_type::const_iterator it = args_display_order.begin();it!=args_display_order.end();++it){ const bool is_bool = (0!=bool_args_names.count((*it))); args_map_type::const_iterator argument_it = args_map.find(*it); //TODO: probably use of smarter assert would help here assert(argument_it!=args_map.end()/*&&"args_display_order and args_map are out of sync"*/); if (argument_it==args_map.end()){ throw std::logic_error("args_display_order and args_map are out of sync"); } const internal::argument & a = (*argument_it).second; command_line_params +=" [" + a.name() + (is_bool ?"":"=value")+ "]"; summary_description +=" " + a.name() + " - " + a.description() +" ("+a.value() +")" + "\n"; } std::string positional_arg_cl; for (positional_arg_names_type::const_iterator it = positional_arg_names.begin();it!=positional_arg_names.end();++it){ positional_arg_cl +=" ["+(*it); } for (std::size_t i=0;i<positional_arg_names.size();++i){ positional_arg_cl+="]"; } command_line_params+=positional_arg_cl; std::stringstream str; using std::endl; str << " Program usage is:" << endl << " " << binary_name << command_line_params << endl << endl << " where:" << endl << summary_description ; return str.str(); } }; // class cli_argument_pack namespace internal { template<typename T> bool is_power_of_2( T val ) { size_t intval = size_t(val); return (intval&(intval-1)) == size_t(0); } int step_function_plus(int previous, double step){ return static_cast<int>(previous+step); } int step_function_multiply(int previous, double multiply){ return static_cast<int>(previous*multiply); } // "Power-of-2 ladder": nsteps is the desired number of steps between any subsequent powers of 2. 
// The actual step is the quotient of the nearest smaller power of 2 divided by that number (but at least 1).
// E.g., '1:32:#4' means 1,2,3,4,5,6,7,8,10,12,14,16,20,24,28,32
// Step functor for the '#' mnemonic: implements the "power-of-2 ladder" described above.
int step_function_power2_ladder(int previous, double nsteps){
    int steps = int(nsteps);
    assert( is_power_of_2(steps) ); // must be a power of 2
    // The actual step is 1 until the value is twice as big as nsteps
    if( previous < 2*steps ) return previous+1;
    // calculate the previous power of 2
    int prev_power2 = previous/2; // start with half the given value
    int rshift = 1;               // and with the shift of 1;
    while( int shifted = prev_power2>>rshift ) { // shift the value right; while the result is non-zero,
        prev_power2 |= shifted;   // add the bits set in 'shifted';
        rshift <<= 1;             // double the shift, as twice as many top bits are set;
    }                             // repeat.
    ++prev_power2; // all low bits set; now it's just one less than the desired power of 2
    assert( is_power_of_2(prev_power2) );
    assert( (prev_power2<=previous)&&(2*prev_power2>previous) );
    // The actual step value is the previous power of 2 divided by steps
    return previous + (prev_power2/steps);
}
// Common signature for all step functors: (previous value, step argument) -> next value.
typedef int (* step_function_ptr_type)(int,double);
// Binds a one-character command-line mnemonic ('*', '+', '#') to its step functor.
struct step_function_descriptor {
    char mnemonic;
    step_function_ptr_type function;
public:
    step_function_descriptor(char a_mnemonic, step_function_ptr_type a_function) : mnemonic(a_mnemonic), function(a_function) {}
private:
    // Copy assignment declared but not defined: descriptors are immutable once constructed.
    void operator=(step_function_descriptor const&);
};
// Registry scanned by thread_range_step's operator>> when parsing a step expression.
step_function_descriptor step_function_descriptors[] = {
    step_function_descriptor('*',step_function_multiply),
    step_function_descriptor('+',step_function_plus),
    step_function_descriptor('#',step_function_power2_ladder)
};
// Compile-time length of a built-in array.
template<typename T, size_t N> inline size_t array_length(const T(&)[N]) {
    return N;
}
// One parsed "step" of a thread range: a step functor plus its numeric argument.
struct thread_range_step {
    step_function_ptr_type step_function;
    double step_function_argument;
    thread_range_step ( step_function_ptr_type step_function_, double step_function_argument_)
        :step_function(step_function_),step_function_argument(step_function_argument_)
    {
        if (!step_function_) throw std::invalid_argument("step_function for thread range step should not be NULL");
    }
    // Applies the step to 'previous'; the result is asserted to be strictly increasing
    // so the enclosing enumeration loop always terminates.
    int operator()(int previous)const {
        assert(0<=previous); // test 0<=first and loop discipline
        const int ret = step_function(previous,step_function_argument);
        assert(previous<ret);
        return ret;
    }
    // Parses "<mnemonic><number>" (e.g. "#4" or "*2") by looking the mnemonic up
    // in step_function_descriptors; throws std::invalid_argument on an unknown
    // mnemonic or a non-power-of-2 argument to '#'.
    friend std::istream& operator>>(std::istream& input_stream, thread_range_step& step){
        char function_char;
        double function_argument;
        input_stream >> function_char >> function_argument;
        size_t i = 0;
        while ((i<array_length(step_function_descriptors)) && (step_function_descriptors[i].mnemonic!=function_char)) ++i;
        if (i >= array_length(step_function_descriptors)){
            throw std::invalid_argument("unknown step function mnemonic: "+std::string(1,function_char));
        } else if ((function_char=='#') && !is_power_of_2(function_argument)) {
            throw std::invalid_argument("the argument of # should be a power of 2");
        }
        step.step_function = step_function_descriptors[i].function;
        step.step_function_argument = function_argument;
        return input_stream;
    }
};
} // namespace internal
// A closed range of thread counts [first, last] plus the rule for stepping through it.
struct thread_number_range{
    int (*auto_number_of_threads)(); // callback supplying the default ("auto") thread count
    int first; // 0<=first (0 can be used as a special value)
    int last;  // first<=last
    internal::thread_range_step step;
    // high_ == -1 (the default) means "use auto_number_of_threads_() as the upper bound".
    thread_number_range( int (*auto_number_of_threads_)(),int low_=1, int high_=-1
        , internal::thread_range_step step_ = internal::thread_range_step(internal::step_function_power2_ladder,4)
    )
        : auto_number_of_threads(auto_number_of_threads_), first(low_), last((high_>-1) ? high_ : auto_number_of_threads_())
        ,step(step_)
    {
        if (first<0) {
            throw std::invalid_argument("negative value not allowed");
        }
        if (first>last) {
            throw std::invalid_argument("decreasing sequence not allowed");
        }
    }
    // Parses "low[:high[:step]]" where low/high may be the literal "auto".
    // On parse failure the stream failbit is set and the exception rethrown.
    friend std::istream& operator>>(std::istream& i, thread_number_range& range){
        try{
            std::string s;
            i>>s;
            // Local converter: maps "auto" to the range's auto value, otherwise
            // converts the token to int.
            struct string_to_number_of_threads{
                int auto_value;
                string_to_number_of_threads(int auto_value_):auto_value(auto_value_){}
                int operator()(const std::string & value)const{
                    return (value=="auto")? auto_value : internal::string_to<int>(value);
                }
            };
            // NOTE: the variable deliberately (if confusingly) shares the name of its type.
            string_to_number_of_threads string_to_number_of_threads(range.auto_number_of_threads());
            int low, high;
            std::size_t colon = s.find(':');
            if ( colon == std::string::npos ){
                low = high = string_to_number_of_threads(s);
            } else {
                //it is a range
                std::size_t second_colon = s.find(':',colon+1);
                low  = string_to_number_of_threads(std::string(s, 0, colon)); //not copying the colon
                high = string_to_number_of_threads(std::string(s, colon+1, second_colon - (colon+1))); //not copying the colons
                if (second_colon != std::string::npos){
                    internal::string_to(std::string(s,second_colon + 1),range.step);
                }
            }
            // Reconstruct through the validating constructor so range invariants hold.
            range = thread_number_range(range.auto_number_of_threads,low,high,range.step);
        }catch(std::invalid_argument&){
            i.setstate(std::ios::failbit);
            throw;
        }
        return i;
    }
    // Formats the range back to "first:last:<mnemonic><argument>" (inverse of operator>>).
    friend std::ostream& operator<<(std::ostream& o, thread_number_range const& range){
        using namespace internal;
        size_t i = 0;
        for (; i < array_length(step_function_descriptors) && step_function_descriptors[i].function != range.step.step_function; ++i ) {}
        if (i >= array_length(step_function_descriptors)){
            throw std::invalid_argument("unknown step function for thread range");
        }
        o<<range.first<<":"<<range.last<<":"<<step_function_descriptors[i].mnemonic<<range.step.step_function_argument;
        return o;
    }
}; // struct thread_number_range
//TODO: fix unused warning here
//TODO: update the thread range description in the .html files
// Help text shown for the thread-range command-line option.
static const char* thread_number_range_desc="number of threads to use; a range of the form low[:high[:(+|*|#)step]],"
    "\n\twhere low and optional high are non-negative integers or 'auto' for the default choice,"
    "\n\tand optional step expression specifies how thread numbers are chosen within the range."
    "\n\tSee examples/common/index.html for detailed description."
;
// Prints a wall-clock measurement in the format shared by the examples.
inline void report_elapsed_time(double seconds){
    std::cout << "elapsed time : "<<seconds<<" seconds \n";
}
// Parses argv with the given pack, adding a standard "-h" flag; on parse error
// or when -h was requested, prints the usage string and exits the process.
inline void parse_cli_arguments(int argc, const char* argv[], utility::cli_argument_pack cli_pack){
    bool show_help = false;
    cli_pack.arg(show_help,"-h","show this message");
    bool invalid_input=false;
    try {
        cli_pack.parse(argc,argv);
    }catch(std::exception& e){
        std::cerr <<"error occurred while parsing command line."<<std::endl
            <<"error text: "<<e.what()<<std::endl
            <<std::flush;
        invalid_input =true;
    }
    if (show_help || invalid_input){
        std::cout<<cli_pack.usage_string(argv[0])<<std::flush;
        std::exit(0);
    }
}
// Convenience overload for the non-const argv received by main().
inline void parse_cli_arguments(int argc, char* argv[], utility::cli_argument_pack cli_pack){
    parse_cli_arguments(argc, const_cast<const char**>(argv), cli_pack);
}
} // closes the enclosing namespace (opened before this chunk -- presumably 'utility'; confirm)
#endif /* UTILITY_H_ */
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/src/build_with_openmp/build_with_openmp.cpp
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
/******************************************************************************
 The tachyon sample program is for use with the Guided Vtune Tutorial. Please refer to the
 github's readme for build instructions.
*******************************************************************************/
#include "machine.h"
#include "types.h"
#include "macros.h"
#include "vector.h"
#include "tgafile.h"
#include "trace.h"
#include "light.h"
#include "shade.h"
#include "camera.h"
#include "util.h"
#include "intersect.h"
#include "global.h"
#include "ui.h"
#include "tachyon_video.h"
#include <omp.h>
// shared but read-only so could be private too
static thr_parms *all_parms;  // parameter block handed to thread_trace()
static scenedef scene;        // scene description being rendered
static int startx;            // tile bounds, in pixels
static int stopx;
static int starty;
static int stopy;
static flt jitterscale;       // antialiasing jitter scale (set in thread_trace; not read in this build -- TODO confirm)
static int totaly;            // last row index; used to flip y for the drawing area
// This function is shared among all implementations:
// Traces the primary ray through pixel (x, y), clamps the resulting color to
// [0, 255] per channel, and converts it to a framebuffer value.
// 'serial' is the caller's running ray serial counter and is advanced here;
// 'local_mbox' is per-caller mailbox scratch handed to the tracer.
// NOTE(review): the startx/stopx/starty/stopy parameters shadow the file-level
// statics of the same names and are not read in this body; sample/avcol/alias
// are also unused here (the antialiasing path is absent from this variant).
static color_t render_one_pixel (int x, int y, unsigned int *local_mbox, unsigned int &serial, int startx, int stopx, int starty, int stopy) {
    /* private vars moved inside loop */
    ray primary, sample;
    color col, avcol;
    int R,G,B;
    intersectstruct local_intersections;
    int alias;
    /* end private */
    primary = camray(&scene, x, y);           // primary ray through this pixel
    primary.intstruct = &local_intersections;
    primary.flags = RT_RAY_REGULAR;
    serial++;
    primary.serial = serial;
    primary.mbox = local_mbox;
    primary.maxdist = FHUGE;
    primary.scene = &scene;
    col = trace(&primary);
    serial = primary.serial;                  // the tracer may advance the serial further
    /* Handle overexposure and underexposure here... */
    R = (int)(col.r * 255);
    if ( R > 255 ) R = 255;
    else if ( R < 0 ) R = 0;
    G = (int)(col.g * 255);
    if ( G > 255 ) G = 255;
    else if ( G < 0 ) G = 0;
    B = (int)(col.b * 255);
    if ( B > 255 ) B = 255;
    else if ( B < 0 ) B = 0;
    return video->get_color(R, G, B);
}
#if DO_ITT_NOTIFY
#include"ittnotify.h"
#endif
// To start off with our OpenMP implementation, we are being overly cautious and adding a mutex
// lock around our for loop.
The idea behind this is there is a chance multiple threads are accessing // the same variables, so we can add a mutex lock around it to only allow one thread to access that section // at a time. Run this with Vtune to see what improvements can be made. static void parallel_thread(void) { unsigned int mboxsize = sizeof(unsigned int) * (max_objectid() + 20); #pragma omp parallel for for (int y = starty; y < stopy; y++) { unsigned int serial = 1; unsigned int local_mbox[mboxsize]; memset(local_mbox, 0, mboxsize); drawing_area drawing(startx, totaly - y, stopx - startx, 1); for (int x = startx; x < stopx; x++) { color_t c = render_one_pixel(x, y, local_mbox, serial, startx, stopx, starty, stopy); drawing.put_pixel(c); } video->next_frame(); } } // This function is shared among all implementations: void * thread_trace(thr_parms * parms) { // shared but read-only so could be private too all_parms = parms; scene = parms->scene; startx = parms->startx; stopx = parms->stopx; starty = parms->starty; stopy = parms->stopy; jitterscale = 40.0*(scene.hres + scene.vres); totaly = parms->scene.vres - 1; #if DO_ITT_NOTIFY __itt_resume(); #endif parallel_thread(); #if DO_ITT_NOTIFY __itt_pause(); #endif return(NULL); }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/src/build_with_openmp/build_with_openmp_optimized.cpp
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /****************************************************************************** The tachyon sample program is for use with the Guided Vtune Tutorial. Please refer to the github's readme for build instructions. *******************************************************************************/ #include "machine.h" #include "types.h" #include "macros.h" #include "vector.h" #include "tgafile.h" #include "trace.h" #include "light.h" #include "shade.h" #include "camera.h" #include "util.h" #include "intersect.h" #include "global.h" #include "ui.h" #include "tachyon_video.h" // shared but read-only so could be private too static thr_parms *all_parms; static scenedef scene; static int startx; static int stopx; static int starty; static int stopy; static flt jitterscale; static int totaly; // This function is shared among all implementations: static color_t render_one_pixel (int x, int y, unsigned int *local_mbox, unsigned int &serial, int startx, int stopx, int starty, int stopy) { /* private vars moved inside loop */ ray primary, sample; color col, avcol; int R,G,B; intersectstruct local_intersections; int alias; /* end private */ primary = camray(&scene, x, y); primary.intstruct = &local_intersections; primary.flags = RT_RAY_REGULAR; serial++; primary.serial = serial; primary.mbox = local_mbox; primary.maxdist = FHUGE; primary.scene = &scene; col = trace(&primary); serial = primary.serial; /* Handle overexposure and underexposure here... */ R = (int)(col.r * 255); if ( R > 255 ) R = 255; else if ( R < 0 ) R = 0; G = (int)(col.g * 255); if ( G > 255 ) G = 255; else if ( G < 0 ) G = 0; B = (int)(col.b * 255); if ( B > 255 ) B = 255; else if ( B < 0 ) B = 0; return video->get_color(R, G, B); } #if DO_ITT_NOTIFY #include"ittnotify.h" #endif // After removing the mutex, we find that the protection of variables is not actually needed. // Since they are local to the individual threads, each thread cannot actually access the other // variables. 
Profile this with Vtune and notice the differences. static void parallel_thread(void) { unsigned int mboxsize = sizeof(unsigned int)*(max_objectid() + 20); #pragma omp parallel for schedule(dynamic) for ( int y = starty; y < stopy; y++ ) { unsigned int serial = 1; unsigned int local_mbox[mboxsize]; memset(local_mbox, 0, mboxsize); drawing_area drawing(startx, totaly - y, stopx - startx, 1); for ( int x = startx; x < stopx; x++ ) { color_t c = render_one_pixel(x, y, local_mbox, serial, startx, stopx, starty, stopy); drawing.put_pixel(c); } video->next_frame(); } } // This function is shared among all implementations: void * thread_trace(thr_parms * parms) { // shared but read-only so could be private too all_parms = parms; scene = parms->scene; startx = parms->startx; stopx = parms->stopx; starty = parms->starty; stopy = parms->stopy; jitterscale = 40.0*(scene.hres + scene.vres); totaly = parms->scene.vres - 1; #if DO_ITT_NOTIFY __itt_resume(); #endif parallel_thread(); #if DO_ITT_NOTIFY __itt_pause(); #endif return(NULL); }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/src/build_with_serial/trace.serial.cpp
//==============================================================
//
// SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT,
// http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/
//
// Copyright (C) Intel Corporation
//
// THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT
// NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS.
//
// =============================================================
/*
    The original source for this example is
    Copyright (c) 1994-2008 John E. Stone
    All rights reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:
    1. Redistributions of source code must retain the above copyright notice,
       this list of conditions and the following disclaimer.
    2. Redistributions in binary form must reproduce the above copyright notice,
       this list of conditions and the following disclaimer in the documentation
       and/or other materials provided with the distribution.
    3. The name of the author may not be used to endorse or promote products
       derived from this software without specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
    IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
    OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
    IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
    INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
    NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
    THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Serial (single-threaded) implementation of the tachyon renderer core,
// including the optional antialiasing path.
#include "machine.h"
#include "types.h"
#include "macros.h"
#include "vector.h"
#include "tgafile.h"
#include "trace.h"
#include "light.h"
#include "shade.h"
#include "camera.h"
#include "util.h"
#include "intersect.h"
#include "global.h"
#include "ui.h"
#include "tachyon_video.h"
// shared but read-only so could be private too
static thr_parms *all_parms;  // parameter block handed to thread_trace()
static scenedef scene;        // scene description being rendered
static int startx;            // tile bounds, in pixels
static int stopx;
static int starty;
static int stopy;
static flt jitterscale;       // scales the antialiasing jitter relative to image resolution
static int totaly;            // last row index; used to flip y for the drawing area
// Traces the primary ray for pixel (x, y), optionally supersamples it with
// scene.antialiasing jittered rays, averages, clamps each channel to
// [0, 255], and converts to a framebuffer value. 'serial' is the caller's
// running ray serial counter and is advanced here.
// NOTE(review): the startx/stopx/starty/stopy parameters shadow the file
// statics of the same names and are not read in this body.
static color_t render_one_pixel (int x, int y, unsigned int *local_mbox, unsigned int &serial, int startx, int stopx, int starty, int stopy) {
    /* private vars moved inside loop */
    ray primary, sample;
    color col, avcol;
    int R,G,B;
    intersectstruct local_intersections;
    int alias;
    /* end private */
    primary=camray(&scene, x, y);            // primary ray through this pixel
    primary.intstruct = &local_intersections;
    primary.flags = RT_RAY_REGULAR;
    serial++;
    primary.serial = serial;
    primary.mbox = local_mbox;
    primary.maxdist = FHUGE;
    primary.scene = &scene;
    col=trace(&primary);
    serial = primary.serial;                 // the tracer may advance the serial further
    /* perform antialiasing if enabled.. */
    if (scene.antialiasing > 0) {
        for (alias=0; alias < scene.antialiasing; alias++) {
            serial++;       /* increment serial number */
            sample=primary; /* copy the regular primary ray to start with */
            sample.serial = serial;
            {
                // Jitter the sample ray's direction slightly; jitterscale keeps
                // the perturbation proportional to the image resolution.
                sample.d.x+=((std::rand() % 100) - 50) / jitterscale;
                sample.d.y+=((std::rand() % 100) - 50) / jitterscale;
                sample.d.z+=((std::rand() % 100) - 50) / jitterscale;
            }
            avcol=trace(&sample);
            serial = sample.serial; /* update our overall serial # */
            col.r += avcol.r;
            col.g += avcol.g;
            col.b += avcol.b;
        }
        // Average the primary ray plus all antialiasing samples.
        col.r /= (scene.antialiasing + 1.0);
        col.g /= (scene.antialiasing + 1.0);
        col.b /= (scene.antialiasing + 1.0);
    }
    /* Handle overexposure and underexposure here... */
    R=(int) (col.r*255);
    if (R > 255) R = 255;
    else if (R < 0) R = 0;
    G=(int) (col.g*255);
    if (G > 255) G = 255;
    else if (G < 0) G = 0;
    B=(int) (col.b*255);
    if (B > 255) B = 255;
    else if (B < 0) B = 0;
    return video->get_color(R, G, B);
}
// Renders the whole tile row by row on the calling thread. A single
// stack-allocated mailbox is reused for every pixel. Returns early if the
// display declines another frame.
static void parallel_thread (void) {
    // thread-local storage
    unsigned int serial = 1;
    unsigned int mboxsize = sizeof(unsigned int)*(max_objectid() + 20); // mailbox size in bytes
    unsigned int * local_mbox = (unsigned int *) alloca(mboxsize);
    memset(local_mbox,0,mboxsize);
    for (int y = starty; y < stopy; y++) {
        {
            // Scoped so the drawing area is finalized before the frame flip.
            drawing_area drawing(startx, totaly-y, stopx-startx, 1);
            for (int x = startx; x < stopx; x++) {
                color_t c = render_one_pixel (x, y, local_mbox, serial, startx, stopx, starty, stopy);
                drawing.put_pixel(c);
            }
        }
        if(!video->next_frame()) return; // UI requested a stop
    }
}
// Framework entry point: copies the per-run parameters into the file-level
// statics, then renders serially.
void * thread_trace(thr_parms * parms) {
    // shared but read-only so could be private too
    all_parms = parms;
    scene = parms->scene;
    startx = parms->startx;
    stopx = parms->stopx;
    starty = parms->starty;
    stopy = parms->stopy;
    jitterscale = 40.0*(scene.hres + scene.vres);
    totaly = parms->scene.vres-1;
    parallel_thread ();
    return(NULL);
}
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/src/build_with_serial/build_with_serial.cpp
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /****************************************************************************** The tachyon sample program is for use with the Guided Vtune Tutorial. Please refer to the github's readme for instructions. *******************************************************************************/ #include "machine.h" #include "types.h" #include "macros.h" #include "vector.h" #include "tgafile.h" #include "trace.h" #include "light.h" #include "shade.h" #include "camera.h" #include "util.h" #include "intersect.h" #include "global.h" #include "ui.h" #include "tachyon_video.h" // shared but read-only so could be private too static thr_parms *all_parms; static scenedef scene; static int startx; static int stopx; static int starty; static int stopy; static flt jitterscale; static int totaly; // This function is shared among all implementations: static color_t render_one_pixel (int x, int y, unsigned int *local_mbox, unsigned int &serial, int startx, int stopx, int starty, int stopy) { /* private vars moved inside loop */ ray primary, sample; color col, avcol; int R,G,B; intersectstruct local_intersections; int alias; /* end private */ primary = camray(&scene, x, y); primary.intstruct = &local_intersections; primary.flags = RT_RAY_REGULAR; serial++; primary.serial = serial; primary.mbox = local_mbox; primary.maxdist = FHUGE; primary.scene = &scene; col = trace(&primary); serial = primary.serial; /* Handle overexposure and underexposure here... */ R = (int)(col.r * 255); if ( R > 255 ) R = 255; else if ( R < 0 ) R = 0; G = (int)(col.g * 255); if ( G > 255 ) G = 255; else if ( G < 0 ) G = 0; B = (int)(col.b * 255); if ( B > 255 ) B = 255; else if ( B < 0 ) B = 0; return video->get_color(R, G, B); } #if DO_ITT_NOTIFY #include"ittnotify.h" #endif // This is the solely serial version of this function. After running this and profiling with Vtune, you will see that // this code may run a lot quicker if the work was divided among threads. 
static void parallel_thread(void) { unsigned int serial = 1; unsigned int mboxsize = sizeof(unsigned int)*(max_objectid() + 20); unsigned int * local_mbox = (unsigned int *)alloca(mboxsize); memset(local_mbox, 0, mboxsize); for (int y = starty; y < stopy; y++) { { drawing_area drawing(startx, totaly - y, stopx - startx, 1); for (int x = startx; x < stopx; x++) { color_t c = render_one_pixel(x, y, local_mbox, serial, startx, stopx, starty, stopy); drawing.put_pixel(c); } } if (!video->next_frame()) return; } } // This function is shared among all implementations. void * thread_trace(thr_parms * parms) { // shared but read-only so could be private too all_parms = parms; scene = parms->scene; startx = parms->startx; stopx = parms->stopx; starty = parms->starty; stopy = parms->stopy; jitterscale = 40.0*(scene.hres + scene.vres); totaly = parms->scene.vres - 1; #if DO_ITT_NOTIFY __itt_resume(); #endif parallel_thread(); #if DO_ITT_NOTIFY __itt_pause(); #endif return(NULL); }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/src/build_with_tbb/build_with_tbb_optimized.cpp
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /****************************************************************************** The tachyon sample program is for use with the Guided Vtune Tutorial. Please refer to the github's readme for build instructions. *******************************************************************************/ #include "machine.h" #include "types.h" #include "macros.h" #include "vector.h" #include "tgafile.h" #include "trace.h" #include "light.h" #include "shade.h" #include "camera.h" #include "util.h" #include "intersect.h" #include "global.h" #include "ui.h" #include "tachyon_video.h" #include <tbb/tbb.h> // shared but read-only so could be private too static thr_parms *all_parms; static scenedef scene; static int startx; static int stopx; static int starty; static int stopy; static flt jitterscale; static int totaly; // This function is shared among all implementations: static color_t render_one_pixel (int x, int y, unsigned int *local_mbox, unsigned int &serial, int startx, int stopx, int starty, int stopy) { /* private vars moved inside loop */ ray primary, sample; color col, avcol; int R,G,B; intersectstruct local_intersections; int alias; /* end private */ primary = camray(&scene, x, y); primary.intstruct = &local_intersections; primary.flags = RT_RAY_REGULAR; serial++; primary.serial = serial; primary.mbox = local_mbox; primary.maxdist = FHUGE; primary.scene = &scene; col = trace(&primary); serial = primary.serial; /* Handle overexposure and underexposure here... */ R = (int)(col.r * 255); if ( R > 255 ) R = 255; else if ( R < 0 ) R = 0; G = (int)(col.g * 255); if ( G > 255 ) G = 255; else if ( G < 0 ) G = 0; B = (int)(col.b * 255); if ( B > 255 ) B = 255; else if ( B < 0 ) B = 0; return video->get_color(R, G, B); } #if DO_ITT_NOTIFY #include"ittnotify.h" #endif // After removing the mutex, we find that the protection of variables is not actually needed. // Since they are local to the individual threads, each thread cannot actually access the other // variables. 
Profile this with Vtune and notice the differences. static void parallel_thread(void) { unsigned int mboxsize = sizeof(unsigned int)*(max_objectid() + 20); tbb::parallel_for(starty, stopy, [mboxsize] (int y) { unsigned int serial = 1; unsigned int local_mbox[mboxsize]; memset(local_mbox, 0, mboxsize); drawing_area drawing(startx, totaly - y, stopx - startx, 1); for ( int x = startx; x < stopx; x++ ) { color_t c = render_one_pixel(x, y, local_mbox, serial, startx, stopx, starty, stopy); drawing.put_pixel(c); } video->next_frame(); } ); } // This function is shared among all implementations: void * thread_trace(thr_parms * parms) { // shared but read-only so could be private too all_parms = parms; scene = parms->scene; startx = parms->startx; stopx = parms->stopx; starty = parms->starty; stopy = parms->stopy; jitterscale = 40.0*(scene.hres + scene.vres); totaly = parms->scene.vres - 1; #if DO_ITT_NOTIFY __itt_resume(); #endif parallel_thread(); #if DO_ITT_NOTIFY __itt_pause(); #endif return(NULL); }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/src/build_with_tbb/build_with_tbb.cpp
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /****************************************************************************** The tachyon sample program is for use with the Guided Vtune Tutorial. Please refer to the github's readme for build instructions. *******************************************************************************/ #include "machine.h" #include "types.h" #include "macros.h" #include "vector.h" #include "tgafile.h" #include "trace.h" #include "light.h" #include "shade.h" #include "camera.h" #include "util.h" #include "intersect.h" #include "global.h" #include "ui.h" #include "tachyon_video.h" #include <tbb/tbb.h> // Initialize the mutex to be used later: tbb::spin_mutex mtx; // shared but read-only so could be private too static thr_parms *all_parms; static scenedef scene; static int startx; static int stopx; static int starty; static int stopy; static flt jitterscale; static int totaly; // This function is shared among all implementations: static color_t render_one_pixel (int x, int y, unsigned int *local_mbox, unsigned int &serial, int startx, int stopx, int starty, int stopy) { /* private vars moved inside loop */ ray primary, sample; color col, avcol; int R,G,B; intersectstruct local_intersections; int alias; /* end private */ primary = camray(&scene, x, y); primary.intstruct = &local_intersections; primary.flags = RT_RAY_REGULAR; serial++; primary.serial = serial; primary.mbox = local_mbox; primary.maxdist = FHUGE; primary.scene = &scene; col = trace(&primary); serial = primary.serial; /* Handle overexposure and underexposure here... */ R = (int)(col.r * 255); if ( R > 255 ) R = 255; else if ( R < 0 ) R = 0; G = (int)(col.g * 255); if ( G > 255 ) G = 255; else if ( G < 0 ) G = 0; B = (int)(col.b * 255); if ( B > 255 ) B = 255; else if ( B < 0 ) B = 0; return video->get_color(R, G, B); } #if DO_ITT_NOTIFY #include"ittnotify.h" #endif // To start off with our TBB implementation, we are being overly cautious and adding a mutex // lock around our for loop. 
The idea behind this is there is a chance multiple threads are accessing // the same variables, so we can add a mutex lock around it to only allow one thread to access that section // at a time. Run this with Vtune to see what improvements can be made. static void parallel_thread(void) { unsigned int mboxsize = sizeof(unsigned int) * (max_objectid() + 20); tbb::parallel_for(starty, stopy, [mboxsize] (int y) { tbb::spin_mutex::scoped_lock lock(mtx); unsigned int serial = 1; unsigned int local_mbox[mboxsize]; memset(local_mbox, 0, mboxsize); drawing_area drawing(startx, totaly - y, stopx - startx, 1); for ( int x = startx; x < stopx; x++ ) { color_t c = render_one_pixel(x, y, local_mbox, serial, startx, stopx, starty, stopy); drawing.put_pixel(c); } video->next_frame(); } ); } // This function is shared among all implementations: void * thread_trace(thr_parms * parms) { // shared but read-only so could be private too all_parms = parms; scene = parms->scene; startx = parms->startx; stopx = parms->stopx; starty = parms->starty; stopy = parms->stopy; jitterscale = 40.0*(scene.hres + scene.vres); totaly = parms->scene.vres - 1; #if DO_ITT_NOTIFY __itt_resume(); #endif parallel_thread(); #if DO_ITT_NOTIFY __itt_pause(); #endif return(NULL); }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/plane.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * plane.h - This file contains the defines for planes etc. * * $Id: plane.h,v 1.2 2007-02-22 17:54:16 Exp $ */ object * newplane(void * tex, vector ctr, vector norm); #ifdef PLANE_PRIVATE typedef struct { unsigned int id; /* Unique Object serial number */ void * nextobj; /* pointer to next object in list */ object_methods * methods; /* this object's methods */ texture * tex; /* object texture */ flt d; vector norm; } plane; static void plane_intersect(plane *, ray *); static int plane_bbox(void * obj, vector * min, vector * max); static void plane_normal(plane *, vector *, ray * incident, vector *); #endif
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/types.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #if __MINGW32__ #include <malloc.h> #elif _WIN32 #include <malloc.h> #define alloca _alloca #elif __FreeBSD__||__NetBSD__ #include <stdlib.h> #else #include <alloca.h> #endif /* * types.h - This file contains all of the type definitions for the raytracer * * $Id: types.h,v 1.2 2007-02-22 17:54:16 Exp $ */ #define MAXOCTNODES 25 /* subdivide octants /w > # of children */ #define SPEPSILON 0.000001 /* amount to crawl down a ray */ #define EPSILON 0.000001 /* amount to crawl down a ray */ #define TWOPI 6.2831853 /* guess */ #define FHUGE 1e18 /* biggest fp number we can represent */ /* Maximum internal table sizes */ /* Use prime numbers for best memory system performance */ #define INTTBSIZE 1024 /* maximum intersections we can hold */ #define MAXLIGHTS 39 /* maximum number of lights in a scene */ #define MAXIMGS 39 /* maxiumum number of distinct images */ #define RPCQSIZE 113 /* number of RPC messages to queue */ /* Parameter values for rt_boundmode() */ #define RT_BOUNDING_DISABLED 0 /* spatial subdivision/bounding disabled */ #define RT_BOUNDING_ENABLED 1 /* spatial subdivision/bounding enabled */ /* Parameter values for rt_displaymode() */ #define RT_DISPLAY_DISABLED 0 /* video output enabled */ #define RT_DISPLAY_ENABLED 1 /* video output disabled */ /* Ray flags */ #define RT_RAY_REGULAR 1 #define RT_RAY_SHADOW 2 #define RT_RAY_BOUNDED 4 #define RT_RAY_FINISHED 8 #ifdef USESINGLEFLT typedef float flt; /* generic floating point number, using float */ #else typedef double flt; /* 
generic floating point number, using double */ #endif typedef unsigned char byte; /* 1 byte */ typedef signed int word; /* 32 bit integer */ typedef struct { flt x; /* X coordinate value */ flt y; /* Y coordinate value */ flt z; /* Z coordinate value */ } vector; typedef struct { flt r; /* Red component */ flt g; /* Green component */ flt b; /* Blue component */ } color; typedef struct { unsigned char r; /* Red component */ unsigned char g; /* Green component */ unsigned char b; /* Blue component */ } bytecolor; typedef struct { /* Raw 24 bit image structure, for tga, ppm etc */ int loaded; /* image memory residence flag */ int xres; /* image X axis size */ int yres; /* image Y axis size */ int bpp; /* image bits per pixel */ char name[96]; /* image filename (with path) */ unsigned char * data; /* pointer to raw byte image data */ } rawimage; typedef struct { /* Scalar Volume Data */ int loaded; /* Volume data memory residence flag */ int xres; /* volume X axis size */ int yres; /* volume Y axis size */ int zres; /* volume Z axis size */ flt opacity; /* opacity per unit length */ char name[96]; /* Volume data filename */ unsigned char * data; /* pointer to raw byte volume data */ } scalarvol; typedef struct { color (* texfunc)(void *, void *, void *); int shadowcast; /* does the object cast a shadow */ int islight; /* light flag... 
*/ color col; /* base object color */ flt ambient; /* ambient lighting */ flt diffuse; /* diffuse reflection */ flt phong; /* phong specular highlights */ flt phongexp; /* phong exponent/shininess factor */ int phongtype; /* phong type: 0 == plastic, nonzero == metal */ flt specular; /* specular reflection */ flt opacity; /* how opaque the object is */ vector ctr; /* origin of texture */ vector rot; /* rotation of texture about origin */ vector scale; /* scale of texture in x,y,z */ vector uaxs; /* planar map U axis */ vector vaxs; /* planar map V axis */ void * img; /* pointer to image for image mapping */ void * obj; /* object ptr, hack for volume shaders for now */ } texture; typedef struct { void (* intersect)(void *, void *); /* intersection func ptr */ void (* normal)(void *, void *, void *, void *); /* normal function ptr */ int (* bbox)(void *, vector *, vector *); /* return the object bbox */ void (* free)(void *); /* free the object */ } object_methods; typedef struct { unsigned int id; /* Unique Object serial number */ void * nextobj; /* pointer to next object in list */ object_methods * methods; /* this object's methods */ texture * tex; /* object texture */ } object; typedef struct { object * obj; /* to object we hit */ flt t; /* distance along the ray to the hit point */ } intersection; typedef struct { int num; /* number of intersections */ intersection closest; /* closest intersection > 0.0 */ intersection list[INTTBSIZE]; /* list of all intersections */ } intersectstruct; typedef struct { char outfilename[200]; /* name of the output image */ unsigned char * rawimage; /* pointer to a raw rgb image to be stored */ int hres; /* horizontal output image resolution */ int vres; /* vertical output image resolution */ flt aspectratio; /* aspect ratio of output image */ int raydepth; /* maximum recursion depth */ int antialiasing; /* number of antialiasing rays to fire */ int verbosemode; /* verbose reporting flag */ int boundmode; /* automatic spatial 
subdivision flag */ int boundthresh; /* threshold number of subobjects */ int displaymode; /* run-time X11 display flag */ vector camcent; /* center of the camera in world coords */ vector camviewvec; /* view direction of the camera (Z axis) */ vector camrightvec; /* right axis for the camera (X axis) */ vector camupvec; /* up axis for the camera (Y axis) */ flt camzoom; /* zoom factor for the camera */ color background; /* scene background color */ } scenedef; typedef struct { intersectstruct * intstruct; /* ptr to thread's intersection data */ unsigned int depth; /* levels left to recurse.. (maxdepth - curdepth) */ unsigned int flags; /* ray flags, any special treatment needed etc */ unsigned int serial; /* serial number of the ray */ unsigned int * mbox; /* mailbox array for optimizing intersections */ vector o; /* origin of the ray X,Y,Z */ vector d; /* normalized direction of the ray */ flt maxdist; /* maximum distance to search for intersections */ vector s; /* startpoint of the ray (may differ from origin */ vector e; /* endpoint of the ray if bounded */ scenedef * scene; /* pointer to the scene, for global parms such as */ /* background colors etc */ } ray; typedef struct { int type; /* RPC call type */ int from; /* Sending processor */ int len; /* length of parms in bytes */ void * parms; /* Parameters to RPC */ } rpcmsg;
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/macros.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * macros.h - This file contains macro versions of functions that would be best * used as inlined code rather than function calls. * * $Id: macros.h,v 1.2 2007-02-22 17:54:15 Exp $ */ #define MYMAX(a , b) ((a) > (b) ? (a) : (b)) #define MYMIN(a , b) ((a) < (b) ? (a) : (b)) #define VDOT(return, a, b) \ return=(a.x * b.x + a.y * b.y + a.z * b.z); \ #define RAYPNT(c, a, b) \ c.x = a.o.x + ( a.d.x * b ); \ c.y = a.o.y + ( a.d.y * b ); \ c.z = a.o.z + ( a.d.z * b ); \ #define VSUB(a, b, c) \ c.x = (a.x - b.x); \ c.y = (a.y - b.y); \ c.z = (a.z - b.z); \ #define VCROSS(a, b, c) \ c->x = (a->y * b->z) - (a->z * b->y); \ c->y = (a->z * b->x) - (a->x * b->z); \ c->z = (a->x * b->y) - (a->y * b->x); \
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/sphere.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * sphere.h - This file contains the defines for spheres etc. * * $Id: sphere.h,v 1.2 2007-02-22 17:54:16 Exp $ */ object * newsphere(void *, vector, flt); #ifdef SPHERE_PRIVATE typedef struct { unsigned int id; /* Unique Object serial number */ void * nextobj; /* pointer to next object in list */ object_methods * methods; /* this object's methods */ texture * tex; /* object texture */ vector ctr; flt rad; } sphere; static int sphere_bbox(void * obj, vector * min, vector * max); static void sphere_intersect(sphere *, ray *); static void sphere_normal(sphere *, vector *, ray *, vector *); #endif /* SPHERE_PRIVATE */
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/ppm.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * ppm.h - This file deals with PPM format image files (reading/writing) * * $Id: ppm.h,v 1.2 2007-02-22 17:54:16 Exp $ */ /* For our puposes, we're interested only in the 3 byte per pixel 24 bit truecolor sort of file.. Probably won't implement any decent checking at this point, probably choke on things like the # comments.. */ int readppm(char * name, int * xres, int * yres, unsigned char **imgdata);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/util.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * util.h - This file contains defines for the timer functions... * * $Id: util.h,v 1.3 2007-02-22 17:54:17 Exp $ */ #include "machine.h" #if defined( _WIN32 ) #include <windows.h> #if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_APP) #define WIN8UI_EXAMPLE 1 #include <thread> typedef ULONGLONG timer; #ifdef GetTickCount #undef GetTickCount #endif #define GetTickCount GetTickCount64 #else typedef DWORD timer; #endif #else #include <sys/time.h> #include <unistd.h> #if defined( STDTIME ) typedef timeval timer; #elif defined ( OLDUNIXTIME ) typedef time_t timer; #endif /* OLDUNIXTIME */ /* STDTIME */ #endif /* _WIN32 */ timer gettimer(void); flt timertime(timer st, timer fn); void rt_sleep(int); int rt_meminuse(void); void * rt_getmem(unsigned int); unsigned int rt_freemem(void *); void rtbomb(const char *); void rtmesg(const char *);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/quadric.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * quadric.h - This file contains the defines for quadrics. * * $Id: quadric.h,v 1.2 2007-02-22 17:54:16 Exp $ */ typedef struct { flt a; flt b; flt c; flt d; flt e; flt f; flt g; flt h; flt i; flt j; } quadmatrix; typedef struct { unsigned int id; /* Unique Object serial number */ void * nextobj; /* pointer to next object in list */ object_methods * methods; /* this object's methods */ texture * tex; /* object texture */ vector ctr; quadmatrix mat; } quadric; quadric * newquadric(void); void quadric_intersect(quadric *, ray *); void quadric_normal(quadric *, vector *, ray *, vector *);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/tgafile.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * tgafile.h - this file contains defines and structures for tgafile.c * * $Id: tgafile.h,v 1.2 2007-02-22 17:54:16 Exp $ */ /* declare other functions */ void createtgafile(char *, unsigned short, unsigned short); void * opentgafile(char *); void writetgaregion(void *, int, int, int, int, int, int, char *); int readtga(char * name, int * xres, int * yres, unsigned char **imgdata);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/video.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= #ifndef __VIDEO_H__ #define __VIDEO_H__ #include <cassert> #if _MSC_VER #include <stddef.h> // for uintptr_t #else #include <stdint.h> // for uintptr_t #endif #if _WIN32 || _WIN64 #include <windows.h> #else #include <unistd.h> #endif typedef unsigned int color_t; typedef unsigned char colorcomp_t; typedef signed char depth_t; //! Class for getting access to drawing memory class drawing_memory { #ifdef __TBB_MIC_OFFLOAD // The address is kept as uintptr_t since // the compiler could not offload a pointer #endif uintptr_t my_address; public: depth_t pixel_depth; int sizex, sizey; //! Get drawing memory inline char* get_address() const { return reinterpret_cast<char*>(my_address); } //! Get drawing memory size inline int get_size() const { return ((pixel_depth>16) ? 4:2) * sizex * sizey; } //! Set drawing memory inline void set_address(char *mem) { my_address = reinterpret_cast<uintptr_t>(mem); } friend class drawing_area; friend class video; }; //! Simple proxy class for managing of different video systems class video { //! colorspace information depth_t depth, red_shift, green_shift, blue_shift; color_t red_mask, green_mask, blue_mask; friend class drawing_area; public: //! Constructor video(); //! Destructor ~video(); //! member to set window name const char *title; //! true is enable to show fps bool calc_fps; //! 
if true: on windows fork processing thread for on_process(), on non-windows note that next_frame() is called concurrently. bool threaded; //! true while running within main_loop() bool running; //! if true, do gui updating bool updating; //! initialize graphical video system bool init_window(int sizex, int sizey); //! initialize console. returns true if console is available bool init_console(); //! terminate video system void terminate(); //! Do standard event & processing loop. Use threaded = true to separate event/updating loop from frame processing void main_loop(); //! Process next frame bool next_frame(); //! Change window title void show_title(); //! translate RGB components into packed type inline color_t get_color(colorcomp_t red, colorcomp_t green, colorcomp_t blue) const; //! Get drawing memory descriptor inline drawing_memory get_drawing_memory() const; //! code of the ESCape key static const int esc_key = 27; //! Mouse events handler. virtual void on_mouse(int x, int y, int key) { } //! Mouse events handler. virtual void on_key(int key) { } //! Main processing loop. Redefine with your own virtual void on_process() { while(next_frame()); } #ifdef _WINDOWS //! Windows specific members //! if VIDEO_WINMAIN isn't defined then set this just before init() by arguments of WinMain static HINSTANCE win_hInstance; static int win_iCmdShow; //! optionally call it just before init() to set own. Use ascii strings convention void win_set_class(WNDCLASSEX &); //! load and set accelerator table from resources void win_load_accelerators(int idc); #endif }; //! Drawing class class drawing_area { const size_t base_index, max_index, index_stride; const depth_t pixel_depth; unsigned int * const ptr32; size_t index; public: const int start_x, start_y, size_x, size_y; //! constructors drawing_area(int x, int y, int sizex, int sizey); inline drawing_area(int x, int y, int sizex, int sizey, const drawing_memory &dmem); //! destructor inline ~drawing_area(); //! 
update the image void update(); //! set current position. local_x could be bigger then size_x inline void set_pos(int local_x, int local_y); //! put pixel in current position with incremental address calculating to next right pixel inline void put_pixel(color_t color); //! draw pixel at position by packed color void set_pixel(int localx, int localy, color_t color) { set_pos(localx, localy); put_pixel(color); } }; extern int g_sizex; extern int g_sizey; extern unsigned int *g_pImg; inline drawing_memory video::get_drawing_memory() const { drawing_memory dmem; dmem.pixel_depth = depth; dmem.my_address = reinterpret_cast<uintptr_t>(g_pImg); dmem.sizex = g_sizex; dmem.sizey = g_sizey; return dmem; } inline color_t video::get_color(colorcomp_t red, colorcomp_t green, colorcomp_t blue) const { if(red_shift == 16) // only for depth == 24 && red_shift > blue_shift return (red<<16) | (green<<8) | blue; else if(depth >= 24) return #if __ANDROID__ // Setting Alpha to 0xFF 0xFF000000 | #endif (red<<red_shift) | (green<<green_shift) | (blue<<blue_shift); else if(depth > 0) { depth_t bs = blue_shift, rs = red_shift; if(blue_shift < 0) blue >>= -bs, bs = 0; else /*red_shift < 0*/ red >>= -rs, rs = 0; return ((red<<rs)&red_mask) | ((green<<green_shift)&green_mask) | ((blue<<bs)&blue_mask); } else { // UYVY colorspace unsigned y, u, v; y = red * 77 + green * 150 + blue * 29; // sum(77+150+29=256) * max(=255): limit->2^16 u = (2048 + (blue << 3) - (y >> 5)) >> 4; // (limit->2^12)>>4 v = (2048 + (red << 3) - (y >> 5)) >> 4; y = y >> 8; return u | (y << 8) | (v << 16) | (y << 24); } } inline drawing_area::drawing_area(int x, int y, int sizex, int sizey, const drawing_memory &dmem) : start_x(x), start_y(y), size_x(sizex), size_y(sizey), pixel_depth(dmem.pixel_depth), base_index(y*dmem.sizex + x), max_index(dmem.sizex*dmem.sizey), index_stride(dmem.sizex), ptr32(reinterpret_cast<unsigned int*>(dmem.my_address)) { assert(x < dmem.sizex); assert(y < dmem.sizey); assert(x+sizex <= 
dmem.sizex); assert(y+sizey <= dmem.sizey); index = base_index; // current index } inline void drawing_area::set_pos(int local_x, int local_y) { index = base_index + local_x + local_y*index_stride; } inline void drawing_area::put_pixel(color_t color) { assert(index < max_index); if(pixel_depth > 16) ptr32[index++] = color; else if(pixel_depth > 0) ((unsigned short*)ptr32)[index++] = (unsigned short)color; else { // UYVY colorspace if(index&1) color >>= 16; ((unsigned short*)ptr32)[index++] = (unsigned short)color; } } inline drawing_area::~drawing_area() { #if ! __TBB_DEFINE_MIC update(); #endif } #if defined(_WINDOWS) && (defined(VIDEO_WINMAIN) || defined(VIDEO_WINMAIN_ARGS) ) #include <cstdlib> //! define WinMain for subsystem:windows. #ifdef VIDEO_WINMAIN_ARGS int main(int, char *[]); #else int main(); #endif int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE, PSTR szCmdLine, int iCmdShow) { video::win_hInstance = hInstance; video::win_iCmdShow = iCmdShow; #ifdef VIDEO_WINMAIN_ARGS return main(__argc, __argv); #else return main(); #endif } #endif #endif// __VIDEO_H__
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/bndbox.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * bndbox.h - This file contains the defines for bounding boxes etc. * * $Id: bndbox.h,v 1.2 2007-02-22 17:54:15 Exp $ */ typedef struct { unsigned int id; /* Unique Object serial number */ void * nextobj; /* pointer to next object in list */ object_methods * methods; /* this object's methods */ texture * tex; /* object texture */ vector min; vector max; object * objlist; } bndbox; bndbox * newbndbox(vector min, vector max); #ifdef BNDBOX_PRIVATE static int bndbox_bbox(void * obj, vector * min, vector * max); static void free_bndbox(void * v); static void bndbox_intersect(bndbox *, ray *); #endif
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/light.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * light.h - this file includes declarations and defines for light sources. * * $Id: light.h,v 1.2 2007-02-22 17:54:15 Exp $ */ typedef struct { unsigned int id; /* Unique Object serial number */ void * nextobj; /* pointer to next object in list */ object_methods * methods; /* this object's methods */ texture * tex; /* object texture */ vector ctr; flt rad; } point_light; point_light * newlight(void *, vector, flt); #ifdef LIGHT_PRIVATE static int light_bbox(void * obj, vector * min, vector * max); static void light_intersect(point_light *, ray *); static void light_normal(point_light *, vector *, ray *, vector *); #endif
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/grid.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * grid.h - spatial subdivision efficiency structures * * $Id: grid.h,v 1.2 2007-02-22 17:54:15 Exp $ * */ int engrid_scene(object ** list); object * newgrid(int xsize, int ysize, int zsize, vector min, vector max); #ifdef GRID_PRIVATE typedef struct objectlist { struct objectlist * next; /* next link in the list */ object * obj; /* the actual object */ } objectlist; typedef struct { unsigned int id; /* Unique Object serial number */ void * nextobj; /* pointer to next object in list */ object_methods * methods; /* this object's methods */ texture * tex; /* object texture */ int xsize; /* number of cells along the X direction */ int ysize; /* number of cells along the Y direction */ int zsize; /* number of cells along the Z direction */ vector min; /* the minimum coords for the box containing the grid */ vector max; /* the maximum coords for the box containing the grid */ vector voxsize; /* the size of a grid cell/voxel */ object * objects; /* all objects contained in the grid */ objectlist ** cells; /* the grid cells themselves */ } grid; typedef struct { int x; /* Voxel X address */ int y; /* Voxel Y address */ int z; /* Voxel Z address */ } gridindex; /* * Convert from voxel number along X/Y/Z to corresponding coordinate. */ #define voxel2x(g,X) ((X) * (g->voxsize.x) + (g->min.x)) #define voxel2y(g,Y) ((Y) * (g->voxsize.y) + (g->min.y)) #define voxel2z(g,Z) ((Z) * (g->voxsize.z) + (g->min.z)) /* * And vice-versa. 
*/ #define x2voxel(g,x) (((x) - g->min.x) / g->voxsize.x) #define y2voxel(g,y) (((y) - g->min.y) / g->voxsize.y) #define z2voxel(g,z) (((z) - g->min.z) / g->voxsize.z) static int grid_bbox(void * obj, vector * min, vector * max); static void grid_free(void * v); static int cellbound(grid *g, gridindex *index, vector * cmin, vector * cmax); void engrid_objlist(grid * g, object ** list); static int engrid_object(grid * g, object * obj); static int engrid_objectlist(grid * g, objectlist ** list); static int engrid_cell(grid *, gridindex *); static int pos2grid(grid * g, vector * pos, gridindex * index); static void grid_intersect(grid *, ray *); static void voxel_intersect(grid * g, ray * ry, int voxaddr); static int grid_bounds_intersect(grid * g, ray * ry, flt *near, flt *far); #endif
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/pthread_w.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifdef EMULATE_PTHREADS #ifndef _PTHREAD_H_DEFINED #define _PTHREAD_H_DEFINED #include <windows.h> #include <errno.h> #ifndef ENOTSUP #define ENOTSUP EPERM #endif /* just need <stddef.h> on Windows to get size_t defined */ #include <stddef.h> #define ERROR_PTHREAD 1000 #define ERROR_MODE 1001 #define ERROR_UNIMPL 1002 /* Basics */ struct pthread_s { HANDLE winthread_handle; DWORD winthread_id; }; typedef struct pthread_s *pthread_t; /* one of the few types that's pointer, not struct */ typedef struct { int i; /* not yet defined... */ } pthread_attr_t; /* Mutex */ typedef struct { int i; /* not yet defined... */ } pthread_mutexattr_t; typedef struct { CRITICAL_SECTION critsec; } pthread_mutex_t; /* Function prototypes */ extern int pthread_create (pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg); extern int pthread_join (pthread_t th, void **thread_return); extern void pthread_exit (void *retval); extern int pthread_mutex_init (pthread_mutex_t *mutex, pthread_mutexattr_t *mutex_attr); extern int pthread_mutex_destroy (pthread_mutex_t *mutex); extern int pthread_mutex_lock (pthread_mutex_t *mutex); extern int pthread_mutex_unlock (pthread_mutex_t *mutex); #endif /* _PTHREAD_H_DEFINED */ #endif /* EMULATE_PTHREADS */
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/winvideo.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /////// Common internal implementation of Windows-specific stuff ////////////// /////// Must be the first included header ////////////// #ifndef __WINVIDEO_H__ #define __WINVIDEO_H__ #ifndef _CRT_SECURE_NO_DEPRECATE #define _CRT_SECURE_NO_DEPRECATE #endif // Check that the target Windows version has all API calls requried. #ifndef _WIN32_WINNT # define _WIN32_WINNT 0x0400 #endif #if _WIN32_WINNT<0x0400 # define YIELD_TO_THREAD() Sleep(0) #else # define YIELD_TO_THREAD() SwitchToThread() #endif #include "video.h" #include <fcntl.h> #include <io.h> #include <iostream> #include <fstream> #pragma comment(lib, "gdi32.lib") #pragma comment(lib, "user32.lib") // maximum mumber of lines the output console should have static const WORD MAX_CONSOLE_LINES = 500; const COLORREF RGBKEY = RGB(8, 8, 16); // at least 8 for 16-bit palette HWND g_hAppWnd; // The program's window handle HANDLE g_handles[2] = {0,0};// thread and wake up event unsigned int * g_pImg = 0; // drawing memory int g_sizex, g_sizey; static video * g_video = 0; WNDPROC g_pUserProc = 0; HINSTANCE video::win_hInstance = 0; int video::win_iCmdShow = 0; static WNDCLASSEX * gWndClass = 0; static HACCEL hAccelTable = 0; static DWORD g_msec = 0; static int g_fps = 0, g_updates = 0, g_skips = 0; bool DisplayError(LPSTR lpstrErr, HRESULT hres = 0); // always returns false LRESULT CALLBACK InternalWndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM 
lParam); //! Create window bool WinInit(HINSTANCE hInstance, int nCmdShow, WNDCLASSEX *uwc, const char *title, bool fixedsize) { WNDCLASSEX wndclass; // Our app's windows class if(uwc) { memcpy(&wndclass, uwc, sizeof(wndclass)); g_pUserProc = uwc->lpfnWndProc; } else { memset(&wndclass, 0, sizeof(wndclass)); wndclass.hCursor = LoadCursor(NULL, IDC_ARROW); wndclass.lpszClassName = title; } wndclass.cbSize = sizeof(wndclass); wndclass.hInstance = hInstance; wndclass.lpfnWndProc = InternalWndProc; wndclass.style |= CS_HREDRAW | CS_VREDRAW; wndclass.hbrBackground = CreateSolidBrush(RGBKEY); if( !RegisterClassExA(&wndclass) ) return false; int xaddend = GetSystemMetrics(fixedsize?SM_CXFIXEDFRAME:SM_CXFRAME)*2; int yaddend = GetSystemMetrics(fixedsize?SM_CYFIXEDFRAME:SM_CYFRAME)*2 + GetSystemMetrics(SM_CYCAPTION); if(wndclass.lpszMenuName) yaddend += GetSystemMetrics(SM_CYMENU); // Setup the new window's physical parameters - and tell Windows to create it g_hAppWnd = CreateWindowA(wndclass.lpszClassName, // Window class name title, // Window caption !fixedsize ? WS_OVERLAPPEDWINDOW : // Window style WS_OVERLAPPED|WS_CAPTION|WS_SYSMENU|WS_MINIMIZEBOX, CW_USEDEFAULT, // Initial x pos: use default placement 0, // Initial y pos: not used here g_sizex+xaddend,// Initial x size g_sizey+yaddend,// Initial y size NULL, // parent window handle NULL, // window menu handle hInstance, // program instance handle NULL); // Creation parameters return g_hAppWnd != NULL; } //! 
create console window with redirection static bool RedirectIOToConsole(void) { int hConHandle; size_t lStdHandle; CONSOLE_SCREEN_BUFFER_INFO coninfo; FILE *fp; // allocate a console for this app AllocConsole(); // set the screen buffer to be big enough to let us scroll text GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &coninfo); coninfo.dwSize.Y = MAX_CONSOLE_LINES; SetConsoleScreenBufferSize(GetStdHandle(STD_OUTPUT_HANDLE), coninfo.dwSize); // redirect unbuffered STDOUT to the console lStdHandle = (size_t)GetStdHandle(STD_OUTPUT_HANDLE); hConHandle = _open_osfhandle(lStdHandle, _O_TEXT); if(hConHandle <= 0) return false; fp = _fdopen( hConHandle, "w" ); *stdout = *fp; setvbuf( stdout, NULL, _IONBF, 0 ); // redirect unbuffered STDERR to the console lStdHandle = (size_t)GetStdHandle(STD_ERROR_HANDLE); hConHandle = _open_osfhandle(lStdHandle, _O_TEXT); if(hConHandle > 0) { fp = _fdopen( hConHandle, "w" ); *stderr = *fp; setvbuf( stderr, NULL, _IONBF, 0 ); } // redirect unbuffered STDIN to the console lStdHandle = (size_t)GetStdHandle(STD_INPUT_HANDLE); hConHandle = _open_osfhandle(lStdHandle, _O_TEXT); if(hConHandle > 0) { fp = _fdopen( hConHandle, "r" ); *stdin = *fp; setvbuf( stdin, NULL, _IONBF, 0 ); } // make cout, wcout, cin, wcin, wcerr, cerr, wclog and clog // point to console as well std::ios::sync_with_stdio(); return true; } video::video() : red_mask(0xff0000), red_shift(16), green_mask(0xff00), green_shift(8), blue_mask(0xff), blue_shift(0), depth(24) { assert(g_video == 0); g_video = this; title = "Video"; running = threaded = calc_fps = false; updating = true; } //! 
optionally call it just before init() to set own void video::win_set_class(WNDCLASSEX &wcex) { gWndClass = &wcex; } void video::win_load_accelerators(int idc) { hAccelTable = LoadAccelerators(win_hInstance, MAKEINTRESOURCE(idc)); } bool video::init_console() { if(RedirectIOToConsole()) { if(!g_pImg && g_sizex && g_sizey) g_pImg = new unsigned int[g_sizex * g_sizey]; if(g_pImg) running = true; return true; } return false; } video::~video() { if(g_video) terminate(); } DWORD WINAPI thread_video(LPVOID lpParameter) { video *v = (video*)lpParameter; v->on_process(); return 0; } static bool loop_once(video *v) { // screen update notify if(int updates = g_updates) { g_updates = 0; if(g_video->updating) { g_skips += updates-1; g_fps++; } else g_skips += updates; UpdateWindow(g_hAppWnd); } // update fps DWORD msec = GetTickCount(); if(v->calc_fps && msec >= g_msec+1000) { double sec = (msec - g_msec)/1000.0; char buffer[256], n = _snprintf(buffer, 128, "%s: %d fps", v->title, int(double(g_fps + g_skips)/sec)); if(g_skips) _snprintf(buffer+n, 128, " - %d skipped = %d updates", int(g_skips/sec), int(g_fps/sec)); SetWindowTextA(g_hAppWnd, buffer); g_msec = msec; g_skips = g_fps = 0; } // event processing, including painting MSG msg; if(PeekMessage(&msg, NULL, 0, 0, PM_REMOVE)){ if( msg.message == WM_QUIT ) { v->running = false; return false; } if( !hAccelTable || !TranslateAccelerator(msg.hwnd, hAccelTable, &msg) ){ TranslateMessage(&msg); DispatchMessage(&msg); } return true; // try again } return false; } //! 
Do standard event loop void video::main_loop() { // let Windows draw and unroll the window InvalidateRect(g_hAppWnd, 0, false); g_msec = GetTickCount(); // let's stay for 0,5 sec while(g_msec + 500 > GetTickCount()) { loop_once(this); Sleep(1); } g_msec = GetTickCount(); // now, start main process if(threaded) { g_handles[0] = CreateThread ( NULL, // LPSECURITY_ATTRIBUTES security_attrs 0, // SIZE_T stacksize (LPTHREAD_START_ROUTINE) thread_video, this, // argument 0, 0); if(!g_handles[0]) { DisplayError("Can't create thread"); return; } else // harmless race is possible here g_handles[1] = CreateEvent(NULL, false, false, NULL); while(running) { while(loop_once(this)); YIELD_TO_THREAD(); // give time for processing when running on single CPU DWORD r = MsgWaitForMultipleObjects(2, g_handles, false, INFINITE, QS_ALLINPUT^QS_MOUSEMOVE); if(r == WAIT_OBJECT_0) break; // thread terminated } running = false; if(WaitForSingleObject(g_handles[0], 3000) == WAIT_TIMEOUT){ // there was not enough time for graceful shutdown, killing the example with code 1. exit(1); } if(g_handles[0]) CloseHandle(g_handles[0]); if(g_handles[1]) CloseHandle(g_handles[1]); g_handles[0] = g_handles[1] = 0; } else on_process(); } //! Refresh screen picture bool video::next_frame() { if(!running) return false; g_updates++; // Fast but inaccurate counter. The data race here is benign. if(!threaded) while(loop_once(this)); else if(g_handles[1]) { SetEvent(g_handles[1]); YIELD_TO_THREAD(); } return true; } //! Change window title void video::show_title() { if(g_hAppWnd) SetWindowTextA(g_hAppWnd, title); } #endif //__WINVIDEO_H__
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/apitrigeom.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * apitrigeom.h - header for functions to generate triangle tesselated * geometry for use with OpenGL, XGL, etc. * */ void rt_tri_fcylinder(void * tex, vector ctr, vector axis, apiflt rad); void rt_tri_cylinder(void * tex, vector ctr, vector axis, apiflt rad); void rt_tri_ring(void * tex, vector ctr, vector norm, apiflt a, apiflt b); void rt_tri_plane(void * tex, vector ctr, vector norm); void rt_tri_box(void * tex, vector min, vector max);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/triangle.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * triangle.h - This file contains the defines for triangles etc. * * $Id: triangle.h,v 1.2 2007-02-22 17:54:16 Exp $ */ object * newtri(void *, vector, vector, vector); object * newstri(void *, vector, vector, vector, vector, vector, vector); #ifdef TRIANGLE_PRIVATE #define TRIXMAJOR 0 #define TRIYMAJOR 1 #define TRIZMAJOR 2 typedef struct { unsigned int id; /* Unique Object serial number */ void * nextobj; /* pointer to next object in list */ object_methods * methods; /* this object's methods */ texture * tex; /* object texture */ vector edge2; vector edge1; vector v0; } tri; typedef struct { unsigned int id; /* Unique Object serial number */ void * nextobj; /* pointer to next object in list */ object_methods * methods; /* this object's methods */ texture * tex; /* object texture */ vector edge2; vector edge1; vector v0; vector n0; vector n1; vector n2; } stri; static int tri_bbox(void * obj, vector * min, vector * max); static void tri_intersect(tri *, ray *); static void tri_normal(tri *, vector *, ray *, vector *); static void stri_normal(stri *, vector *, ray *, vector *); #endif
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/machine.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * machine.h - This is the machine specific include file * * $Id: machine.h,v 1.2 2007-02-22 17:54:15 Exp $ */ #include <stdio.h> #include <cstdlib> #include <string.h> #include <math.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> using namespace std; #define STDTIME
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/ui.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * ui.h - defines for user interface functions * * $Id: ui.h,v 1.2 2007-02-22 17:54:16 Exp $ */ /* Different types of message, for levels of verbosity etc */ #define MSG_0 100 #define MSG_1 101 #define MSG_2 102 #define MSG_3 103 #define MSG_4 104 #define MSG_5 105 #define MSG_ERR 200 #define MSG_ABORT 300 void rt_ui_message(int, const char *); void rt_ui_progress(int); int rt_ui_checkaction(void);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/parse.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * parse.h - this file contains defines for model file reading. * * $Id: parse.h,v 1.2 2007-02-22 17:54:16 Exp $ */ #define PARSENOERR 0 #define PARSEBADFILE 1 #define PARSEBADSUBFILE 2 #define PARSEBADSYNTAX 4 #define PARSEEOF 8 #define PARSEALLOCERR 16 unsigned int readmodel(char *, SceneHandle); #ifdef PARSE_INTERNAL #define NUMTEXS 32768 #define TEXNAMELEN 24 typedef struct { double rx1; double rx2; double rx3; double ry1; double ry2; double ry3; double rz1; double rz2; double rz3; } RotMat; typedef struct { char name[TEXNAMELEN]; void * tex; } texentry; #ifdef _ERRCODE_DEFINED #define errcode errcode_t #endif//_ERRCODE_DEFINED typedef unsigned int errcode; static errcode add_texture(void * tex, char name[TEXNAMELEN]); static errcode GetString(FILE *, const char *); static errcode GetScenedefs(FILE *, SceneHandle); static errcode GetColor(FILE *, color *); static errcode GetVector(FILE *, vector *); static errcode GetTexDef(FILE *); static errcode GetTexAlias(FILE *); static errcode GetTexture(FILE *, void **); void * GetTexBody(FILE *); static errcode GetBackGnd(FILE *); static errcode GetCylinder(FILE *); static errcode GetFCylinder(FILE *); static errcode GetPolyCylinder(FILE *); static errcode GetSphere(FILE *); static errcode GetPlane(FILE *); static errcode GetRing(FILE *); static errcode GetBox(FILE *); static errcode GetVol(FILE *); static errcode GetTri(FILE *); static errcode GetSTri(FILE *); static errcode GetLight(FILE *); static errcode GetLandScape(FILE *); 
static errcode GetTPolyFile(FILE *); static errcode GetMGFFile(FILE *, SceneHandle); static errcode GetObject(FILE *, SceneHandle); #endif
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/vector.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * vector.h - This file contains declarations of vector functions * * $Id: vector.h,v 1.2 2007-02-22 17:54:17 Exp $ */ flt VDot(vector *, vector *); void VCross(vector *, vector *, vector *); flt VLength(vector *); void VNorm(vector *); void VAdd(vector *, vector *, vector *); void VSub(vector *, vector *, vector *); void VAddS(flt, vector *, vector *, vector *); vector Raypnt(ray *, flt); void VScale(vector * a, flt s); void ColorAddS(color * a, color * b, flt s); void ColorAccum(color * a, color * b); void ColorScale(color * a, flt s);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/imageio.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * imageio.h - This file deals with reading/writing image files * * $Id: imageio.h,v 1.2 2007-02-22 17:54:15 Exp $ */ /* For our puposes, we're interested only in the 3 byte per pixel 24 bit truecolor sort of file.. */ #define IMAGENOERR 0 /* no error */ #define IMAGEBADFILE 1 /* can't find or can't open the file */ #define IMAGEUNSUP 2 /* the image file is an unsupported format */ #define IMAGEALLOCERR 3 /* not enough remaining memory to load this image */ #define IMAGEREADERR 4 /* failed read, short reads etc */ int readimage(rawimage *);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/coordsys.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * coordsys.h - defines for coordinate system routines. * * $Id: coordsys.h,v 1.2 2007-02-22 17:54:15 Exp $ */ #define TWOPI 6.2831853 void xytopolar(flt, flt, flt, flt *, flt *); void xyztocyl(vector, flt, flt *, flt *); void xyztospr(vector, flt *, flt *);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/ring.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * ring.h - This file contains the defines for rings etc. * * $Id: ring.h,v 1.2 2007-02-22 17:54:16 Exp $ */ object * newring(void * tex, vector ctr, vector norm, flt in, flt out); #ifdef RING_PRIVATE typedef struct { unsigned int id; /* Unique Object serial number */ void * nextobj; /* pointer to next object in list */ object_methods * methods; /* this object's methods */ texture * tex; /* object texture */ vector ctr; vector norm; flt inrad; flt outrad; } ring; static int ring_bbox(void * obj, vector * min, vector * max); static void ring_intersect(ring *, ray *); static void ring_normal(ring *, vector *, ray * incident, vector *); #endif
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/trace.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * trace.h - This file contains the declarations and defines for the trace module * * $Id: trace.h,v 1.2 2007-02-22 17:54:16 Exp $ */ extern char *global_buffer; typedef struct { int tid; int nthr; scenedef scene; char * buffer; int startx; int stopx; int starty; int stopy; } thr_parms; typedef struct { int startx; int stopx; int starty; int stopy; } patch; typedef struct { void * tga; int iwidth; int iheight; int startx; int starty; int stopx; int stopy; char * buffer; } thr_io_parms; color trace(ray *); void * thread_trace(thr_parms * parms); void thread_trace1(thr_parms *, patch *, int depth); void thread_trace2(thr_parms *, patch *); void * thread_io(void *); void trace_shm(scenedef, /*char *,*/ int, int, int, int); void trace_region(scenedef, void *, int, int, int, int);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/extvol.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * vol.h - Volume rendering definitions etc. * * * $Id: extvol.h,v 1.2 2007-02-22 17:54:15 Exp $ */ typedef struct { unsigned int id; /* Unique Object serial number */ void * nextobj; /* pointer to next object in list */ object_methods * methods; /* this object's methods */ texture * tex; /* object texture */ vector min; vector max; flt ambient; flt diffuse; flt opacity; int samples; flt (* evaluator)(flt, flt, flt); } extvol; extvol * newextvol(void * voidtex, vector min, vector max, int samples, flt (* evaluator)(flt, flt, flt)); color ext_volume_texture(vector *, texture *, ray *);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/box.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * box.h - This file contains the defines for boxes etc. * * $Id: box.h,v 1.2 2007-02-22 17:54:15 Exp $ */ typedef struct { unsigned int id; /* Unique Object serial number */ void * nextobj; /* pointer to next object in list */ object_methods * methods; /* this object's methods */ texture * tex; /* object texture */ vector min; vector max; } box; box * newbox(void * tex, vector min, vector max); void box_intersect(box *, ray *); void box_normal(box *, vector *, ray * incident, vector *);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/tachyon_video.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "video.h" class tachyon_video : public video { public: bool updating_mode; bool recycling; bool pausing; void on_process(); void on_key(int key); }; extern class tachyon_video *video;
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/global.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * global.h - any/all global data items etc should be in this file * * $Id: global.h,v 1.2 2007-02-22 17:54:15 Exp $ * */ /* stuff moved from intersect.c */ extern object * rootobj; extern point_light * lightlist[MAXLIGHTS]; extern int numlights; extern unsigned int numobjects; extern unsigned int rt_mem_in_use; extern int parinitted; extern int graphicswindowopen;
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/imap.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * imap.h - This file contains defines etc for doing image map type things. * * $Id: imap.h,v 1.2 2007-02-22 17:54:15 Exp $ */ void ResetImage(void); void LoadImage(rawimage *); color ImageMap(rawimage *, flt, flt); rawimage * AllocateImage(char *); void DeallocateImage(rawimage *); void ResetImages(void);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/cylinder.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * cylinder.h - This file contains the defines for cylinders etc. * * $Id: cylinder.h,v 1.2 2007-02-22 17:54:15 Exp $ */ object * newcylinder(void *, vector, vector, flt); object * newfcylinder(void *, vector, vector, flt); #ifdef CYLINDER_PRIVATE typedef struct { unsigned int id; /* Unique Object serial number */ void * nextobj; /* pointer to next object in list */ object_methods * methods; /* this object's methods */ texture * tex; /* object texture */ vector ctr; vector axis; flt rad; } cylinder; static void cylinder_intersect(cylinder *, ray *); static void fcylinder_intersect(cylinder *, ray *); static int cylinder_bbox(void * obj, vector * min, vector * max); static int fcylinder_bbox(void * obj, vector * min, vector * max); static void cylinder_normal(cylinder *, vector *, ray *, vector *); #endif
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/vol.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * vol.h - Volume rendering definitions etc. * * * $Id: vol.h,v 1.2 2007-02-22 17:54:17 Exp $ */ void * newscalarvol(void * intex, vector min, vector max, int xs, int ys, int zs, char * fname, scalarvol * invol); void LoadVol(scalarvol *); color scalar_volume_texture(vector *, texture *, ray *);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/jpeg.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * jpeg.h - This file deals with JPEG format image files (reading/writing) * * $Id: jpeg.h,v 1.2 2007-02-22 17:54:15 Exp $ */ int readjpeg(char * name, int * xres, int * yres, unsigned char **imgdata);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/api.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /***************************************************************************** * api.h - The declarations and prototypes needed so that 3rd party driver * * code can run the raytracer. Third party driver code should * * only use the functions in this header file to interface with * * the rendering engine. * *************************************************************************** */ /* * $Id: api.h,v 1.2 2007-02-22 17:54:15 Exp $ */ /********************************************/ /* Types defined for use with the API calls */ /********************************************/ #ifdef USESINGLEFLT typedef float apiflt; /* generic floating point number */ #else typedef double apiflt; /* generic floating point number */ #endif typedef void * SceneHandle; typedef struct { int texturefunc; /* which texture function to use */ color col; /* base object color */ int shadowcast; /* does the object cast a shadow */ apiflt ambient; /* ambient lighting */ apiflt diffuse; /* diffuse reflection */ apiflt specular; /* specular reflection */ apiflt opacity; /* how opaque the object is */ vector ctr; /* origin of texture */ vector rot; /* rotation of texture around origin */ vector scale; /* scale of texture in x,y,z */ vector uaxs; /* planar map u axis */ vector vaxs; /* planar map v axis */ char imap[96]; /* name of image map */ } apitexture; /******************************************************************* * NOTE: The value passed in apitexture.texturefunc corresponds to * the meanings given in 
this table: * * 0 - No texture function is applied other than standard lighting. * 1 - 3D checkerboard texture. Red & Blue checkers through 3d space. * 2 - Grit texture, roughens up the surface of the object a bit. * 3 - 3D marble texture. Makes a 3D swirl pattern through the object. * 4 - 3D wood texture. Makes a 3D wood pattern through the object. * 5 - 3D gradient noise function. * 6 - I've forgotten :-) * 7 - Cylindrical Image Map **** IMAGE MAPS REQUIRE the filename * 8 - Spherical Image Map of the image be put in imap[] * 9 - Planar Image Map part of the texture... * planar requires uaxs, and vaxs.. * *******************************************************************/ /********************************************/ /* Functions implemented to provide the API */ /********************************************/ vector rt_vector(apiflt x, apiflt y, apiflt z); /* helper to make vectors */ color rt_color(apiflt r, apiflt g, apiflt b); /* helper to make colors */ void rt_initialize();/* reset raytracer, memory deallocation */ void rt_finalize(void); /* close down for good.. 
*/ SceneHandle rt_newscene(void); /* allocate new scene */ void rt_deletescene(SceneHandle); /* delete a scene */ void rt_renderscene(SceneHandle); /* raytrace the current scene */ void rt_outputfile(SceneHandle, const char * outname); void rt_resolution(SceneHandle, int hres, int vres); void rt_verbose(SceneHandle, int v); void rt_rawimage(SceneHandle, unsigned char *rawimage); void rt_background(SceneHandle, color); /* Parameter values for rt_boundmode() */ #define RT_BOUNDING_DISABLED 0 #define RT_BOUNDING_ENABLED 1 void rt_boundmode(SceneHandle, int); void rt_boundthresh(SceneHandle, int); /* Parameter values for rt_displaymode() */ #define RT_DISPLAY_DISABLED 0 #define RT_DISPLAY_ENABLED 1 void rt_displaymode(SceneHandle, int); void rt_scenesetup(SceneHandle, char *, int, int, int); /* scene, output filename, horizontal resolution, vertical resolution, verbose mode */ void rt_camerasetup(SceneHandle, apiflt, apiflt, int, int, vector, vector, vector); /* camera parms: scene, zoom, aspectratio, antialiasing, raydepth, camera center, view direction, up direction */ void * rt_texture(apitexture *); /* pointer to the texture struct that would have been passed to each object() call in older revisions.. 
*/ void rt_light(void * , vector, apiflt); /* add a light */ /* light parms: texture, center, radius */ void rt_sphere(void *, vector, apiflt); /* add a sphere */ /* sphere parms: texture, center, radius */ void rt_scalarvol(void *, vector, vector, int, int, int, char *, void *); void rt_extvol(void *, vector, vector, int, apiflt (* evaluator)(apiflt, apiflt, apiflt)); void rt_box(void *, vector, vector); /* box parms: texture, min, max */ void rt_plane(void *, vector, vector); /* plane parms: texture, center, normal */ void rt_ring(void *, vector, vector, apiflt, apiflt); /* ring parms: texture, center, normal, inner, outer */ void rt_tri(void *, vector, vector, vector); /* tri parms: texture, vertex 0, vertex 1, vertex 2 */ void rt_stri(void *, vector, vector, vector, vector, vector, vector); /* stri parms: texture, vertex 0, vertex 1, vertex 2, norm 0, norm 1, norm 2 */ void rt_heightfield(void *, vector, int, int, apiflt *, apiflt, apiflt); /* field parms: texture, center, m, n, field, wx, wy */ void rt_landscape(void *, int, int, vector, apiflt, apiflt); void rt_quadsphere(void *, vector, apiflt); /* add quadric sphere */ /* sphere parms: texture, center, radius */ void rt_cylinder(void *, vector, vector, apiflt); void rt_fcylinder(void *, vector, vector, apiflt); void rt_polycylinder(void *, vector *, int, apiflt); /* new texture handling routines */ void rt_tex_color(void * voidtex, color col); #define RT_PHONG_PLASTIC 0 #define RT_PHONG_METAL 1 void rt_tex_phong(void * voidtex, apiflt phong, apiflt phongexp, int type);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/intersect.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * intersect.h - This file contains the declarations and defines for the * functions that manage intersection, bounding and CSG.. * * $Id: intersect.h,v 1.2 2007-02-22 17:54:15 Exp $ */ unsigned int new_objectid(void); unsigned int max_objectid(void); void add_object(object *); void reset_object(void); void free_objects(object *); void intersect_objects(ray *); void reset_intersection(intersectstruct *); void add_intersection(flt, object *, ray *); int closest_intersection(flt *, object **, intersectstruct *); int next_intersection(object **, object *, intersectstruct *); int shadow_intersection(intersectstruct * intstruct, flt maxdist);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/shade.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * shade.h - This file contains declarations and definitions for the shader. * * $Id: shade.h,v 1.2 2007-02-22 17:54:16 Exp $ */ void reset_lights(void); void add_light(point_light *); color shader(ray *); color shade_reflection(ray *, vector *, vector *, flt); color shade_transmission(ray *, vector *, flt); flt shade_phong(ray * incident, vector * hit, vector * N, vector * L, flt specpower);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/objbound.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * objbound.h - defines for object bounding code. * * $Id: objbound.h,v 1.2 2007-02-22 17:54:15 Exp $ */ void dividespace(int, object **); #ifdef OBJBOUND_PRIVATE static void globalbound(object **, vector *, vector *); static int objinside(object * obj, vector * min, vector * max); static int countobj(object *); static void movenextobj(object *, object **); static void octreespace(object **, int); #endif
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/camera.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * camera.h - This file contains the defines for camera routines etc. * * $Id: camera.h,v 1.2 2007-02-22 17:54:15 Exp $ */ ray camray(scenedef *, int, int);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/render.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * render.h - This file contains the defines for the top level functions * * $Id: render.h,v 1.2 2007-02-22 17:54:16 Exp $ */ void renderscene(scenedef);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/tachyon/linux/include/texture.h
//============================================================== // // SAMPLE SOURCE CODE - SUBJECT TO THE TERMS OF SAMPLE CODE LICENSE AGREEMENT, // http://software.intel.com/en-us/articles/intel-sample-source-code-license-agreement/ // // Copyright (C) Intel Corporation // // THIS FILE IS PROVIDED "AS IS" WITH NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT // NOT LIMITED TO ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR // PURPOSE, NON-INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS. // // ============================================================= /* The original source for this example is Copyright (c) 1994-2008 John E. Stone All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * texture.h This file contains all of the includes and defines for the texture * mapping part of the shader. * * $Id: texture.h,v 1.2 2007-02-22 17:54:16 Exp $ */ void InitTextures(void); color standard_texture(vector *, texture *, ray *); color image_cyl_texture(vector *, texture *, ray *); color image_sphere_texture(vector *, texture *, ray *); color image_plane_texture(vector *, texture *, ray *); color checker_texture(vector *, texture *, ray *); color cyl_checker_texture(vector *, texture *, ray *); color grit_texture(vector *, texture *, ray *); color wood_texture(vector *, texture *, ray *); color marble_texture(vector *, texture *, ray *); color gnoise_texture(vector *, texture *, ray *); int Noise(flt, flt, flt); void InitTextures(void);
h
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/matrix_multiply_vtune/src/matrix.cpp
//============================================================== // Copyright 2019 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <malloc.h> #include <iostream> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities//include/dpc_common.hpp #include "dpc_common.hpp" #include "multiply.hpp" typedef unsigned long long UINT64; #define xstr(s) x_str(s) #define x_str(s) #s using namespace std; // routine to initialize an array with data void InitArr(TYPE row, TYPE col, TYPE off, TYPE a[][NUM]) { int i, j; for (i = 0; i < NUM; i++) { for (j = 0; j < NUM; j++) { a[i][j] = row * i + col * j + off; } } } // routine to print out contents of small arrays void PrintArr(char *name, TYPE Array[][NUM]) { int i, j; cout << "\n"<<name<<"\n"; for (i = 0; i < NUM; i++) { for (j = 0; j < NUM; j++) { cout << Array[i][j] << "\t"; } cout << endl; } } int main() { char *buf1, *buf2, *buf3, *buf4; char *addr1, *addr2, *addr3, *addr4; Array *a, *b, *c, *t; int Offset_Addr1 = 128, Offset_Addr2 = 192, Offset_Addr3 = 0, Offset_Addr4 = 64; // malloc arrays space buf1 = (char *)malloc(NUM * NUM * (sizeof(double)) + 1024); cout << "Address of buf1 = " << (void*)buf1 << endl; addr1 = buf1 + 256 - ((UINT64)buf1 % 256) + (UINT64)Offset_Addr1; cout << "Offset of buf1 = " << (void*)addr1 << endl; buf2 = (char *)malloc(NUM * NUM * (sizeof(double)) + 1024); cout << "Address of buf2 = " << (void*)buf2 << endl; addr2 = buf2 + 256 - ((UINT64)buf2 % 256) + (UINT64)Offset_Addr2; cout << "Offset of buf2 = " << (void*)addr2 << endl; buf3 = (char *)malloc(NUM * NUM * (sizeof(double)) + 1024); cout << "Address of buf3 = " << (void*)buf3 << endl; addr3 = buf3 + 256 - ((UINT64)buf3 % 256) + (UINT64)Offset_Addr3; cout << "Offset of buf3 = " << (void*)addr3 << endl; buf4 = (char *)malloc(NUM * NUM * (sizeof(double)) + 1024); cout << "Address of buf4 = " << (void*)buf4 << endl; addr4 = buf4 
+ 256 - ((UINT64)buf4 % 256) + (UINT64)Offset_Addr4; cout << "Offset of buf4 = " << (void*)addr4 << endl; a = (Array *)addr1; b = (Array *)addr2; c = (Array *)addr3; t = (Array *)addr4; // initialize the arrays with data InitArr(3, -2, 1, a); InitArr(-2, 1, 3, b); cout << "Using multiply kernel: "<< xstr(MULTIPLY)<< "\n"; // start timing the matrix multiply code dpc_common::TimeInterval matrix_time;; ParallelMultiply(NUM, a, b, c, t); double matrix_elapsed = matrix_time.Elapsed(); cout << "Elapsed Time: " << matrix_elapsed << "s\n"; // free memory free(buf1); free(buf2); free(buf3); free(buf4); }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/matrix_multiply_vtune/src/multiply.hpp
//============================================================== // Copyright 2019 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= constexpr int MAXTHREADS=16; constexpr int NUM=1024; constexpr int MATRIXTILESIZE=16; constexpr int WPT=8; #include <sycl/sycl.hpp> // exception handler /* The exception_list parameter is an iterable list of std::exception_ptr objects. But those pointers are not always directly readable. So, we rethrow the pointer, catch it, and then we have the exception itself. Note: depending upon the operation there may be several exceptions. */ auto exception_handler = [](sycl::exception_list exceptionList) { for (std::exception_ptr const& e : exceptionList) { try { std::rethrow_exception(e); } catch (sycl::exception const& e) { std::terminate(); // exit the process immediately. } } }; typedef float TYPE; typedef TYPE Array[NUM]; // Select which multiply kernel to use via the following macro so that the // kernel being used can be reported when the test is run. #define MULTIPLY multiply1 extern void multiply1(int msize, int tidx, int numt, TYPE a[][NUM], TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]); extern void multiply1_1(int msize, int tidx, int numt, TYPE a[][NUM], TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]); extern void multiply1_2(int msize, int tidx, int numt, TYPE a[][NUM], TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]); extern void ParallelMultiply(int msize, TYPE a[][NUM], TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]);
hpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/VTuneProfiler/matrix_multiply_vtune/src/multiply.cpp
//============================================================== // Copyright 2019 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <array> #include <sycl/sycl.hpp> // matrix multiply routines #include "multiply.hpp" using namespace sycl; using namespace std; template <typename T> class Matrix1; template <typename T> class Matrix1_1; template <typename T> class Matrix1_2; // Basic matrix multiply void multiply1(int msize, int tidx, int numt, TYPE a[][NUM], TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]) { int i, j, k; // Declare a deviceQueue default_selector device; queue q(device, exception_handler); cout << "Running on " << q.get_device().get_info<sycl::info::device::name>() << "\n"; // Declare a 2 dimensional range range<2> matrix_range{NUM, NUM}; // Declare 3 buffers and Initialize them buffer bufferA((TYPE*)a, range(matrix_range)); buffer bufferB((TYPE*)b, range(matrix_range)); buffer bufferC((TYPE*)c, range(matrix_range)); // Submit our job to the queue q.submit([&](sycl::handler& h) { // Declare 3 accessors to our buffers. 
The first 2 read and the last // read_write accessor accessorA(bufferA, h, read_only); accessor accessorB(bufferB, h, read_only); accessor accessorC(bufferC, h); // Execute matrix multiply in parallel over our matrix_range // ind is an index into this range h.parallel_for<class Matrix1<TYPE> >(matrix_range,[=](sycl::id<2> ind) { int k; for (k = 0; k < NUM; k++) { // Perform computation ind[0] is row, ind[1] is col accessorC[ind[0]][ind[1]] += accessorA[ind[0]][k] * accessorB[k][ind[1]]; } }); }).wait_and_throw(); } // Replaces accessorC reference with a local variable void multiply1_1(int msize, int tidx, int numt, TYPE a[][NUM], TYPE b[][NUM],TYPE c[][NUM], TYPE t[][NUM]) { int i, j, k; // Declare a deviceQueue default_selector device; queue q(device, exception_handler); cout << "Running on " << q.get_device().get_info<sycl::info::device::name>() << "\n"; // Declare a 2 dimensional range range<2> matrix_range{NUM, NUM}; // Declare 3 buffers and Initialize them buffer bufferA((TYPE*)a, range(matrix_range)); buffer bufferB((TYPE*)b, range(matrix_range)); buffer bufferC((TYPE*)c, range(matrix_range)); // Submit our job to the queue q.submit([&](sycl::handler& h) { // Declare 3 accessors to our buffers. 
The first 2 read and the last // read_write accessor accessorA(bufferA, h, read_only); accessor accessorB(bufferB, h, read_only); accessor accessorC(bufferC, h); // Execute matrix multiply in parallel over our matrix_range // ind is an index into this range h.parallel_for<class Matrix1_1<TYPE>>(matrix_range,[=](sycl::id<2> ind) { int k; TYPE acc = 0.0; for (k = 0; k < NUM; k++) { // Perform computation ind[0] is row, ind[1] is col acc += accessorA[ind[0]][k] * accessorB[k][ind[1]]; } accessorC[ind[0]][ind[1]] = acc; }); }).wait_and_throw(); } // Replaces accessorC reference with a local variable and adds matrix tiling void multiply1_2(int msize, int tidx, int numt, TYPE a[][NUM], TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]) { int i, j, k; // Declare a deviceQueue default_selector device; queue q(device, exception_handler); cout << "Running on " << q.get_device().get_info<sycl::info::device::name>() << "\n"; // Declare a 2 dimensional range range<2> matrix_range{NUM, NUM}; range<2> tile_range{MATRIXTILESIZE, MATRIXTILESIZE}; // Declare 3 buffers and Initialize them buffer bufferA((TYPE*)a, range(matrix_range)); buffer bufferB((TYPE*)b, range(matrix_range)); buffer bufferC((TYPE*)c, range(matrix_range)); // Submit our job to the queue q.submit([&](sycl::handler& h) { // Declare 3 accessors to our buffers. 
The first 2 read and the last // read_write accessor accessorA(bufferA, h, read_only); accessor accessorB(bufferB, h, read_only); accessor accessorC(bufferC, h); // Create matrix tiles accessor<TYPE, 2, sycl::access::mode::read_write, sycl::access::target::local> aTile(sycl::range<2>(MATRIXTILESIZE, MATRIXTILESIZE), h); accessor<TYPE, 2, sycl::access::mode::read_write, sycl::access::target::local> bTile(sycl::range<2>(MATRIXTILESIZE, MATRIXTILESIZE), h); // Execute matrix multiply in parallel over our matrix_range // ind is an index into this range h.parallel_for<class Matrix1_2<TYPE>>(sycl::nd_range<2>(matrix_range,tile_range),[=](sycl::nd_item<2> it) { int k; const int numTiles = NUM / MATRIXTILESIZE; const int row = it.get_local_id(0); const int col = it.get_local_id(1); const int globalRow = MATRIXTILESIZE * it.get_group(0) + row; const int globalCol = MATRIXTILESIZE * it.get_group(1) + col; TYPE acc = 0.0; for (int t = 0; t < numTiles; t++) { const int tiledRow = MATRIXTILESIZE * t + row; const int tiledCol = MATRIXTILESIZE * t + col; aTile[row][col] = accessorA[globalRow][tiledCol]; bTile[row][col] = accessorB[tiledRow][globalCol]; it.barrier(sycl::access::fence_space::local_space); for (k = 0; k < MATRIXTILESIZE; k++) { // Perform computation ind[0] is row, ind[1] is col acc += aTile[row][k] * bTile[k][col]; } it.barrier(sycl::access::fence_space::local_space); } accessorC[globalRow][globalCol] = acc; }); }).wait_and_throw(); } void ParallelMultiply(int msize, TYPE a[][NUM], TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]) { int NTHREADS = MAXTHREADS; int MSIZE = NUM; MULTIPLY(MSIZE, NTHREADS, 0, a, b, c, t); }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/Advisor/matrix_multiply_advisor/src/matrix.cpp
//============================================================== // Copyright 2019 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <malloc.h> #include <iostream> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities//include/dpc_common.hpp #include "dpc_common.hpp" #include "multiply.hpp" typedef unsigned long long UINT64; #define xstr(s) x_str(s) #define x_str(s) #s using namespace std; // routine to initialize an array with data void InitArr(TYPE row, TYPE col, TYPE off, TYPE a[][NUM]) { int i, j; for (i = 0; i < NUM; i++) { for (j = 0; j < NUM; j++) { a[i][j] = row * i + col * j + off; } } } // routine to print out contents of small arrays void PrintArr(char *name, TYPE Array[][NUM]) { int i, j; cout << "\n"<<name<<"\n"; for (i = 0; i < NUM; i++) { for (j = 0; j < NUM; j++) { cout << Array[i][j] << "\t"; } cout << endl; } } int main() { char *buf1, *buf2, *buf3, *buf4; char *addr1, *addr2, *addr3, *addr4; Array *a, *b, *c, *t; int Offset_Addr1 = 128, Offset_Addr2 = 192, Offset_Addr3 = 0, Offset_Addr4 = 64; // malloc arrays space buf1 = (char *)malloc(NUM * NUM * (sizeof(double)) + 1024); cout << "Address of buf1 = " << (void*)buf1 << endl; addr1 = buf1 + 256 - ((UINT64)buf1 % 256) + (UINT64)Offset_Addr1; cout << "Offset of buf1 = " << (void*)addr1 << endl; buf2 = (char *)malloc(NUM * NUM * (sizeof(double)) + 1024); cout << "Address of buf2 = " << (void*)buf2 << endl; addr2 = buf2 + 256 - ((UINT64)buf2 % 256) + (UINT64)Offset_Addr2; cout << "Offset of buf2 = " << (void*)addr2 << endl; buf3 = (char *)malloc(NUM * NUM * (sizeof(double)) + 1024); cout << "Address of buf3 = " << (void*)buf3 << endl; addr3 = buf3 + 256 - ((UINT64)buf3 % 256) + (UINT64)Offset_Addr3; cout << "Offset of buf3 = " << (void*)addr3 << endl; buf4 = (char *)malloc(NUM * NUM * (sizeof(double)) + 1024); cout << "Address of buf4 = " << (void*)buf4 << endl; addr4 = buf4 
+ 256 - ((UINT64)buf4 % 256) + (UINT64)Offset_Addr4; cout << "Offset of buf4 = " << (void*)addr4 << endl; a = (Array *)addr1; b = (Array *)addr2; c = (Array *)addr3; t = (Array *)addr4; // initialize the arrays with data InitArr(3, -2, 1, a); InitArr(-2, 1, 3, b); cout << "Using multiply kernel: "<< xstr(MULTIPLY)<< "\n"; // start timing the matrix multiply code dpc_common::TimeInterval matrix_time;; ParallelMultiply(NUM, a, b, c, t); double matrix_elapsed = matrix_time.Elapsed(); cout << "Elapsed Time: " << matrix_elapsed << "s\n"; // free memory free(buf1); free(buf2); free(buf3); free(buf4); }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/Advisor/matrix_multiply_advisor/src/multiply.hpp
//============================================================== // Copyright 2019 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= constexpr int MAXTHREADS=16; constexpr int NUM=1024; constexpr int MATRIXTILESIZE=16; constexpr int WPT=8; #include <sycl/sycl.hpp> // exception handler /* The exception_list parameter is an iterable list of std::exception_ptr objects. But those pointers are not always directly readable. So, we rethrow the pointer, catch it, and then we have the exception itself. Note: depending upon the operation there may be several exceptions. */ auto exception_handler = [](sycl::exception_list exceptionList) { for (std::exception_ptr const& e : exceptionList) { try { std::rethrow_exception(e); } catch (sycl::exception const& e) { std::terminate(); // exit the process immediately. } } }; typedef float TYPE; typedef TYPE Array[NUM]; // Select which multiply kernel to use via the following macro so that the // kernel being used can be reported when the test is run. #define MULTIPLY multiply1_1 extern void multiply1(int msize, int tidx, int numt, TYPE a[][NUM], TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]); extern void multiply1_1(int msize, int tidx, int numt, TYPE a[][NUM], TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]); extern void multiply1_2(int msize, int tidx, int numt, TYPE a[][NUM], TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]); extern void ParallelMultiply(int msize, TYPE a[][NUM], TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]);
hpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/Advisor/matrix_multiply_advisor/src/multiply.cpp
//============================================================== // Copyright 2019 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <array> #include <sycl/sycl.hpp> // matrix multiply routines #include "multiply.hpp" using namespace sycl; using namespace std; template <typename T> class Matrix1; template <typename T> class Matrix1_1; template <typename T> class Matrix1_2; // Basic matrix multiply void multiply1(int msize, int tidx, int numt, TYPE a[][NUM], TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]) { int i, j, k; // Declare a deviceQueue default_selector device; queue q(device, exception_handler); cout << "Running on " << q.get_device().get_info<sycl::info::device::name>() << "\n"; // Declare a 2 dimensional range range<2> matrix_range{NUM, NUM}; // Declare 3 buffers and Initialize them buffer bufferA((TYPE*)a, range(matrix_range)); buffer bufferB((TYPE*)b, range(matrix_range)); buffer bufferC((TYPE*)c, range(matrix_range)); // Submit our job to the queue q.submit([&](sycl::handler& h) { // Declare 3 accessors to our buffers. 
The first 2 read and the last // read_write accessor accessorA(bufferA, h, read_only); accessor accessorB(bufferB, h, read_only); accessor accessorC(bufferC, h); // Execute matrix multiply in parallel over our matrix_range // ind is an index into this range h.parallel_for<class Matrix1<TYPE> >(matrix_range,[=](sycl::id<2> ind) { int k; for (k = 0; k < NUM; k++) { // Perform computation ind[0] is row, ind[1] is col accessorC[ind[0]][ind[1]] += accessorA[ind[0]][k] * accessorB[k][ind[1]]; } }); }).wait_and_throw(); } // Replaces accessorC reference with a local variable void multiply1_1(int msize, int tidx, int numt, TYPE a[][NUM], TYPE b[][NUM],TYPE c[][NUM], TYPE t[][NUM]) { int i, j, k; // Declare a deviceQueue default_selector device; queue q(device, exception_handler); cout << "Running on " << q.get_device().get_info<sycl::info::device::name>() << "\n"; // Declare a 2 dimensional range range<2> matrix_range{NUM, NUM}; // Declare 3 buffers and Initialize them buffer bufferA((TYPE*)a, range(matrix_range)); buffer bufferB((TYPE*)b, range(matrix_range)); buffer bufferC((TYPE*)c, range(matrix_range)); // Submit our job to the queue q.submit([&](sycl::handler& h) { // Declare 3 accessors to our buffers. 
The first 2 read and the last // read_write accessor accessorA(bufferA, h, read_only); accessor accessorB(bufferB, h, read_only); accessor accessorC(bufferC, h); // Execute matrix multiply in parallel over our matrix_range // ind is an index into this range h.parallel_for<class Matrix1_1<TYPE>>(matrix_range,[=](sycl::id<2> ind) { int k; TYPE acc = 0.0; for (k = 0; k < NUM; k++) { // Perform computation ind[0] is row, ind[1] is col acc += accessorA[ind[0]][k] * accessorB[k][ind[1]]; } accessorC[ind[0]][ind[1]] = acc; }); }).wait_and_throw(); } // Replaces accessorC reference with a local variable and adds matrix tiling void multiply1_2(int msize, int tidx, int numt, TYPE a[][NUM], TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]) { int i, j, k; // Declare a deviceQueue default_selector device; queue q(device, exception_handler); cout << "Running on " << q.get_device().get_info<sycl::info::device::name>() << "\n"; // Declare a 2 dimensional range range<2> matrix_range{NUM, NUM}; range<2> tile_range{MATRIXTILESIZE, MATRIXTILESIZE}; // Declare 3 buffers and Initialize them buffer bufferA((TYPE*)a, range(matrix_range)); buffer bufferB((TYPE*)b, range(matrix_range)); buffer bufferC((TYPE*)c, range(matrix_range)); // Submit our job to the queue q.submit([&](sycl::handler& h) { // Declare 3 accessors to our buffers. 
The first 2 read and the last // read_write accessor accessorA(bufferA, h, read_only); accessor accessorB(bufferB, h, read_only); accessor accessorC(bufferC, h); // Create matrix tiles accessor<TYPE, 2, sycl::access::mode::read_write, sycl::access::target::local> aTile(sycl::range<2>(MATRIXTILESIZE, MATRIXTILESIZE), h); accessor<TYPE, 2, sycl::access::mode::read_write, sycl::access::target::local> bTile(sycl::range<2>(MATRIXTILESIZE, MATRIXTILESIZE), h); // Execute matrix multiply in parallel over our matrix_range // ind is an index into this range h.parallel_for<class Matrix1_2<TYPE>>(sycl::nd_range<2>(matrix_range,tile_range),[=](sycl::nd_item<2> it) { int k; const int numTiles = NUM / MATRIXTILESIZE; const int row = it.get_local_id(0); const int col = it.get_local_id(1); const int globalRow = MATRIXTILESIZE * it.get_group(0) + row; const int globalCol = MATRIXTILESIZE * it.get_group(1) + col; TYPE acc = 0.0; for (int t = 0; t < numTiles; t++) { const int tiledRow = MATRIXTILESIZE * t + row; const int tiledCol = MATRIXTILESIZE * t + col; aTile[row][col] = accessorA[globalRow][tiledCol]; bTile[row][col] = accessorB[tiledRow][globalCol]; it.barrier(sycl::access::fence_space::local_space); for (k = 0; k < MATRIXTILESIZE; k++) { // Perform computation ind[0] is row, ind[1] is col acc += aTile[row][k] * bTile[k][col]; } it.barrier(sycl::access::fence_space::local_space); } accessorC[globalRow][globalCol] = acc; }); }).wait_and_throw(); } void ParallelMultiply(int msize, TYPE a[][NUM], TYPE b[][NUM], TYPE c[][NUM], TYPE t[][NUM]) { int NTHREADS = MAXTHREADS; int MSIZE = NUM; MULTIPLY(MSIZE, NTHREADS, 0, a, b, c, t); }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/Benchmarks/STREAM/src/stream.cpp
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /* Copyright 2020-2021: Intel Corporation (oneAPI modifications) */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. 
Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ # include <stdio.h> # include <unistd.h> # include <math.h> # include <float.h> # include <limits.h> # include <sys/time.h> # include <sycl/sycl.hpp> # include <iostream> /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. * This means the each array must be at least 1 GB, or 128M elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. The new default size is large enough for caches * up to 20 MB. 
* Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M elements * per array. */ #ifndef STREAM_ARRAY_SIZE # define STREAM_ARRAY_SIZE 10000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES<=1 # define NTIMES 10 #endif #endif #ifndef NTIMES # define NTIMES 10 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET # define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. * If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! 
* * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) above. * * The preprocessor directive "TUNED" does not do much -- it simply causes the * code to call separate functions to execute each kernel. Trivial versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. * * * 4) Optional: Mail the results to [email protected] * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. * * Thanks! 
* *-----------------------------------------------------------------------*/ # define HLINE "-------------------------------------------------------------\n" # ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) # endif # ifndef MAX # define MAX(x,y) ((x)>(y)?(x):(y)) # endif #ifndef STREAM_TYPE #define STREAM_TYPE double #endif STREAM_TYPE *a, *b, *c; static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; static char label[4][12] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = { 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE }; extern double mysecond(); extern void checkSTREAMresults (); #ifdef TUNED extern void tuned_STREAM_Copy(); extern void tuned_STREAM_Scale(STREAM_TYPE scalar); extern void tuned_STREAM_Add(); extern void tuned_STREAM_Triad(STREAM_TYPE scalar); #endif #ifdef _OPENMP extern int omp_get_num_threads(); #endif int checktick(); /* oneAPI modifications: */ /* This is a global variable to allow the tuned_* implementations * to have the same function signature as the original version. */ sycl::queue q; int main(void) { /* oneAPI modifications: */ /* The default selector will likely choose a GPU, then a CPU. * We print the platform (SYCL implementation) and device information * so the user knows where they are running. */ q = sycl::default_selector{}; auto d = q.get_device(); auto p = d.get_platform(); std::cerr << "SYCL Platform: " << p.get_info<sycl::info::platform::name>() << std::endl; std::cerr << "SYCL Device: " << d.get_info<sycl::info::device::name>() << std::endl; /* oneAPI modifications: */ /* SYCL 2020 unified shared memory keeps the oneAPI implemenation as similar * to the original version as possible. 
*/ a = sycl::malloc_shared<STREAM_TYPE>(STREAM_ARRAY_SIZE, q); b = sycl::malloc_shared<STREAM_TYPE>(STREAM_ARRAY_SIZE, q); c = sycl::malloc_shared<STREAM_TYPE>(STREAM_ARRAY_SIZE, q); int quantum; int BytesPerWord; int k; ssize_t j; STREAM_TYPE scalar; double t, times[4][NTIMES]; /* --- SETUP --- determine precision and check timing --- */ printf(HLINE); printf("STREAM version $Revision: 5.10 $\n"); printf(HLINE); BytesPerWord = sizeof(STREAM_TYPE); printf("This system uses %d bytes per array element.\n", BytesPerWord); printf(HLINE); #ifdef N printf("***** WARNING: ******\n"); printf(" It appears that you set the preprocessor variable N when compiling this code.\n"); printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n"); printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE); printf("***** WARNING: ******\n"); #endif printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET); printf("Memory per array = %.1f MiB (= %.1f GiB).\n", BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0), BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0)); printf("Total memory required = %.1f MiB (= %.1f GiB).\n", (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.), (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.)); printf("Each kernel will be executed %d times.\n", NTIMES); printf(" The *best* time for each kernel (excluding the first iteration)\n"); printf(" will be used to compute the reported bandwidth.\n"); #ifdef _OPENMP printf(HLINE); #pragma omp parallel { #pragma omp master { k = omp_get_num_threads(); printf ("Number of Threads requested = %i\n",k); } } #endif #ifdef _OPENMP k = 0; #pragma omp parallel #pragma omp atomic k++; printf ("Number of Threads counted = %i\n",k); #endif /* Get initial value for system clock. 
*/ #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; } printf(HLINE); if ( (quantum = checktick()) >= 1) printf("Your clock granularity/precision appears to be " "%d microseconds.\n", quantum); else { printf("Your clock granularity appears to be " "less than one microsecond.\n"); quantum = 1; } t = mysecond(); #pragma omp parallel for for (j = 0; j < STREAM_ARRAY_SIZE; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order" " of %d microseconds.\n", (int) t ); printf(" (= %d clock ticks)\n", (int) (t/quantum) ); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar = 3.0; for (k=0; k<NTIMES; k++) { times[0][k] = mysecond(); #ifdef TUNED /* oneAPI modifications: */ /* SYCL kernels are asynchronous. We synchronize outside of the tuned implemenation. */ tuned_STREAM_Copy(); q.wait(); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]; #endif times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef TUNED /* oneAPI modifications: */ /* SYCL kernels are asynchronous. We synchronize outside of the tuned implemenation. */ tuned_STREAM_Scale(scalar); q.wait(); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) b[j] = scalar*c[j]; #endif times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED /* oneAPI modifications: */ /* SYCL kernels are asynchronous. We synchronize outside of the tuned implemenation. 
*/ tuned_STREAM_Add(); q.wait(); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]+b[j]; #endif times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED /* oneAPI modifications: */ /* SYCL kernels are asynchronous. We synchronize outside of the tuned implemenation. */ tuned_STREAM_Triad(scalar); q.wait(); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) a[j] = b[j]+scalar*c[j]; #endif times[3][k] = mysecond() - times[3][k]; } /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } } /* oneAPI modifications: */ /* This is here to increase the likelihood that someone running this code will * comply with license term 3b, which requires the disclosure of the use of a * tuned version of the benchmark when publishing results. */ #ifdef TUNED printf("***** NOTICE: ******\n"); printf("Results based on modified source code or on runs not in\n"); printf("accordance with the STREAM Run Rules must be clearly labelled whenever they are published.\n"); printf("Examples of proper labelling include:\n"); printf(" \"tuned STREAM benchmark results\"\n"); printf(" \"based on a variant of the STREAM benchmark code\"\n"); printf("Other comparable, clear, and reasonable labelling is acceptable.\n"); printf("***** NOTICE: ******\n"); printf(HLINE); #endif printf("Function Best Rate MB/s Avg time Min time Max time\n"); for (j=0; j<4; j++) { avgtime[j] = avgtime[j]/(double)(NTIMES-1); printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j], 1.0E-06 * bytes[j]/mintime[j], avgtime[j], mintime[j], maxtime[j]); } printf(HLINE); /* --- Check Results --- */ checkSTREAMresults (); printf(HLINE); /* oneAPI modifications: */ /* SYCL 2020 unified shared memory keeps the oneAPI implemenation as similar * to the original version as possible. 
*/ sycl::free(c, q); sycl::free(b, q); sycl::free(a, q); return 0; } # define M 20 int checktick() { int i, minDelta, Delta; double t1, t2, timesfound[M]; /* Collect a sequence of M unique time values from the system. */ for (i = 0; i < M; i++) { t1 = mysecond(); while( ((t2=mysecond()) - t1) < 1.0E-6 ) ; timesfound[i] = t1 = t2; } /* * Determine the minimum difference between these M values. * This result will be our estimate (in microseconds) for the * clock granularity. */ minDelta = 1000000; for (i = 1; i < M; i++) { Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1])); minDelta = MIN(minDelta, MAX(Delta,0)); } return(minDelta); } /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. */ #include <sys/time.h> double mysecond() { struct timeval tp; struct timezone tzp; int i; i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } #ifndef abs #define abs(a) ((a) >= 0 ? (a) : -(a)) #endif void checkSTREAMresults() { STREAM_TYPE aj,bj,cj,scalar; STREAM_TYPE aSumErr,bSumErr,cSumErr; STREAM_TYPE aAvgErr,bAvgErr,cAvgErr; double epsilon; ssize_t j; int k,ierr,err; /* reproduce initialization */ aj = 1.0; bj = 2.0; cj = 0.0; /* a[] is modified during timing check */ aj = 2.0E0 * aj; /* now execute timing loop */ scalar = 3.0; for (k=0; k<NTIMES; k++) { cj = aj; bj = scalar*cj; cj = aj+bj; aj = bj+scalar*cj; } /* accumulate deltas between observed and expected results */ aSumErr = 0.0; bSumErr = 0.0; cSumErr = 0.0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { aSumErr += abs(a[j] - aj); bSumErr += abs(b[j] - bj); cSumErr += abs(c[j] - cj); // if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN } aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; if (sizeof(STREAM_TYPE) == 4) { epsilon = 1.e-6; } else if (sizeof(STREAM_TYPE) == 8) { epsilon = 1.e-13; } else { printf("WEIRD: 
sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE)); epsilon = 1.e-6; } err = 0; if (abs(aAvgErr/aj) > epsilon) { err++; printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(a[j]/aj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,aj,a[j],abs((aj-a[j])/aAvgErr)); } #endif } } printf(" For array a[], %d errors were found.\n",ierr); } if (abs(bAvgErr/bj) > epsilon) { err++; printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj); printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(b[j]/bj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,bj,b[j],abs((bj-b[j])/bAvgErr)); } #endif } } printf(" For array b[], %d errors were found.\n",ierr); } if (abs(cAvgErr/cj) > epsilon) { err++; printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj); printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(c[j]/cj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,cj,c[j],abs((cj-c[j])/cAvgErr)); } #endif } } printf(" For array c[], %d errors were found.\n",ierr); } if (err == 0) { printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon); } #ifdef VERBOSE printf ("Results Validation Verbose Results: \n"); printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj); printf (" 
Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]); printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj)); #endif } /* oneAPI modifications: */ /* These are straightforward SYCL implementations of the STREAM kernels. * Other implenentations may be better in some cases, e.g. using nd_range * and prescibing the dimensions to be an integer multiple of a device parameter. * Please see SYCL or oneAPI performance tuning documentation if necessary, */ /* SYCL requires global variables to be captured explicitly, which is why there * are a=a, b=b, c=c below. This is odd, but consistent with how C++ lambdas work. */ #ifdef TUNED /* stubs for "tuned" versions of the kernels */ void tuned_STREAM_Copy() { q.parallel_for(sycl::range{STREAM_ARRAY_SIZE}, [=,a=a,c=c](sycl::item<1> i) { const auto j = i[0]; c[j] = a[j]; }); } void tuned_STREAM_Scale(STREAM_TYPE scalar) { q.parallel_for(sycl::range{STREAM_ARRAY_SIZE}, [=,b=b,c=c](sycl::item<1> i) { const auto j = i[0]; b[j] = scalar*c[j]; }); } void tuned_STREAM_Add() { q.parallel_for(sycl::range{STREAM_ARRAY_SIZE}, [=,a=a,b=b,c=c](sycl::item<1> i) { const auto j = i[0]; c[j] = a[j]+b[j]; }); } void tuned_STREAM_Triad(STREAM_TYPE scalar) { q.parallel_for(sycl::range{STREAM_ARRAY_SIZE}, [=,a=a,b=b,c=c](sycl::item<1> i) { const auto j = i[0]; a[j] = b[j]+scalar*c[j]; }); } /* end of stubs for the "tuned" versions of the kernels */ #endif
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/array-transform/src/array-transform.cpp
//============================================================== // Copyright (C) Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= // This is a simple DPC++ program that accompanies the Getting Started // Guide of the debugger. The kernel does not compute anything // particularly interesting; it is designed to illustrate the most // essential features of the debugger when the target device is CPU or // GPU. #include <sycl/sycl.hpp> #include <iostream> // Location of file: <oneapi-root>/dev-utilities/<version>/include #include "dpc_common.hpp" using namespace std; using namespace sycl; // A device function, called from inside the kernel. static size_t GetDim(id<1> wi, int dim) { return wi[dim]; } int main(int argc, char *argv[]) { constexpr size_t length = 64; int input[length]; int output[length]; // Initialize the input for (int i = 0; i < length; i++) input[i] = i + 100; try { queue q(default_selector_v, dpc_common::exception_handler); cout << "[SYCL] Using device: [" << q.get_device().get_info<info::device::name>() << "] from [" << q.get_device().get_platform().get_info<info::platform::name>() << "]\n"; range data_range{length}; buffer buffer_in{input, data_range}; buffer buffer_out{output, data_range}; q.submit([&](auto &h) { accessor in(buffer_in, h, read_only); accessor out(buffer_out, h, write_only); // kernel-start h.parallel_for(data_range, [=](id<1> index) { size_t id0 = GetDim(index, 0); int element = in[index]; // breakpoint-here int result = element + 50; if (id0 % 2 == 0) { result = result + 50; // then-branch } else { result = -1; // else-branch } out[index] = result; }); // kernel-end }); q.wait_and_throw(); } catch (sycl::exception const& e) { cout << "fail; synchronous exception occurred: " << e.what() << "\n"; return -1; } // Verify the output for (int i = 0; i < length; i++) { int result = (i % 2 == 0) ? 
(input[i] + 100) : -1; if (output[i] != result) { cout << "fail; element " << i << " is " << output[i] << "\n"; return -1; } } cout << "success; result is correct.\n"; // success return 0; }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/guided_matrix_mult_InvalidContexts/src/1_matrix_mul_invalid_contexts.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= /** * Matrix_mul multiplies two large matrices both the CPU and the offload device, * then compares results. If the code executes on both CPU and the offload * device, the name of the offload device and a success message are displayed. * * For comprehensive instructions regarding DPC++ Programming, go to * https://software.intel.com/en-us/oneapi-programming-guide and search based on * relevant terms noted in the comments. */ #include <CL/sycl.hpp> #include <iostream> #include <limits> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" using namespace std; using namespace sycl; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Perform matrix multiplication on host to verify results from device. */ int VerifyResult(float (*c_back)[P]); int main() { cout << "Initializing" << "\n"; // Host memory buffer that device will write data back before destruction. float(*a_back)[N] = new float[M][N]; float(*b_back)[P] = new float[N][P]; float(*c_back)[P] = new float[M][P]; // Intialize a_back for (int i = 0; i < M; i++) for (int j = 0; j < N; j++) a_back[i][j] = 1.0f; // Intialize b_back for (int i = 0; i < N; i++) for (int j = 0; j < P; j++) b_back[i][j] = i + 1.0f; // Intialize c_back for (int i = 0; i < M; i++) for (int j = 0; j < P; j++) c_back[i][j] = 0.0f; // Initialize the device queue with the default selector. The device queue is // used to enqueue kernels. 
It encapsulates all states needed for execution. { property_list propList = property_list{property::queue::enable_profiling()}; queue q(default_selector_v); cout << "Computing" << "\n"; cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n"; cout << "Device compute units: " << q.get_device().get_info<info::device::max_compute_units>() << "\n"; auto maxWorkItemSize = q.get_device().get_info<info::device::max_work_item_sizes<3>>(); cout << "Device max work item size: " << maxWorkItemSize.get(0) << ", " << maxWorkItemSize.get(1) << ", " << maxWorkItemSize.get(2) << "\n"; cout << "Device max work group size: " << q.get_device().get_info<info::device::max_work_group_size>() << "\n"; // Create 2D buffers for matrices, buffer c is bound with host memory c_back float * dev_a = sycl::malloc_device<float>(M*N, q); float * dev_b = sycl::malloc_device<float>(N*P, q); device selected_device = device(gpu_selector_v); context devicecontext(selected_device, propList); queue q2(devicecontext, selected_device, propList); float * dev_c = sycl::malloc_device<float>(M*P, q2); cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; // Using three command groups to illustrate execution order. The use of // first two command groups for initializing matrices is not the most // efficient way. It just demonstrates the implicit multiple command group // execution ordering. // Submit command group to queue to initialize matrix a q.memcpy(dev_a, &a_back[0], M*N * sizeof(float)); // Submit command group to queue to initialize matrix b q.memcpy(dev_b, &b_back[0], N*P * sizeof(float)); // Submit command group to queue to initialize matrix c q.submit([&](auto &h) { h.memcpy(dev_c, &c_back[0], M*P * sizeof(float)); }); q.wait(); // Submit command group to queue to multiply matrices: c = a * b q.submit([&](auto &h) { // Read from a and b, write to c int width_a = N; // Execute kernel. 
h.parallel_for(range(M, P), [=](auto index) { // Get global position in Y direction. int row = index[0]; // m int col = index[1]; // p float sum = 0.0f; // Compute the result of one element of c for (int i = 0; i < width_a; i++) { auto a_index = row * width_a + i; auto b_index = i * P + col; sum += dev_a[a_index] * dev_b[b_index]; } auto idx = row * P + col; dev_c[idx] = sum; }); }); q.wait(); q.memcpy(&c_back[0], dev_c, M*P * sizeof(float)); q.wait(); } int result; cout << "Result of matrix multiplication using DPC++: "; result = VerifyResult(c_back); delete[] c_back; return result; } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(float (*c_back)[P]) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. 
int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/guided_matrix_mult_InvalidContexts/src/2_matrix_mul.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= /** * Matrix_mul multiplies two large matrices both the CPU and the offload device, * then compares results. If the code executes on both CPU and the offload * device, the name of the offload device and a success message are displayed. * * For comprehensive instructions regarding DPC++ Programming, go to * https://software.intel.com/en-us/oneapi-programming-guide and search based on * relevant terms noted in the comments. */ #include <CL/sycl.hpp> #include <iostream> #include <limits> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" using namespace std; using namespace sycl; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Perform matrix multiplication on host to verify results from device. */ int VerifyResult(float (*c_back)[P]); int main() { cout << "Initializing" << "\n"; // Host memory buffer that device will write data back before destruction. float(*a_back)[N] = new float[M][N]; float(*b_back)[P] = new float[N][P]; float(*c_back)[P] = new float[M][P]; // Intialize a_back for (int i = 0; i < M; i++) for (int j = 0; j < N; j++) a_back[i][j] = 1.0f; // Intialize b_back for (int i = 0; i < N; i++) for (int j = 0; j < P; j++) b_back[i][j] = i + 1.0f; // Intialize c_back for (int i = 0; i < M; i++) for (int j = 0; j < P; j++) c_back[i][j] = 0.0f; // Initialize the device queue with the default selector. The device queue is // used to enqueue kernels. 
It encapsulates all states needed for execution. { property_list propList = property_list{property::queue::enable_profiling()}; queue q(default_selector_v); cout << "Computing" << "\n"; cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n"; cout << "Device compute units: " << q.get_device().get_info<info::device::max_compute_units>() << "\n"; auto maxWorkItemSize = q.get_device().get_info<info::device::max_work_item_sizes<3>>(); cout << "Device max work item size: " << maxWorkItemSize.get(0) << ", " << maxWorkItemSize.get(1) << ", " << maxWorkItemSize.get(2) << "\n"; cout << "Device max work group size: " << q.get_device().get_info<info::device::max_work_group_size>() << "\n"; // Create 2D buffers for matrices, buffer c is bound with host memory c_back float * dev_a = sycl::malloc_device<float>(M*N, q); float * dev_b = sycl::malloc_device<float>(N*P, q); float * dev_c = sycl::malloc_device<float>(M*P, q); cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; // Using three command groups to illustrate execution order. The use of // first two command groups for initializing matrices is not the most // efficient way. It just demonstrates the implicit multiple command group // execution ordering. // Submit command group to queue to initialize matrix a q.memcpy(dev_a, &a_back[0], M*N * sizeof(float)); // Submit command group to queue to initialize matrix b q.memcpy(dev_b, &b_back[0], N*P * sizeof(float)); // Submit command group to queue to initialize matrix c q.submit([&](auto &h) { h.memcpy(dev_c, &c_back[0], M*P * sizeof(float)); }); q.wait(); // Submit command group to queue to multiply matrices: c = a * b q.submit([&](auto &h) { // Read from a and b, write to c int width_a = N; // Execute kernel. h.parallel_for(range(M, P), [=](auto index) { // Get global position in Y direction. 
int row = index[0]; // m int col = index[1]; // p float sum = 0.0f; // Compute the result of one element of c for (int i = 0; i < width_a; i++) { auto a_index = row * width_a + i; auto b_index = i * P + col; sum += dev_a[a_index] * dev_b[b_index]; } auto idx = row * P + col; dev_c[idx] = sum; }); }); q.wait(); q.memcpy(&c_back[0], dev_c, M*P * sizeof(float)); q.wait(); } int result; cout << "Result of matrix multiplication using DPC++: "; result = VerifyResult(c_back); delete[] c_back; return result; } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(float (*c_back)[P]) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. 
int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/guided_matrix_mult_RaceCondition/src/3_matrix_mul.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= /** * Matrix_mul multiplies two large matrices both the CPU and the offload device, * then compares results. If the code executes on both CPU and the offload * device, the name of the offload device and a success message are displayed. * * For comprehensive instructions regarding DPC++ Programming, go to * https://software.intel.com/en-us/oneapi-programming-guide and search based on * relevant terms noted in the comments. */ #include <CL/sycl.hpp> #include <iostream> #include <limits> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" using namespace std; using namespace sycl; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Perform matrix multiplication on host to verify results from device. */ int VerifyResult(sycl::host_accessor<float, 2, sycl::access::mode::read> result); int main() { // Host memory buffer that device will write data back before destruction. float(*c_back)[P] = new float[M][P]; // Intialize c_back for (int i = 0; i < M; i++) for (int j = 0; j < P; j++) c_back[i][j] = 0.0f; // Initialize the device queue with the default selector. The device queue is // used to enqueue kernels. It encapsulates all states needed for execution. 
queue q(default_selector_v); cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n"; // Create 2D buffers for matrices, buffer c is bound with host memory c_back buffer<float, 2> a_buf(range(M, N)); buffer<float, 2> b_buf(range(N, P)); buffer c_buf(reinterpret_cast<float *>(c_back), range(M, P)); cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; // Using three command groups to illustrate execution order. The use of // first two command groups for initializing matrices is not the most // efficient way. It just demonstrates the implicit multiple command group // execution ordering. // Submit command group to queue to initialize matrix a q.submit([&](auto &h) { // Get write only access to the buffer on a device. accessor a(a_buf, h, write_only); // Execute kernel. h.parallel_for(range(M, N), [=](auto index) { // Each element of matrix a is 1. a[index] = 1.0f; }); }); // Submit command group to queue to initialize matrix b q.submit([&](auto &h) { // Get write only access to the buffer on a device accessor b(b_buf, h, write_only); // Execute kernel. h.parallel_for(range(N, P), [=](auto index) { // Each column of b is the sequence 1,2,...,N b[index] = index[0] + 1.0f; }); }); // Submit command group to queue to multiply matrices: c = a * b q.submit([&](auto &h) { // Read from a and b, write to c accessor a(a_buf, h, read_only); accessor b(b_buf, h, read_only); accessor c(c_buf, h, write_only); int width_a = a_buf.get_range()[1]; // Execute kernel. h.parallel_for(range(M, P), [=](auto index) { // Get global position in Y direction. int row = index[0]; // Get global position in X direction. 
int col = index[1]; float sum = 0.0f; // Compute the result of one element of c for (int i = 0; i < width_a; i++) { sum += a[row][i] * b[i][col]; } c[index] = sum; }); }); q.wait(); int result; host_accessor my_results(c_buf, read_only); cout << "Result of matrix multiplication using DPC++: "; result = VerifyResult(my_results); return result; } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(sycl::host_accessor<float, 2, sycl::access::mode::read> c_back) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/guided_matrix_mult_RaceCondition/src/1_matrix_mul_race_condition.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= /** * Matrix_mul multiplies two large matrices both the CPU and the offload device, * then compares results. If the code executes on both CPU and the offload * device, the name of the offload device and a success message are displayed. * * For comprehensive instructions regarding DPC++ Programming, go to * https://software.intel.com/en-us/oneapi-programming-guide and search based on * relevant terms noted in the comments. */ #include <CL/sycl.hpp> #include <iostream> #include <limits> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" using namespace std; using namespace sycl; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Perform matrix multiplication on host to verify results from device. */ int VerifyResult(float (*c_back)[P]); int main() { // Host memory buffer that device will write data back before destruction. float(*c_back)[P] = new float[M][P]; // Intialize c_back for (int i = 0; i < M; i++) for (int j = 0; j < P; j++) c_back[i][j] = 0.0f; // Initialize the device queue with the default selector. The device queue is // used to enqueue kernels. It encapsulates all states needed for execution. 
queue q(default_selector_v); cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n"; // Create 2D buffers for matrices, buffer c is bound with host memory c_back buffer<float, 2> a_buf(range(M, N)); buffer<float, 2> b_buf(range(N, P)); buffer c_buf(reinterpret_cast<float *>(c_back), range(M, P)); cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; // Using three command groups to illustrate execution order. The use of // first two command groups for initializing matrices is not the most // efficient way. It just demonstrates the implicit multiple command group // execution ordering. // Submit command group to queue to initialize matrix a q.submit([&](auto &h) { // Get write only access to the buffer on a device. accessor a(a_buf, h, write_only); // Execute kernel. h.parallel_for(range(M, N), [=](auto index) { // Each element of matrix a is 1. a[index] = 1.0f; }); }); // Submit command group to queue to initialize matrix b q.submit([&](auto &h) { // Get write only access to the buffer on a device accessor b(b_buf, h, write_only); // Execute kernel. h.parallel_for(range(N, P), [=](auto index) { // Each column of b is the sequence 1,2,...,N b[index] = index[0] + 1.0f; }); }); // Submit command group to queue to multiply matrices: c = a * b q.submit([&](auto &h) { // Read from a and b, write to c accessor a(a_buf, h, read_only); accessor b(b_buf, h, read_only); accessor c(c_buf, h, write_only); int width_a = a_buf.get_range()[1]; // Execute kernel. h.parallel_for(range(M, P), [=](auto index) { // Get global position in Y direction. int row = index[0]; // Get global position in X direction. 
int col = index[1]; float sum = 0.0f; // Compute the result of one element of c for (int i = 0; i < width_a; i++) { sum += a[row][i] * b[i][col]; } c[index] = sum; }); }); int result; cout << "Result of matrix multiplication using DPC++: "; result = VerifyResult(c_back); delete[] c_back; return result; } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(float (*c_back)[P]) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/guided_matrix_mult_RaceCondition/src/2_matrix_mul.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= /** * Matrix_mul multiplies two large matrices both the CPU and the offload device, * then compares results. If the code executes on both CPU and the offload * device, the name of the offload device and a success message are displayed. * * For comprehensive instructions regarding DPC++ Programming, go to * https://software.intel.com/en-us/oneapi-programming-guide and search based on * relevant terms noted in the comments. */ #include <CL/sycl.hpp> #include <iostream> #include <limits> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" using namespace std; using namespace sycl; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Perform matrix multiplication on host to verify results from device. */ int VerifyResult(float (*c_back)[P]); int main() { // Host memory buffer that device will write data back before destruction. float(*c_back)[P] = new float[M][P]; // Intialize c_back for (int i = 0; i < M; i++) for (int j = 0; j < P; j++) c_back[i][j] = 0.0f; // Initialize the device queue with the default selector. The device queue is // used to enqueue kernels. It encapsulates all states needed for execution. 
{ queue q(default_selector_v); cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n"; // Create 2D buffers for matrices, buffer c is bound with host memory c_back buffer<float, 2> a_buf(range(M, N)); buffer<float, 2> b_buf(range(N, P)); buffer c_buf(reinterpret_cast<float *>(c_back), range(M, P)); cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; // Using three command groups to illustrate execution order. The use of // first two command groups for initializing matrices is not the most // efficient way. It just demonstrates the implicit multiple command group // execution ordering. // Submit command group to queue to initialize matrix a q.submit([&](auto &h) { // Get write only access to the buffer on a device. accessor a(a_buf, h, write_only); // Execute kernel. h.parallel_for(range(M, N), [=](auto index) { // Each element of matrix a is 1. a[index] = 1.0f; }); }); // Submit command group to queue to initialize matrix b q.submit([&](auto &h) { // Get write only access to the buffer on a device accessor b(b_buf, h, write_only); // Execute kernel. h.parallel_for(range(N, P), [=](auto index) { // Each column of b is the sequence 1,2,...,N b[index] = index[0] + 1.0f; }); }); // Submit command group to queue to multiply matrices: c = a * b q.submit([&](auto &h) { // Read from a and b, write to c accessor a(a_buf, h, read_only); accessor b(b_buf, h, read_only); accessor c(c_buf, h, write_only); int width_a = a_buf.get_range()[1]; // Execute kernel. h.parallel_for(range(M, P), [=](auto index) { // Get global position in Y direction. int row = index[0]; // Get global position in X direction. 
int col = index[1]; float sum = 0.0f; // Compute the result of one element of c for (int i = 0; i < width_a; i++) { sum += a[row][i] * b[i][col]; } c[index] = sum; }); }); } int result; cout << "Result of matrix multiplication using DPC++: "; result = VerifyResult(c_back); delete[] c_back; return result; } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(float (*c_back)[P]) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/guided_matrix_mult_BadBuffers/src/a2_matrix_mul.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= /** * Matrix_mul multiplies two large matrices both the CPU and the offload device, * then compares results. If the code executes on both CPU and the offload * device, the name of the offload device and a success message are displayed. * * For comprehensive instructions regarding DPC++ Programming, go to * https://software.intel.com/en-us/oneapi-programming-guide and search based on * relevant terms noted in the comments. */ #include <CL/sycl.hpp> #include <iostream> #include <limits> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" using namespace std; using namespace sycl; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Perform matrix multiplication on host to verify results from device. */ int VerifyResult(float (*c_back)[P]); int main() { // Host memory buffer that device will write data back before destruction. float(*c_back)[P] = new float[M][P]; // Intialize c_back for (int i = 0; i < M; i++) for (int j = 0; j < P; j++) c_back[i][j] = 0.0f; // Initialize the device queue with the default selector. The device queue is // used to enqueue kernels. It encapsulates all states needed for execution. 
{ queue q(default_selector_v); cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n"; // Create 2D buffers for matrices, buffer c is bound with host memory c_back buffer<float, 2> a_buf(range(M, N)); buffer<float, 2> b_buf(range(N, P)); buffer c_buf(reinterpret_cast<float *>(c_back), range(M, P)); cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; // Using three command groups to illustrate execution order. The use of // first two command groups for initializing matrices is not the most // efficient way. It just demonstrates the implicit multiple command group // execution ordering. // Submit command group to queue to initialize matrix a q.submit([&](auto &h) { // Get write only access to the buffer on a device. accessor a(a_buf, h, write_only); // Execute kernel. h.parallel_for(range(M, N), [=](auto index) { // Each element of matrix a is 1. a[index] = 1.0f; }); }); // Submit command group to queue to initialize matrix b q.submit([&](auto &h) { // Get write only access to the buffer on a device accessor b(b_buf, h, write_only); // Execute kernel. h.parallel_for(range(N, P), [=](auto index) { // Each column of b is the sequence 1,2,...,N b[index] = index[0] + 1.0f; }); }); // Submit command group to queue to multiply matrices: c = a * b q.submit([&](auto &h) { // Read from a and b, write to c accessor a(a_buf, h, read_only); accessor b(b_buf, h, read_only); accessor c(c_buf, h, write_only); int width_a = a_buf.get_range()[1]; // Execute kernel. h.parallel_for(range(M, P), [=](auto index) { // Get global position in Y direction. int row = index[0]; // Get global position in X direction. 
int col = index[1]; float sum = 0.0f; // Compute the result of one element of c for (int i = 0; i < width_a; i++) { sum += a[row][i] * b[i][col]; } c[index] = sum; }); }); } int result; cout << "Result of matrix multiplication using DPC++: "; result = VerifyResult(c_back); delete[] c_back; return result; } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(float (*c_back)[P]) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/guided_matrix_mult_BadBuffers/src/a1_matrix_mul_zero_buff.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= /** * Matrix_mul multiplies two large matrices both the CPU and the offload device, * then compares results. If the code executes on both CPU and the offload * device, the name of the offload device and a success message are displayed. * * For comprehensive instructions regarding DPC++ Programming, go to * https://software.intel.com/en-us/oneapi-programming-guide and search based on * relevant terms noted in the comments. */ #include <CL/sycl.hpp> #include <iostream> #include <limits> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" using namespace std; using namespace sycl; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Perform matrix multiplication on host to verify results from device. */ int VerifyResult(float (*c_back)[P]); int main() { // Host memory buffer that device will write data back before destruction. float(*c_back)[P] = new float[M][P]; // Intialize c_back for (int i = 0; i < M; i++) for (int j = 0; j < P; j++) c_back[i][j] = 0.0f; // Initialize the device queue with the default selector. The device queue is // used to enqueue kernels. It encapsulates all states needed for execution. 
{ queue q(default_selector_v); cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n"; // Create 2D buffers for matrices, buffer c is bound with host memory c_back buffer<float, 2> a_buf(range(0, 0)); buffer<float, 2> b_buf(range(N, P)); buffer c_buf(reinterpret_cast<float *>(c_back), range(M, P)); cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; // Using three command groups to illustrate execution order. The use of // first two command groups for initializing matrices is not the most // efficient way. It just demonstrates the implicit multiple command group // execution ordering. // Submit command group to queue to initialize matrix a q.submit([&](auto &h) { // Get write only access to the buffer on a device. accessor a(a_buf, h, write_only); // Execute kernel. h.parallel_for(range(M, N), [=](auto index) { // Each element of matrix a is 1. a[index] = 1.0f; }); }); // Submit command group to queue to initialize matrix b q.submit([&](auto &h) { // Get write only access to the buffer on a device accessor b(b_buf, h, write_only); // Execute kernel. h.parallel_for(range(N, P), [=](auto index) { // Each column of b is the sequence 1,2,...,N b[index] = index[0] + 1.0f; }); }); // Submit command group to queue to multiply matrices: c = a * b q.submit([&](auto &h) { // Read from a and b, write to c accessor a(a_buf, h, read_only); accessor b(b_buf, h, read_only); accessor c(c_buf, h, write_only); int width_a = a_buf.get_range()[1]; // Execute kernel. h.parallel_for(range(M, P), [=](auto index) { // Get global position in Y direction. int row = index[0]; // Get global position in X direction. 
int col = index[1]; float sum = 0.0f; // Compute the result of one element of c for (int i = 0; i < width_a; i++) { sum += a[row][i] * b[i][col]; } c[index] = sum; }); }); } int result; cout << "Result of matrix multiplication using DPC++: "; result = VerifyResult(c_back); delete[] c_back; return result; } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(float (*c_back)[P]) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/guided_matrix_mult_BadBuffers/src/b2_matrix_mul_usm.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= /** * Matrix_mul multiplies two large matrices both the CPU and the offload device, * then compares results. If the code executes on both CPU and the offload * device, the name of the offload device and a success message are displayed. * * For comprehensive instructions regarding DPC++ Programming, go to * https://software.intel.com/en-us/oneapi-programming-guide and search based on * relevant terms noted in the comments. */ #include <CL/sycl.hpp> #include <iostream> #include <limits> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" using namespace std; using namespace sycl; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Perform matrix multiplication on host to verify results from device. */ int VerifyResult(float (*c_back)[P]); int main() { cout << "Initializing" << "\n"; // Host memory buffer that device will write data back before destruction. float(*a_back)[N] = new float[M][N]; float(*b_back)[P] = new float[N][P]; float(*c_back)[P] = new float[M][P]; // Intialize a_back for (int i = 0; i < M; i++) for (int j = 0; j < N; j++) a_back[i][j] = 1.0f; // Intialize b_back for (int i = 0; i < N; i++) for (int j = 0; j < P; j++) b_back[i][j] = i + 1.0f; // Intialize c_back for (int i = 0; i < M; i++) for (int j = 0; j < P; j++) c_back[i][j] = 0.0f; // Initialize the device queue with the default selector. The device queue is // used to enqueue kernels. 
It encapsulates all states needed for execution. { property_list propList = property_list{property::queue::enable_profiling()}; queue q(default_selector_v); cout << "Computing" << "\n"; cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n"; cout << "Device compute units: " << q.get_device().get_info<info::device::max_compute_units>() << "\n"; auto maxWorkItemSize = q.get_device().get_info<info::device::max_work_item_sizes<3>>(); cout << "Device max work item size: " << maxWorkItemSize.get(0) << ", " << maxWorkItemSize.get(1) << ", " << maxWorkItemSize.get(2) << "\n"; cout << "Device max work group size: " << q.get_device().get_info<info::device::max_work_group_size>() << "\n"; // Create 2D buffers for matrices, buffer c is bound with host memory c_back float * dev_a = sycl::malloc_device<float>(M*N, q); float * dev_b = sycl::malloc_device<float>(N*P, q); float * dev_c = sycl::malloc_device<float>(M*P, q); cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; // Using three command groups to illustrate execution order. The use of // first two command groups for initializing matrices is not the most // efficient way. It just demonstrates the implicit multiple command group // execution ordering. // Submit command group to queue to initialize matrix a q.memcpy(dev_a, &a_back[0], M*N * sizeof(float)); // Submit command group to queue to initialize matrix b q.memcpy(dev_b, &b_back[0], N*P * sizeof(float)); // Submit command group to queue to initialize matrix c q.submit([&](auto &h) { h.memcpy(dev_c, &c_back[0], M*P * sizeof(float)); }); q.wait(); // Submit command group to queue to multiply matrices: c = a * b q.submit([&](auto &h) { // Read from a and b, write to c int width_a = N; // Execute kernel. h.parallel_for(range(M, P), [=](auto index) { // Get global position in Y direction. 
int row = index[0]; // m int col = index[1]; // p float sum = 0.0f; // Compute the result of one element of c for (int i = 0; i < width_a; i++) { auto a_index = row * width_a + i; auto b_index = i * P + col; sum += dev_a[a_index] * dev_b[b_index]; } auto idx = row * P + col; dev_c[idx] = sum; }); }); q.wait(); q.memcpy(&c_back[0], dev_c, M*P * sizeof(float)); q.wait(); } int result; cout << "Result of matrix multiplication using DPC++: "; result = VerifyResult(c_back); delete[] c_back; return result; } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(float (*c_back)[P]) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. 
int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/guided_matrix_mult_BadBuffers/src/b1_matrix_mul_null_usm.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= /** * Matrix_mul multiplies two large matrices both the CPU and the offload device, * then compares results. If the code executes on both CPU and the offload * device, the name of the offload device and a success message are displayed. * * For comprehensive instructions regarding DPC++ Programming, go to * https://software.intel.com/en-us/oneapi-programming-guide and search based on * relevant terms noted in the comments. */ #include <CL/sycl.hpp> #include <iostream> #include <limits> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" using namespace std; using namespace sycl; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Perform matrix multiplication on host to verify results from device. */ int VerifyResult(float (*c_back)[P]); int main() { cout << "Initializing" << "\n"; // Host memory buffer that device will write data back before destruction. float(*a_back)[N] = new float[M][N]; float(*b_back)[P] = new float[N][P]; float(*c_back)[P] = new float[M][P]; // Intialize a_back for (int i = 0; i < M; i++) for (int j = 0; j < N; j++) a_back[i][j] = 1.0f; // Intialize b_back for (int i = 0; i < N; i++) for (int j = 0; j < P; j++) b_back[i][j] = i + 1.0f; // Intialize c_back for (int i = 0; i < M; i++) for (int j = 0; j < P; j++) c_back[i][j] = 0.0f; // Initialize the device queue with the default selector. The device queue is // used to enqueue kernels. 
It encapsulates all states needed for execution. { property_list propList = property_list{property::queue::enable_profiling()}; queue q(default_selector_v); cout << "Computing" << "\n"; cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n"; cout << "Device compute units: " << q.get_device().get_info<info::device::max_compute_units>() << "\n"; auto maxWorkItemSize = q.get_device().get_info<info::device::max_work_item_sizes<3>>(); cout << "Device max work item size: " << maxWorkItemSize.get(0) << ", " << maxWorkItemSize.get(1) << ", " << maxWorkItemSize.get(2) << "\n"; cout << "Device max work group size: " << q.get_device().get_info<info::device::max_work_group_size>() << "\n"; // Create 2D buffers for matrices, buffer c is bound with host memory c_back float * dev_a = sycl::malloc_device<float>(M*N, q); float * dev_b = sycl::malloc_device<float>(N*P, q); float * dev_c = sycl::malloc_device<float>(M*P, q); cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; // Using three command groups to illustrate execution order. The use of // first two command groups for initializing matrices is not the most // efficient way. It just demonstrates the implicit multiple command group // execution ordering. // Submit command group to queue to initialize matrix a q.memcpy(dev_a, &a_back[0], M*N * sizeof(float)); // Submit command group to queue to initialize matrix b q.memcpy(dev_b, &b_back[0], N*P * sizeof(float)); // Submit command group to queue to initialize matrix c q.submit([&](auto &h) { h.memcpy(dev_c, &c_back[0], M*P * sizeof(float)); }); q.wait(); dev_a = (float *)0; // Submit command group to queue to multiply matrices: c = a * b q.submit([&](auto &h) { // Read from a and b, write to c int width_a = N; // Execute kernel. h.parallel_for(range(M, P), [=](auto index) { // Get global position in Y direction. 
int row = index[0]; // m int col = index[1]; // p float sum = 0.0f; // Compute the result of one element of c for (int i = 0; i < width_a; i++) { auto a_index = row * width_a + i; auto b_index = i * P + col; sum += dev_a[a_index] * dev_b[b_index]; } auto idx = row * P + col; dev_c[idx] = sum; }); }); q.wait(); q.memcpy(&c_back[0], dev_c, M*P * sizeof(float)); q.wait(); } int result; cout << "Result of matrix multiplication using DPC++: "; result = VerifyResult(c_back); delete[] c_back; return result; } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(float (*c_back)[P]) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. 
int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/guided_matrix_mult_SLMSize/src/1_matrix_mul_SLM_size.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= /** * Matrix_mul multiplies two large matrices both the CPU and the offload device, * then compares results. If the code executes on both CPU and the offload * device, the name of the offload device and a success message are displayed. * * For comprehensive instructions regarding DPC++ Programming, go to * https://software.intel.com/en-us/oneapi-programming-guide and search based on * relevant terms noted in the comments. */ #include <CL/sycl.hpp> #include <iostream> #include <limits> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" using namespace std; using namespace sycl; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Perform matrix multiplication on host to verify results from device. */ int VerifyResult(float (*c_back)[P]); int main() { cout << "Initializing" << "\n"; // Host memory buffer that device will write data back before destruction. float(*a_back)[N] = new float[M][N]; float(*b_back)[P] = new float[N][P]; float(*c_back)[P] = new float[M][P]; // Intialize a_back for (int i = 0; i < M; i++) for (int j = 0; j < N; j++) a_back[i][j] = 1.0f; // Intialize b_back for (int i = 0; i < N; i++) for (int j = 0; j < P; j++) b_back[i][j] = i + 1.0f; // Intialize c_back for (int i = 0; i < M; i++) for (int j = 0; j < P; j++) c_back[i][j] = 0.0f; // Initialize the device queue with the default selector. The device queue is // used to enqueue kernels. 
It encapsulates all states needed for execution. { property_list propList = property_list{property::queue::enable_profiling()}; queue q(default_selector_v); cout << "Computing" << "\n"; cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n"; cout << "Device compute units: " << q.get_device().get_info<info::device::max_compute_units>() << "\n"; auto maxWorkItemSize = q.get_device().get_info<cl::sycl::info::device::max_work_item_sizes<3>>(); cout << "Device max work item size: " << maxWorkItemSize.get(0) << ", " << maxWorkItemSize.get(1) << ", " << maxWorkItemSize.get(2) << "\n"; cout << "Device max work group size: " << q.get_device().get_info<info::device::max_work_group_size>() << "\n"; // Create 2D buffers for matrices, buffer c is bound with host memory c_back float * dev_a = sycl::malloc_device<float>(M*N, q); float * dev_b = sycl::malloc_device<float>(N*P, q); float * dev_c = sycl::malloc_device<float>(M*P, q); cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; // Using three command groups to illustrate execution order. The use of // first two command groups for initializing matrices is not the most // efficient way. It just demonstrates the implicit multiple command group // execution ordering. // Submit command group to queue to initialize matrix a q.memcpy(dev_a, &a_back[0], M*N * sizeof(float)); // Submit command group to queue to initialize matrix b q.memcpy(dev_b, &b_back[0], N*P * sizeof(float)); // Submit command group to queue to initialize matrix c q.submit([&](auto &h) { h.memcpy(dev_c, &c_back[0], M*P * sizeof(float)); }); q.wait(); q.submit([&](handler &h){ local_accessor<float,1> acc(163850, h); h.parallel_for(163850, [=](auto i){ int index = i.get_id(); acc[index] = index; }); }).wait(); // Submit command group to queue to multiply matrices: c = a * b q.submit([&](auto &h) { // Read from a and b, write to c int width_a = N; // Execute kernel. 
h.parallel_for(range(M, P), [=](auto index) { // Get global position in Y direction. int row = index[0]; // m int col = index[1]; // p float sum = 0.0f; // Compute the result of one element of c for (int i = 0; i < width_a; i++) { auto a_index = row * width_a + i; auto b_index = i * P + col; sum += dev_a[a_index] * dev_b[b_index]; } auto idx = row * P + col; dev_c[idx] = sum; }); }); q.wait(); q.memcpy(&c_back[0], dev_c, M*P * sizeof(float)); q.wait(); } int result; cout << "Result of matrix multiplication using DPC++: "; result = VerifyResult(c_back); delete[] c_back; return result; } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(float (*c_back)[P]) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. 
int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/guided_matrix_mult_SLMSize/src/2_matrix_mul.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= /** * Matrix_mul multiplies two large matrices both the CPU and the offload device, * then compares results. If the code executes on both CPU and the offload * device, the name of the offload device and a success message are displayed. * * For comprehensive instructions regarding DPC++ Programming, go to * https://software.intel.com/en-us/oneapi-programming-guide and search based on * relevant terms noted in the comments. */ #include <CL/sycl.hpp> #include <iostream> #include <limits> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" using namespace std; using namespace sycl; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Perform matrix multiplication on host to verify results from device. */ int VerifyResult(float (*c_back)[P]); int main() { cout << "Initializing" << "\n"; // Host memory buffer that device will write data back before destruction. float(*a_back)[N] = new float[M][N]; float(*b_back)[P] = new float[N][P]; float(*c_back)[P] = new float[M][P]; // Intialize a_back for (int i = 0; i < M; i++) for (int j = 0; j < N; j++) a_back[i][j] = 1.0f; // Intialize b_back for (int i = 0; i < N; i++) for (int j = 0; j < P; j++) b_back[i][j] = i + 1.0f; // Intialize c_back for (int i = 0; i < M; i++) for (int j = 0; j < P; j++) c_back[i][j] = 0.0f; // Initialize the device queue with the default selector. The device queue is // used to enqueue kernels. 
It encapsulates all states needed for execution. { property_list propList = property_list{property::queue::enable_profiling()}; queue q(default_selector_v); cout << "Computing" << "\n"; cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n"; cout << "Device compute units: " << q.get_device().get_info<info::device::max_compute_units>() << "\n"; auto maxWorkItemSize = q.get_device().get_info<cl::sycl::info::device::max_work_item_sizes<3>>(); cout << "Device max work item size: " << maxWorkItemSize.get(0) << ", " << maxWorkItemSize.get(1) << ", " << maxWorkItemSize.get(2) << "\n"; cout << "Device max work group size: " << q.get_device().get_info<info::device::max_work_group_size>() << "\n"; // Create 2D buffers for matrices, buffer c is bound with host memory c_back float * dev_a = sycl::malloc_device<float>(M*N, q); float * dev_b = sycl::malloc_device<float>(N*P, q); float * dev_c = sycl::malloc_device<float>(M*P, q); cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; // Using three command groups to illustrate execution order. The use of // first two command groups for initializing matrices is not the most // efficient way. It just demonstrates the implicit multiple command group // execution ordering. // Submit command group to queue to initialize matrix a q.memcpy(dev_a, &a_back[0], M*N * sizeof(float)); // Submit command group to queue to initialize matrix b q.memcpy(dev_b, &b_back[0], N*P * sizeof(float)); // Submit command group to queue to initialize matrix c q.submit([&](auto &h) { h.memcpy(dev_c, &c_back[0], M*P * sizeof(float)); }); q.wait(); // Submit command group to queue to multiply matrices: c = a * b q.submit([&](auto &h) { // Read from a and b, write to c int width_a = N; // Execute kernel. h.parallel_for(range(M, P), [=](auto index) { // Get global position in Y direction. 
int row = index[0]; // m int col = index[1]; // p float sum = 0.0f; // Compute the result of one element of c for (int i = 0; i < width_a; i++) { auto a_index = row * width_a + i; auto b_index = i * P + col; sum += dev_a[a_index] * dev_b[b_index]; } auto idx = row * P + col; dev_c[idx] = sum; }); }); q.wait(); q.memcpy(&c_back[0], dev_c, M*P * sizeof(float)); q.wait(); } int result; cout << "Result of matrix multiplication using DPC++: "; result = VerifyResult(c_back); delete[] c_back; return result; } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(float (*c_back)[P]) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. 
int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/guided_matrix_mult_Exceptions/src/2_matrix_mul_multi_offload.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= /** * Matrix_mul multiplies two large matrices both the CPU and the offload device, * then compares results. If the code executes on both CPU and the offload * device, the name of the offload device and a success message are displayed. * * For comprehensive instructions regarding DPC++ Programming, go to * https://software.intel.com/en-us/oneapi-programming-guide and search based on * relevant terms noted in the comments. */ #include <CL/sycl.hpp> #include <iostream> #include <limits> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" using namespace std; using namespace sycl; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Perform matrix multiplication on host to verify results from device. */ int VerifyResult(float (*c_back)[P]); int main() { cout << "Initializing" << "\n"; // Host memory buffer that device will write data back before destruction. float(*a_back)[N] = new float[M][N]; float(*b_back)[P] = new float[N][P]; float(*c_back)[P] = new float[M][P]; // Intialize a_back for (int i = 0; i < M; i++) for (int j = 0; j < N; j++) a_back[i][j] = 1.0f; // Intialize b_back for (int i = 0; i < N; i++) for (int j = 0; j < P; j++) b_back[i][j] = i + 1.0f; // Intialize c_back for (int i = 0; i < M; i++) for (int j = 0; j < P; j++) c_back[i][j] = 0.0f; // Initialize the device queue with the default selector. The device queue is // used to enqueue kernels. 
It encapsulates all states needed for execution. { property_list propList = property_list{property::queue::enable_profiling()}; queue q(default_selector_v); cout << "Computing" << "\n"; cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n"; cout << "Device compute units: " << q.get_device().get_info<info::device::max_compute_units>() << "\n"; auto maxWorkItemSize = q.get_device().get_info<info::device::max_work_item_sizes<3>>(); cout << "Device max work item size: " << maxWorkItemSize.get(0) << ", " << maxWorkItemSize.get(1) << ", " << maxWorkItemSize.get(2) << "\n"; cout << "Device max work group size: " << q.get_device().get_info<info::device::max_work_group_size>() << "\n"; // Create 2D buffers for matrices, buffer c is bound with host memory c_back float * dev_a = sycl::malloc_device<float>(M*N, q); float * dev_b = sycl::malloc_device<float>(N*P, q); float * dev_c = sycl::malloc_device<float>(M*P, q); cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; // Using three command groups to illustrate execution order. The use of // first two command groups for initializing matrices is not the most // efficient way. It just demonstrates the implicit multiple command group // execution ordering. // Submit command group to queue to initialize matrix a q.memcpy(dev_a, &a_back[0], M*N * sizeof(float)); // Submit command group to queue to initialize matrix b q.memcpy(dev_b, &b_back[0], N*P * sizeof(float)); // Submit command group to queue to initialize matrix c q.submit([&](auto &h) { h.memcpy(dev_c, &c_back[0], M*P * sizeof(float)); h.memcpy(dev_c, &c_back[0], M*P * sizeof(float)); }); q.wait(); // Submit command group to queue to multiply matrices: c = a * b q.submit([&](auto &h) { // Read from a and b, write to c int width_a = N; // Execute kernel. h.parallel_for(range(M, P), [=](auto index) { // Get global position in Y direction. 
int row = index[0]; // m int col = index[1]; // p float sum = 0.0f; // Compute the result of one element of c for (int i = 0; i < width_a; i++) { auto a_index = row * width_a + i; auto b_index = i * P + col; sum += dev_a[a_index] * dev_b[b_index]; } auto idx = row * P + col; dev_c[idx] = sum; }); }); q.wait(); q.memcpy(&c_back[0], dev_c, M*P * sizeof(float)); q.wait(); } int result; cout << "Result of matrix multiplication using DPC++: "; result = VerifyResult(c_back); delete[] c_back; return result; } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(float (*c_back)[P]) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. 
int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/guided_matrix_mult_Exceptions/src/3_matrix_mul.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= /** * Matrix_mul multiplies two large matrices both the CPU and the offload device, * then compares results. If the code executes on both CPU and the offload * device, the name of the offload device and a success message are displayed. * * For comprehensive instructions regarding DPC++ Programming, go to * https://software.intel.com/en-us/oneapi-programming-guide and search based on * relevant terms noted in the comments. */ #include <CL/sycl.hpp> #include <iostream> #include <limits> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" using namespace std; using namespace sycl; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Perform matrix multiplication on host to verify results from device. */ int VerifyResult(float (*c_back)[P]); int main() { cout << "Initializing" << "\n"; // Host memory buffer that device will write data back before destruction. float(*a_back)[N] = new float[M][N]; float(*b_back)[P] = new float[N][P]; float(*c_back)[P] = new float[M][P]; // Intialize a_back for (int i = 0; i < M; i++) for (int j = 0; j < N; j++) a_back[i][j] = 1.0f; // Intialize b_back for (int i = 0; i < N; i++) for (int j = 0; j < P; j++) b_back[i][j] = i + 1.0f; // Intialize c_back for (int i = 0; i < M; i++) for (int j = 0; j < P; j++) c_back[i][j] = 0.0f; // Initialize the device queue with the default selector. The device queue is // used to enqueue kernels. 
It encapsulates all states needed for execution. { property_list propList = property_list{property::queue::enable_profiling()}; queue q(default_selector_v); cout << "Computing" << "\n"; cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n"; cout << "Device compute units: " << q.get_device().get_info<info::device::max_compute_units>() << "\n"; auto maxWorkItemSize = q.get_device().get_info<info::device::max_work_item_sizes<3>>(); cout << "Device max work item size: " << maxWorkItemSize.get(0) << ", " << maxWorkItemSize.get(1) << ", " << maxWorkItemSize.get(2) << "\n"; cout << "Device max work group size: " << q.get_device().get_info<info::device::max_work_group_size>() << "\n"; // Create 2D buffers for matrices, buffer c is bound with host memory c_back float * dev_a = sycl::malloc_device<float>(M*N, q); float * dev_b = sycl::malloc_device<float>(N*P, q); float * dev_c = sycl::malloc_device<float>(M*P, q); cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; // Using three command groups to illustrate execution order. The use of // first two command groups for initializing matrices is not the most // efficient way. It just demonstrates the implicit multiple command group // execution ordering. // Submit command group to queue to initialize matrix a q.memcpy(dev_a, &a_back[0], M*N * sizeof(float)); // Submit command group to queue to initialize matrix b q.memcpy(dev_b, &b_back[0], N*P * sizeof(float)); // Submit command group to queue to initialize matrix c q.submit([&](auto &h) { h.memcpy(dev_c, &c_back[0], M*P * sizeof(float)); }); q.wait(); // Submit command group to queue to multiply matrices: c = a * b q.submit([&](auto &h) { // Read from a and b, write to c int width_a = N; // Execute kernel. h.parallel_for(range(M, P), [=](auto index) { // Get global position in Y direction. 
int row = index[0]; // m int col = index[1]; // p float sum = 0.0f; // Compute the result of one element of c for (int i = 0; i < width_a; i++) { auto a_index = row * width_a + i; auto b_index = i * P + col; sum += dev_a[a_index] * dev_b[b_index]; } auto idx = row * P + col; dev_c[idx] = sum; }); }); q.wait(); q.memcpy(&c_back[0], dev_c, M*P * sizeof(float)); q.wait(); } int result; cout << "Result of matrix multiplication using DPC++: "; result = VerifyResult(c_back); delete[] c_back; return result; } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(float (*c_back)[P]) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. 
int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/guided_matrix_mult_Exceptions/src/1_matrix_mul_null_pointer.cpp
//============================================================== // Copyright © 2020 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= /** * Matrix_mul multiplies two large matrices both the CPU and the offload device, * then compares results. If the code executes on both CPU and the offload * device, the name of the offload device and a success message are displayed. * * For comprehensive instructions regarding DPC++ Programming, go to * https://software.intel.com/en-us/oneapi-programming-guide and search based on * relevant terms noted in the comments. */ #include <CL/sycl.hpp> #include <iostream> #include <limits> // dpc_common.hpp can be found in the dev-utilities include folder. // e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp #include "dpc_common.hpp" using namespace std; using namespace sycl; /** * Each element of the product matrix c[i][j] is computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j] */ // Matrix size constants. constexpr int m_size = 150 * 8; // Must be a multiple of 8. constexpr int M = m_size / 8; constexpr int N = m_size / 4; constexpr int P = m_size / 2; /** * Perform matrix multiplication on host to verify results from device. */ int VerifyResult(float (*c_back)[P]); int main() { cout << "Initializing" << "\n"; // Host memory buffer that device will write data back before destruction. float(*a_back)[N] = new float[M][N]; float(*b_back)[P] = new float[N][P]; float(*c_back)[P] = new float[M][P]; // Intialize a_back for (int i = 0; i < M; i++) for (int j = 0; j < N; j++) a_back[i][j] = 1.0f; // Intialize b_back for (int i = 0; i < N; i++) for (int j = 0; j < P; j++) b_back[i][j] = i + 1.0f; // Intialize c_back for (int i = 0; i < M; i++) for (int j = 0; j < P; j++) c_back[i][j] = 0.0f; // Initialize the device queue with the default selector. The device queue is // used to enqueue kernels. 
It encapsulates all states needed for execution. { property_list propList = property_list{property::queue::enable_profiling()}; queue q(default_selector_v); cout << "Computing" << "\n"; cout << "Device: " << q.get_device().get_info<info::device::name>() << "\n"; cout << "Device compute units: " << q.get_device().get_info<info::device::max_compute_units>() << "\n"; auto maxWorkItemSize = q.get_device().get_info<info::device::max_work_item_sizes<3>>(); cout << "Device max work item size: " << maxWorkItemSize.get(0) << ", " << maxWorkItemSize.get(1) << ", " << maxWorkItemSize.get(2) << "\n"; cout << "Device max work group size: " << q.get_device().get_info<info::device::max_work_group_size>() << "\n"; // Create 2D buffers for matrices, buffer c is bound with host memory c_back float * dev_a = sycl::malloc_device<float>(M*N, q); float * dev_b = sycl::malloc_device<float>(N*P, q); float * dev_c = sycl::malloc_device<float>(M*P, q); cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N << ") * b(" << N << "," << P << ")\n"; // Using three command groups to illustrate execution order. The use of // first two command groups for initializing matrices is not the most // efficient way. It just demonstrates the implicit multiple command group // execution ordering. // Submit command group to queue to initialize matrix a q.memcpy(dev_a, &a_back[0], M*N * sizeof(float)); // Submit command group to queue to initialize matrix b q.memcpy(dev_b, 0, N*P * sizeof(float)); // Submit command group to queue to initialize matrix c q.submit([&](auto &h) { h.memcpy(dev_c, &c_back[0], M*P * sizeof(float)); }); q.wait(); // Submit command group to queue to multiply matrices: c = a * b q.submit([&](auto &h) { // Read from a and b, write to c int width_a = N; // Execute kernel. h.parallel_for(range(M, P), [=](auto index) { // Get global position in Y direction. 
int row = index[0]; // m int col = index[1]; // p float sum = 0.0f; // Compute the result of one element of c for (int i = 0; i < width_a; i++) { auto a_index = row * width_a + i; auto b_index = i * P + col; sum += dev_a[a_index] * dev_b[b_index]; } auto idx = row * P + col; dev_c[idx] = sum; }); }); q.wait(); q.memcpy(&c_back[0], dev_c, M*P * sizeof(float)); q.wait(); } int result; cout << "Result of matrix multiplication using DPC++: "; result = VerifyResult(c_back); delete[] c_back; return result; } bool ValueSame(float a, float b) { return fabs(a - b) < numeric_limits<float>::epsilon(); } int VerifyResult(float (*c_back)[P]) { // Check that the results are correct by comparing with host computing. int i, j, k; // 2D arrays on host side. float(*a_host)[N] = new float[M][N]; float(*b_host)[P] = new float[N][P]; float(*c_host)[P] = new float[M][P]; // Each element of matrix a is 1. for (i = 0; i < M; i++) for (j = 0; j < N; j++) a_host[i][j] = 1.0f; // Each column of b_host is the sequence 1,2,...,N for (i = 0; i < N; i++) for (j = 0; j < P; j++) b_host[i][j] = i + 1.0f; // c_host is initialized to zero. for (i = 0; i < M; i++) for (j = 0; j < P; j++) c_host[i][j] = 0.0f; for (i = 0; i < M; i++) { for (k = 0; k < N; k++) { // Each element of the product is just the sum 1+2+...+n for (j = 0; j < P; j++) { c_host[i][j] += a_host[i][k] * b_host[k][j]; } } } bool mismatch_found = false; // Compare host side results with the result buffer from device side: print // mismatched data 5 times only. 
int print_count = 0; for (i = 0; i < M; i++) { for (j = 0; j < P; j++) { if (!ValueSame(c_back[i][j], c_host[i][j])) { cout << "Fail - The result is incorrect for element: [" << i << ", " << j << "], expected: " << c_host[i][j] << ", but found: " << c_back[i][j] << "\n"; mismatch_found = true; print_count++; if (print_count == 5) break; } } if (print_count == 5) break; } delete[] a_host; delete[] b_host; delete[] c_host; if (!mismatch_found) { cout << "Success - The results are correct!\n"; return 0; } else { cout << "Fail - The results mismatch!\n"; return -1; } }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/jacobi/src/bugged.cpp
//============================================================== // Copyright (C) Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= // This file is included only into jacobi-bugged program. // This file contains methods where actual computation takes place. // Here we injected three bugs, that can be investigated with the help // of the debugger. // Compute x_k1 and write the result to its accessor. void compute_x_k1_kernel (id<1> &index, float *b, float *x_k, float *x_k1) { // Current index. int i = index[0]; // The vector x_k1 should be computed as: // x_k1 = D^{-1}(b - (A - D)x_k), // where A is our matrix, D is its diagonal, b is right hand // side vector, and x_k is the result of the previous iteration. // // Matrices (A - D) and D are hardcoded as: // (A - D) is a stencil matrix [1 1 0 1 1]; // D is a diagonal matrix with all elements equal to 5. float result = b[i]; // Non-diagonal elements of matrix A are all 1s, so to substract // i-th element of (A - D)x_k, we need to substract the sum of elements // of x_k with indices i - 2, i - 1, i + 1, i + 2. We do not substract // the i-th element, as it gets multiplied by 0 in (A - D)x_k. result -= x_k[i - 2]; result -= x_k[i - 1]; result -= x_k[i + 1]; result -= x_k[i + 2]; // In our case the diagonal matrix has only 5s on the diagonal, so // division by 5 gives us its invert. result /= 5; // Save the value to the output buffer. x_k1[i] = result; } // Submits the kernel which updates x_k1 at every iteration. 
void compute_x_k1 (queue &q, buffer_args &buffers) { q.submit([&](auto &h) { accessor acc_b(buffers.b, h, read_only); accessor acc_x_k(buffers.x_k, h, read_only); accessor acc_x_k1(buffers.x_k1, h, write_only); h.parallel_for(range{n}, [=](id<1> index) { compute_x_k1_kernel (index, acc_b.get_pointer(), acc_x_k.get_pointer(), acc_x_k1.get_pointer()); }); }); } // Here we compute values which are used for relative error computation // and copy the vector x_k1 over the vector x_k. void prepare_for_next_iteration (queue &q, buffer_args &buffers) { constexpr size_t l = 16; q.submit([&](auto &h) { accessor acc_abs_error(buffers.abs_error, h, read_write); accessor acc_x_k(buffers.x_k, h, read_write); accessor acc_x_k1(buffers.x_k1, h, read_only); // To compute the relative error we need to prepare two values: // absolute error and L1-norm of x_k1. // Absolute error is computed as L1-norm of (x_k - x_k1). // To compute the L1-norms of x_k1 and (x_k - x_k1) vectors // we use the reduction API with std::plus operator. auto r_abs_error = reduction(buffers.abs_error, h, std::plus<>()); auto r_l1_norm_x_k1 = reduction(buffers.l1_norm_x_k1, h, std::plus<>()); h.parallel_for(nd_range<1>{n, l}, r_abs_error, r_l1_norm_x_k1, [=](nd_item<1> index, auto &abs_error, auto &l1_norm_x_k1) { auto gid = index.get_global_id(); float x_k = acc_x_k[gid]; float x_k1 = acc_x_k1[gid]; // Execute reduction sums. float local_abs_error = abs(x_k - x_k1); abs_error += local_abs_error; // Bug 2 challenge: breakpoint here. l1_norm_x_k1 += abs(x_k1); // Copy the vector x_k1 over x_k. acc_x_k[gid] = x_k1; }); }); } // Iterate until the algorithm converges (success) or the maximum number // of iterations is reached (fail). int iterate(queue &q, float *b, float *x_k, float *x_k1, float &rel_error) { // Absolute error, ||x_k - x_k1||_1, L1-norm of (x_k - x_k1). float abs_error = 0; // ||x_k1||_1, L1-norm of x_k1. float l1_norm_x_k1 = 0; int k = 0; // Jacobi iteration begins. do {// k-th iteration of Jacobi. 
// Create SYCL buffers. buffer_args buffers {b, x_k, x_k1, &l1_norm_x_k1, &abs_error}; compute_x_k1(q, buffers); prepare_for_next_iteration(q, buffers); q.wait_and_throw(); // Compute relative error based on reduced values from this iteration. rel_error = abs_error / (l1_norm_x_k1 + 1e-32); if (abs_error < 0 || l1_norm_x_k1 < 0 || (abs_error + l1_norm_x_k1) < 1e-32) { cout << "\nfail; Bug 3. Fix it on GPU. The relative error has invalid value " << "after iteration " << k << ".\n" << "Hint 1: inspect reduced error values. With the challenge scenario\n" << " from bug 2 you can verify that reduction algorithms compute\n" << " the correct values inside kernel on GPU. Take into account\n" << " SIMD lanes: on GPU each thread processes several work items\n" << " at once, so you need to modify your commands and update\n" << " the convenience variable for each SIMD lane, e.g. using \n" << " `thread apply :*`.\n" << "Hint 2: why don't we get the correct values at the host part of\n" << " the application?\n"; exit(0); } // Periodically print out how the algorithm behaves. if (k % 20 == 0) { std::cout << "Iteration " << k << ", relative error = " << rel_error << "\n"; } k++; } while (rel_error > tolerance && k < max_number_of_iterations); // Jacobi iteration ends. return k; }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/jacobi/src/jacobi.cpp
//============================================================== // Copyright (C) Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= // The program solves the linear equation Ax=b, where matrix A is a // n x n sparse matrix with diagonals [1 1 5 1 1], // vector b is set such that the solution is [1 1 ... 1]^T. // The linear system is solved via Jacobi iteration. // The algorithm converges, as the matrix A is strictly diagonally dominant. #include <sycl/sycl.hpp> #include <iostream> #include <cmath> // Location of file: <oneapi-root>/dev-utilities/<version>/include #include "dpc_common.hpp" using namespace std; using namespace sycl; // The size of the problem. // The matrix A is n x n matrix, the length of the vector x is n. constexpr size_t n = 64; // The maximum number of iterations the algorithm is going // to take. constexpr size_t max_number_of_iterations = 100; // We expect each element of vector x to be that close // to the analitycal solution. constexpr float tolerance = 1e-4; // Helper structure to initialize and hold all our SYCL buffers. // Note: no bugs here. struct buffer_args { buffer<float, 1> b; buffer<float, 1> x_k; buffer<float, 1> x_k1; buffer<float, 1> l1_norm_x_k1; buffer<float, 1> abs_error; buffer_args(float *b, float *x_k, float *x_k1, float *l1_norm_x_k1, float *abs_error): // Right hand side vector b; b(buffer(b, range{n})), // Iteration vectors x_k and x_k1; x_k(buffer(x_k, range{n})), x_k1(buffer(x_k1, range{n})), // Sum of absolute values of x_k1 elements. l1_norm_x_k1(buffer(l1_norm_x_k1, range{1})), // Absolute error. abs_error(buffer(abs_error, range{1})) {} }; // Depending on whether FIXED is set, select fixed or bugged versions // of computation methods. #ifdef FIXED #include "fixed.cpp" #else #include "bugged.cpp" #endif // FIXED // Initialize right hand side vector b and the initial guess for x_k. 
void initialize_input(float *b, float *x_k); int main(int argc, char *argv[]) { // The right hand side vector. float b[n]; // We store an intermediate result after every iteration here. float x_k[n]; // At each iteration we compute a new value of x here. We need // both buffers x_k and x_k1 due to the data dependency: to compute // one element at the iteration k + 1 we need several elements // from the iteration k. float x_k1[n]; // We will compute the relative error at each iteration as: // // ||x_k - x_k1||_1 abs_error // rel_error = ------------------- = -------------- // ||x_k1||_1 l1_norm_x_k1 // Relative error. float rel_error; // Initialize the input. // Note: the matrix A is hardcoded as a stencil matrix [1 1 5 1 1] // into the kernel. initialize_input(b, x_k); // Iteration counter. int k; try { queue q(default_selector_v, dpc_common::exception_handler); cout << "[SYCL] Using device: [" << q.get_device().get_info<info::device::name>() << "] from [" << q.get_device().get_platform().get_info<info::platform::name>() << "]\n"; k = iterate (q, b, x_k, x_k1, rel_error); } catch (sycl::exception const &e) { cout << "fail; synchronous exception occurred: " << e.what() << "\n"; return -1; } // Verify the output, we expect a vector whose components are close to 1.0. bool correct = true; for (int i = 0; i < n; i++) { if ((x_k[i] - 1.0f) * (x_k[i] - 1.0f) > tolerance) correct = false; } if (correct) cout << "\nsuccess; all elements of the resulting vector are close to 1.0.\n"; else { cout << "\nfail; Bug 1. Fix this on CPU: components of x_k are not close to 1.0.\n" << "Hint: figure out which elements are farthest from 1.0.\n"; return 0; } // Check whether the algorithm converged. if (k < max_number_of_iterations) { cout << "success; the relative error (" << rel_error << ") is below the desired tolerance " << tolerance <<" after " << k <<" iterations.\n\n"; } else { cout << "\nfail; Bug 2. 
Fix this on CPU: the relative error (" << rel_error << ") is greater than\n" << " the desired tolerance " << tolerance <<" after " << max_number_of_iterations << " iterations.\n" << "Hint: check the reduction results at several iterations.\n" << "Challenge: in the debugger you can simmulate the computation of a reduced\n" << " value by putting a BP inside the corresponding kernel and defining\n" << " a convenience variable. We will compute the reduced value at this\n" << " convenience variable: at each BP hit we update it with a help of \"commands\"\n" << " command. After the reduction kernel is finished, the convenience\n" << " variable should contain the reduced value.\n" << " See README for details.\n"; return 0; } return 0; } // Note: no bugs here. void initialize_input(float *b, float *x_k) { constexpr float main_b = 9; // Vector b and the matrix A are hardcoded // such that the analytical solution of the system Ax=b is a vector // whose elements are 1s. for (int i = 0; i < n; i++) b[i] = main_b; // Boundary values of the vector b. b[0] = main_b - 2; b[1] = main_b - 1; b[n - 2] = main_b - 1; b[n - 1] = main_b - 2; // Initial guess of x is b. for (int i = 0; i < n; i++) x_k[i] = b[i]; }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Tools/ApplicationDebugger/jacobi/src/fixed.cpp
//============================================================== // Copyright (C) Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= // This file is included only into jacobi-fixed program. // This file contains methods where actual computation takes place. // Here we explain and show how to fix Bugs 1, 2, and 3. // Compute x_k1 and write the result to its accessor. void compute_x_k1_kernel (id<1> &index, float *b, float *x_k, float *x_k1) { // Current index. int i = index[0]; // The vector x_k1 should be computed as: // x_k1 = D^{-1}(b - (A - D)x_k), // where A is our matrix, D is its diagonal, b is right hand // side vector, and x_k is the result of the previous iteration. // // Matrices (A - D) and D are hardcoded as: // (A - D) is a stencil matrix [1 1 0 1 1]; // D is a diagonal matrix with all elements equal to 5. float result = b[i]; // Non-diagonal elements of matrix A are all 1s, so to substract // i-th element of (A - D)x_k, we need to substract the sum of elements // of x_k with indices i - 2, i - 1, i + 1, i + 2. We do not substract // the i-th element, as it gets multiplied by 0 in (A - D)x_k. // Fix Bug 1: out-of-bounds access: all indices below can trigger // out-of-bounds access and thus garbage values will be read. // Fix it by adding checks that the index exists: if (i > 1) result -= x_k[i - 2]; if (i > 0) result -= x_k[i - 1]; if (i < n - 1) result -= x_k[i + 1]; if (i < n - 2) result -= x_k[i + 2]; // In our case the diagonal matrix has only 5s on the diagonal, so // division by 5 gives us its invert. result /= 5; // Save the value to the output buffer. x_k1[i] = result; } // Submits the kernel which updates x_k1 at every iteration. 
void compute_x_k1 (queue &q, buffer_args &buffers) { q.submit([&](auto &h) { accessor acc_b(buffers.b, h, read_only); accessor acc_x_k(buffers.x_k, h, read_only); accessor acc_x_k1(buffers.x_k1, h, write_only); h.parallel_for(range{n}, [=](id<1> index) { compute_x_k1_kernel (index, acc_b.get_pointer(), acc_x_k.get_pointer(), acc_x_k1.get_pointer()); }); }); } // Here we compute values which are used for relative error computation // and copy the vector x_k1 over the vector x_k. void prepare_for_next_iteration (queue &q, buffer_args &buffers) { constexpr size_t l = 16; q.submit([&](auto &h) { accessor acc_abs_error(buffers.abs_error, h, read_write); accessor acc_x_k(buffers.x_k, h, read_write); accessor acc_x_k1(buffers.x_k1, h, read_only); // To compute the relative error we need to prepare two values: // absolute error and L1-norm of x_k1. // Absolute error is computed as L1-norm of (x_k - x_k1). // To compute the L1-norms of x_k1 and (x_k - x_k1) vectors // we use the reduction API with std::plus operator. auto r_abs_error = reduction(buffers.abs_error, h, std::plus<>()); auto r_l1_norm_x_k1 = reduction(buffers.l1_norm_x_k1, h, std::plus<>()); h.parallel_for(nd_range<1>{n, l}, r_abs_error, r_l1_norm_x_k1, [=](nd_item<1> index, auto &abs_error, auto &l1_norm_x_k1) { auto gid = index.get_global_id(); float x_k = acc_x_k[gid]; float x_k1 = acc_x_k1[gid]; // Execute reduction sums. float local_abs_error = abs(x_k - x_k1); abs_error += local_abs_error; // Bug 2 challenge: breakpoint here. l1_norm_x_k1 += abs(x_k1); // Copy the vector x_k1 over x_k. acc_x_k[gid] = x_k1; }); }); } // Iterate until the algorithm converges (success) or the maximum number // of iterations is reached (fail). int iterate(queue &q, float *b, float *x_k, float *x_k1, float &rel_error) { // Absolute error, ||x_k - x_k1||_1, L1-norm of (x_k - x_k1). float abs_error = 0; // ||x_k1||_1, L1-norm of x_k1. float l1_norm_x_k1 = 0; int k = 0; // Jacobi iteration begins. do {// k-th iteration of Jacobi. 
// Fix bug 2: we have to reset the error values at each iteration, otherwise // the relative error accumulates through iterations and does not fall // below the tolerance. abs_error = 0; l1_norm_x_k1 = 0; { // Fix bug 3: the host values of abs_error and l1_norm_x_k1 // were not synchronised with their new values on device. // Open new scope for buffers. Once the scope is ended, the destructors // of buffers will write the data back from device to host. // Create SYCL buffers. buffer_args buffers {b, x_k, x_k1, &l1_norm_x_k1, &abs_error}; compute_x_k1(q, buffers); prepare_for_next_iteration(q, buffers); } // Compute relative error based on reduced values from this iteration. rel_error = abs_error / (l1_norm_x_k1 + 1e-32); if (abs_error < 0 || l1_norm_x_k1 < 0 || (abs_error + l1_norm_x_k1) < 1e-32) { cout << "\nfail; Bug 3. Fix it on GPU. The relative error has invalid value " << "after iteration " << k << ".\n" << "Hint 1: inspect reduced error values. With the challenge scenario\n" << " from bug 2 you can verify that reduction algorithms compute\n" << " the correct values inside kernel on GPU. Take into account\n" << " SIMD lanes: on GPU each thread processes several work items\n" << " at once, so you need to modify your commands and update\n" << " the convenience variable for each SIMD lane, e.g. using \n" << " `thread apply :*`.\n" << "Hint 2: why don't we get the correct values at the host part of\n" << " the application?\n"; exit(0); } // Periodically print out how the algorithm behaves. if (k % 20 == 0) { std::cout << "Iteration " << k << ", relative error = " << rel_error << "\n"; } k++; } while (rel_error > tolerance && k < max_number_of_iterations); // Jacobi iteration ends. return k; }
cpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/src/main.cpp
//==============================================================
// Copyright © 2020-2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <boost/filesystem.hpp>
#include <boost/program_options.hpp>
#include <boost/range/iterator_range.hpp>

#include <chrono>
#include <fstream>
#include <iostream>

#include "devicemanager/devicemanager.hpp"
#include "pointpillars/pointpillars.hpp"
#include "pointpillars/pointpillars_config.hpp"
#include "pointpillars/pointpillars_util.hpp"

/**
 * Read in a LiDAR point cloud from a file in Point Cloud Data format (as ascii)
 * https://pointclouds.org/documentation/tutorials/pcd_file_format.html
 *
 * @param[in] file_name is the name of the PCD file
 * @param[in] points are the parsed points from the PCD as x,y,z,intensity values
 * @return number of of points in the point cloud
 */
std::size_t ReadPointCloud(std::string const &file_name, std::vector<float> &points) {
  if (!boost::filesystem::exists(file_name) || file_name.empty()) {
    return 0;
  }

  std::size_t number_of_points = 0;
  std::ifstream in(file_name);
  std::string line;
  bool parse_data = false;  // becomes true once the "DATA" header line is seen

  // read PCD file in a line-by-line manner
  // (also stops once 4 floats per declared point have been collected)
  while (std::getline(in, line) && points.size() <= 4 * number_of_points) {
    if (parse_data) {
      // NOTE(review): std::istringstream needs <sstream>; it is currently
      // pulled in transitively — confirm before removing other includes.
      std::istringstream iss(line);
      float x, y, z, intensity;
      double timestamp;

      // each data row is: x y z intensity timestamp
      if (!(iss >> x >> y >> z >> intensity >> timestamp)) {
        return 0;
      }

      points.push_back(x);
      points.push_back(y);
      points.push_back(z);
      points.push_back(intensity);
    } else if (line.find("POINTS") != std::string::npos) {
      // header line "POINTS <n>": substr(7) skips the keyword and space
      number_of_points = atoll(line.substr(7).c_str());
    } else if (line.find("DATA") != std::string::npos) {
      parse_data = true;
    }
  }

  return number_of_points;
}

// Entry point: parses CLI options, loads the example point cloud, and runs
// the PointPillars pipeline on each requested execution device.
int main(int argc, char *argv[]) {
  boost::program_options::options_description desc("Allowed options");

  // clang-format off
  desc.add_options()
    ("help", "produce help message")
    ("pfe_model", boost::program_options::value<std::string>()->default_value("pfe.onnx"), "PFE model file path (.onnx, .xml)")
    ("rpn_model", boost::program_options::value<std::string>()->default_value("rpn.onnx"), "RPN model file path (.onnx, .xml)")
    ("data", boost::program_options::value<std::string>()->default_value("./data"), "data path")
    ("cpu", "Use CPU as execution device (default)")
    ("gpu", "Use GPU as execution device")
    ("list", "Get available execution devices");
  // clang-format on

  boost::program_options::variables_map vm;
  boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), vm);
  boost::program_options::notify(vm);

  // parse program options
  if (vm.count("help")) {
    std::cout << desc << std::endl;
    return 1;
  }

  if (vm.count("list")) {
    devicemanager::GetDevices();
    return 1;
  }

  // Build the list of devices to run on; CPU is the fallback default.
  std::vector<sycl::info::device_type> execution_devices;
  if (vm.count("gpu")) {
    execution_devices.push_back(sycl::info::device_type::gpu);
  }
  if (vm.count("cpu") || execution_devices.empty()) {
    execution_devices.push_back(sycl::info::device_type::cpu);
  }

  // Point Pillars initialization
  pointpillars::PointPillarsConfig config;
  std::vector<pointpillars::ObjectDetection> object_detections;

  // read point cloud
  // NOTE(review): the input file name is hard-coded here; the --data option
  // is parsed but not used for locating it — verify intended behavior.
  std::size_t number_of_points;
  std::vector<float> points;
  number_of_points = ReadPointCloud("example.pcd", points);

  // if the point cloud was empty, something went wrong
  if ((number_of_points == 0) || points.empty()) {
    std::cout << "Unable to read point cloud file. Please put the point cloud file into the data/ folder."
              << std::endl;
    return -1;
  }

  config.pfe_model_file = vm["pfe_model"].as<std::string>();
  config.rpn_model_file = vm["rpn_model"].as<std::string>();

  // Run PointPillars for each execution device
  for (const auto &device_type : execution_devices) {
    if (!devicemanager::SelectDevice(device_type)) {
      std::cout << "\n\n";
      continue;
    }

    // setup PointPillars
    pointpillars::PointPillars point_pillars(0.5f, 0.5f, config);

    const auto start_time = std::chrono::high_resolution_clock::now();

    // run PointPillars
    try {
      point_pillars.Detect(points.data(), number_of_points, object_detections);
    } catch (const std::runtime_error &e) {
      std::cout << "Exception during PointPillars execution\n";
      std::cout << e.what() << std::endl;
      return -1;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    std::cout << "Execution time: "
              << std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count() << "ms\n\n";

    // print results
    std::cout << object_detections.size() << " cars detected\n";
    for (auto const &detection : object_detections) {
      std::cout << config.classes[detection.class_id] << ": Probability = " << detection.class_probabilities[0]
                << " Position = (" << detection.x << ", " << detection.y << ", " << detection.z
                << ") Length = " << detection.length << " Width = " << detection.width << "\n";
    }
    std::cout << "\n\n";
  }

  return 0;
}
cpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/src/pointpillars/nms.cpp
//==============================================================
// Copyright © 2020-2021 Intel Corporation (oneAPI modifications)
//
// SPDX-License-Identifier: MIT
// =============================================================

// ------------------------------------------------------------------
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License
// Modified from MATLAB Faster R-CNN
// (https://github.com/shaoqingren/faster_rcnn)
// ------------------------------------------------------------------
#include "pointpillars/nms.hpp"

#include <sycl/sycl.hpp>

#include <algorithm>
#include <numeric>
#include <set>
#include <vector>

#include "devicemanager/devicemanager.hpp"

namespace pointpillars {

// Intersection over Union (IoU) calculation
// a and b are pointers to the input objects
// @return IoU value = Area of overlap / Area of union
// @details: https://en.wikipedia.org/wiki/Jaccard_index
// Boxes are axis-aligned [xmin, ymin, xmax, ymax]; the "+ 1" terms follow
// the original Faster R-CNN pixel-inclusive area convention.
inline float DevIoU(float const *const a, float const *const b) {
  float left = sycl::max(a[0], b[0]);
  float right = sycl::min(a[2], b[2]);
  float top = sycl::max(a[1], b[1]);
  float bottom = sycl::min(a[3], b[3]);
  float width = sycl::max((float)(right - left + 1), 0.f);
  float height = sycl::max((float)(bottom - top + 1), 0.f);
  float interS = width * height;
  float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return interS / (Sa + Sb - interS);
}

NMS::NMS(const int num_threads, const int num_box_corners, const float nms_overlap_threshold)
    : num_threads_(num_threads), num_box_corners_(num_box_corners), nms_overlap_threshold_(nms_overlap_threshold) {}

// Dispatch: choose the sequential or parallel NMS implementation
// depending on the currently selected execution device.
void NMS::DoNMS(const size_t host_filter_count, float *dev_sorted_box_for_nms, int *out_keep_inds,
                size_t &out_num_to_keep) {
  // Currently the parallel implementation of NMS only works on the GPU
  // Therefore, in case of a CPU or Host device, we use the sequential implementation
  if (!devicemanager::GetCurrentDevice().is_gpu()) {
    SequentialNMS(host_filter_count, dev_sorted_box_for_nms, out_keep_inds, out_num_to_keep);
  } else {
    ParallelNMS(host_filter_count, dev_sorted_box_for_nms, out_keep_inds, out_num_to_keep);
  }
}

// O(n^2) host-side NMS: assumes boxes arrive sorted by descending score,
// so suppressing j > i keeps the higher-scoring box.
void NMS::SequentialNMS(const size_t host_filter_count, float *dev_sorted_box_for_nms, int *out_keep_inds,
                        size_t &out_num_to_keep) {
  std::vector<int> keep_inds_vec;           // vector holding the object indexes that should be kept
  keep_inds_vec.resize(host_filter_count);  // resize vector to maximum possible length

  // fill vector with default indexes 0, 1, 2, ...
  std::iota(keep_inds_vec.begin(), keep_inds_vec.end(), 0);

  // Convert vector to a C++ set
  std::set<int> keep_inds(keep_inds_vec.begin(), keep_inds_vec.end());

  // Filtering overlapping boxes
  for (size_t i = 0; i < host_filter_count; ++i) {
    for (size_t j = i + 1; j < host_filter_count; ++j) {
      auto iou_value = DevIoU(dev_sorted_box_for_nms + i * num_box_corners_,
                              dev_sorted_box_for_nms + j * num_box_corners_);
      if (iou_value > nms_overlap_threshold_) {
        // if IoU value to too high, remove the index from the set
        keep_inds.erase(j);
      }
    }
  }

  // fill output data, with the kept indexes
  out_num_to_keep = keep_inds.size();
  int keep_counter = 0;
  for (auto ind : keep_inds) {
    out_keep_inds[keep_counter] = ind;
    keep_counter++;
  }
}

// Device kernel: each (row-block, col-block) pair compares one tile of boxes
// against another and records suppressed boxes as bits in dev_mask.
// block_boxes is a local-memory staging buffer for the column tile.
void Kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask,
            const int num_box_corners, sycl::nd_item<3> item_ct1, float *block_boxes) {
  const unsigned long row_start = item_ct1.get_group(1);
  const unsigned long col_start = item_ct1.get_group(2);
  const unsigned long block_threads = item_ct1.get_local_range().get(2);

  // Clamp tile sizes at the tail of the box list.
  const unsigned long row_size = sycl::min((unsigned long)(n_boxes - row_start * block_threads), block_threads);
  const unsigned long col_size = sycl::min((unsigned long)(n_boxes - col_start * block_threads), block_threads);

  // Stage the column tile's boxes into local memory.
  if (item_ct1.get_local_id(2) < col_size) {
    block_boxes[item_ct1.get_local_id(2) * num_box_corners + 0] =
        dev_boxes[(block_threads * col_start + item_ct1.get_local_id(2)) * num_box_corners + 0];
    block_boxes[item_ct1.get_local_id(2) * num_box_corners + 1] =
        dev_boxes[(block_threads * col_start + item_ct1.get_local_id(2)) * num_box_corners + 1];
    block_boxes[item_ct1.get_local_id(2) * num_box_corners + 2] =
        dev_boxes[(block_threads * col_start + item_ct1.get_local_id(2)) * num_box_corners + 2];
    block_boxes[item_ct1.get_local_id(2) * num_box_corners + 3] =
        dev_boxes[(block_threads * col_start + item_ct1.get_local_id(2)) * num_box_corners + 3];
  }

  // NOTE(review): there is no work-group barrier between writing and reading
  // block_boxes; work-items may read entries staged by other items — verify
  // this is safe for the work-group sizes used here.
  if (item_ct1.get_local_id(2) < row_size) {
    const int cur_box_idx = block_threads * row_start + item_ct1.get_local_id(2);
    const float cur_box[NUM_2D_BOX_CORNERS_MACRO] = {
        dev_boxes[cur_box_idx * num_box_corners + 0], dev_boxes[cur_box_idx * num_box_corners + 1],
        dev_boxes[cur_box_idx * num_box_corners + 2], dev_boxes[cur_box_idx * num_box_corners + 3]};

    unsigned long long t = 0;
    int start = 0;
    // On the diagonal tile, only compare against later boxes.
    if (row_start == col_start) {
      start = item_ct1.get_local_id(2) + 1;
    }
    for (size_t i = start; i < col_size; i++) {
      if (DevIoU(cur_box, block_boxes + i * num_box_corners) > nms_overlap_thresh) {
        t |= 1ULL << i;  // bit i set => box i of this column tile is suppressed
      }
    }
    const int col_blocks = DIVUP(n_boxes, block_threads);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

// GPU NMS: builds a suppression bitmask on the device, then resolves the
// kept indexes sequentially on the host.
void NMS::ParallelNMS(const size_t host_filter_count, float *dev_sorted_box_for_nms, int *out_keep_inds,
                      size_t &out_num_to_keep) {
  const unsigned long col_blocks = DIVUP(host_filter_count, num_threads_);
  sycl::range<3> blocks(DIVUP(host_filter_count, num_threads_), DIVUP(host_filter_count, num_threads_), 1);
  sycl::range<3> threads(num_threads_, 1, 1);

  unsigned long long *dev_mask;
  sycl::queue queue = devicemanager::GetCurrentQueue();
  dev_mask = sycl::malloc_device<unsigned long long>(host_filter_count * col_blocks, queue);

  queue.submit([&](auto &h) {
    // Local-memory scratch for one column tile of boxes (256 floats).
    sycl::accessor<float, 1, sycl::access::mode::read_write, sycl::access::target::local> block_boxes_acc_ct1(
        sycl::range<1>(256), h);

    auto global_range = blocks * threads;
    auto nms_overlap_threshold_ct1 = nms_overlap_threshold_;
    auto num_box_corners_ct4 = num_box_corners_;

    h.parallel_for(sycl::nd_range<3>(sycl::range<3>(global_range.get(2), global_range.get(1), global_range.get(0)),
                                     sycl::range<3>(threads.get(2), threads.get(1), threads.get(0))),
                   [=](sycl::nd_item<3> item_ct1) {
                     Kernel(host_filter_count, nms_overlap_threshold_ct1, dev_sorted_box_for_nms, dev_mask,
                            num_box_corners_ct4, item_ct1, block_boxes_acc_ct1.get_pointer());
                   });
  });
  queue.wait();

  // postprocess for nms output
  std::vector<unsigned long long> host_mask(host_filter_count * col_blocks);
  queue.memcpy(&host_mask[0], dev_mask, sizeof(unsigned long long) * host_filter_count * col_blocks).wait();

  std::vector<unsigned long long> remv(col_blocks);
  memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);

  // Walk boxes in (sorted) order; a box is kept unless a previously kept
  // box has already set its suppression bit, and kept boxes OR their own
  // suppression masks into remv.
  for (size_t i = 0; i < host_filter_count; i++) {
    int nblock = i / num_threads_;
    int inblock = i % num_threads_;

    if (!(remv[nblock] & (1ULL << inblock))) {
      out_keep_inds[out_num_to_keep++] = i;
      unsigned long long *p = &host_mask[0] + i * col_blocks;
      for (size_t j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }

  // release the dev_mask, as it was only of temporary use
  sycl::free(dev_mask, devicemanager::GetCurrentQueue());
}

}  // namespace pointpillars
cpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/src/pointpillars/scatter.cpp
/*
 * Copyright 2018-2019 Autoware Foundation. All rights reserved.
 * Copyright (c) 2019-2021 Intel Corporation (oneAPI modifications)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pointpillars/scatter.hpp"

#include <sycl/sycl.hpp>

#include <algorithm>

#include "devicemanager/devicemanager.hpp"

namespace pointpillars {

// Device kernel: copies one learned feature of one pillar from the dense PFE
// output into the pillar's (x, y) cell of the scattered 2D feature map.
// Launch layout: one work-group per pillar, one work-item per feature, so the
// local id doubles as the feature index.
void ScatterKernel(int *x_coors, int *y_coors, float *pfe_output, float *scattered_feature,
                   const int max_num_pillars_, const int grid_x_size, const int grid_y_size,
                   sycl::nd_item<3> item_ct1) {
  const int pillar_idx = item_ct1.get_group(2);      // which pillar this group handles
  const int feature_idx = item_ct1.get_local_id(2);  // which feature this item copies

  // Grid cell that owns this pillar.
  const int cell_x = x_coors[pillar_idx];
  const int cell_y = y_coors[pillar_idx];

  // PFE output is laid out feature-major: [feature][pillar].
  const float value = pfe_output[feature_idx * max_num_pillars_ + pillar_idx];

  // Scattered map is laid out [feature][y][x].
  scattered_feature[feature_idx * grid_y_size * grid_x_size + cell_y * grid_x_size + cell_x] = value;
}

Scatter::Scatter(const int num_features, const int max_num_pillars, const int grid_x_size, const int grid_y_size)
    : num_features_(num_features),
      max_num_pillars_(max_num_pillars),
      grid_x_size_(grid_x_size),
      grid_y_size_(grid_y_size) {}

// Launch the scatter kernel over every (pillar, feature) pair and block
// until the device work completes.
void Scatter::DoScatter(const int pillar_count, int *x_coors, int *y_coors, float *pfe_output,
                        float *scattered_feature) {
  sycl::queue queue = devicemanager::GetCurrentQueue();

  queue.submit([&](auto &h) {
    // Copy members into locals so the device lambda does not capture `this`.
    const auto pillar_capacity = max_num_pillars_;
    const auto grid_x = grid_x_size_;
    const auto grid_y = grid_y_size_;

    // One work-group of num_features_ items per pillar.
    const sycl::range<3> local_range(1, 1, num_features_);
    const sycl::range<3> global_range = sycl::range<3>(1, 1, pillar_count) * local_range;

    h.parallel_for(sycl::nd_range<3>(global_range, local_range), [=](sycl::nd_item<3> item_ct1) {
      ScatterKernel(x_coors, y_coors, pfe_output, scattered_feature, pillar_capacity, grid_x, grid_y, item_ct1);
    });
  });

  queue.wait();
}

}  // namespace pointpillars
cpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/src/pointpillars/postprocess.cpp
/*
 * Copyright 2018-2019 Autoware Foundation. All rights reserved.
 * Copyright (c) 2019-2021 Intel Corporation (oneAPI modifications)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>

#include <sycl/sycl.hpp>

#include <algorithm>

#include "pointpillars/postprocess.hpp"  // the oneapi headers have to be included at first here!

#include "devicemanager/devicemanager.hpp"

namespace pointpillars {

// Logistic sigmoid, used to turn raw class logits into probabilities.
inline float Sigmoid(float x) { return 1.0f / (1.0f + sycl::exp(-x)); }

// FilterKernel decodes the RegionProposalNetwork output and filters the generated detections by threshold
// it also generates a box representation that will later be used during nms
// Each invocation handles one anchor (`index`); survivors are appended to the
// filtered_* arrays at a slot claimed atomically via filter_count.
void FilterKernel(const float *box_preds, const float *cls_preds, const float *dir_preds, const int *anchor_mask,
                  const float *dev_anchors_px, const float *dev_anchors_py, const float *dev_anchors_pz,
                  const float *dev_anchors_dx, const float *dev_anchors_dy, const float *dev_anchors_dz,
                  const float *dev_anchors_ro, float *filtered_box, float *filtered_score, float *multiclass_score,
                  int *filtered_dir, int *dev_filtered_class_id, float *box_for_nms, int *filter_count,
                  const float float_min, const float float_max, const float score_threshold,
                  const size_t num_box_corners, const size_t num_output_box_feature, const size_t num_cls,
                  const int index) {
  float class_score_cache[20];  // Asume maximum class size of 20 to avoid runtime allocations

  int tid = index;
  // Decode the class probabilities using the Sigmoid function and pick the
  // best-scoring class for this anchor.
  float score = 0.f;
  int class_id = 0;
  for (size_t i = 0; i < num_cls; i++) {
    class_score_cache[i] = Sigmoid(cls_preds[tid * num_cls + i]);

    if (class_score_cache[i] > score) {
      score = class_score_cache[i];
      class_id = i;
    }
  }

  // if there is data inside the anchor
  if (anchor_mask[tid] == 1 && score > score_threshold) {
    // Claim an output slot; counter is unique across concurrent invocations.
    int counter = AtomicFetchAdd(filter_count, 1);
    float za = dev_anchors_pz[tid] + dev_anchors_dz[tid] / 2;

    // decode RPN output, the formulas are used according to the encoding used in the paper
    float diagonal = sycl::sqrt(dev_anchors_dx[tid] * dev_anchors_dx[tid] + dev_anchors_dy[tid] * dev_anchors_dy[tid]);
    float box_px = box_preds[tid * num_output_box_feature + 0] * diagonal + dev_anchors_px[tid];
    float box_py = box_preds[tid * num_output_box_feature + 1] * diagonal + dev_anchors_py[tid];
    float box_pz = box_preds[tid * num_output_box_feature + 2] * dev_anchors_dz[tid] + za;
    float box_dx = sycl::exp((float)(box_preds[tid * num_output_box_feature + 3])) * dev_anchors_dx[tid];
    float box_dy = sycl::exp((float)(box_preds[tid * num_output_box_feature + 4])) * dev_anchors_dy[tid];
    float box_dz = sycl::exp((float)(box_preds[tid * num_output_box_feature + 5])) * dev_anchors_dz[tid];
    float box_ro = box_preds[tid * num_output_box_feature + 6] + dev_anchors_ro[tid];
    box_pz = box_pz - box_dz / 2.f;

    // Store the detection x,y,z,l,w,h,theta coordinates
    filtered_box[counter * num_output_box_feature + 0] = box_px;
    filtered_box[counter * num_output_box_feature + 1] = box_py;
    filtered_box[counter * num_output_box_feature + 2] = box_pz;
    filtered_box[counter * num_output_box_feature + 3] = box_dx;
    filtered_box[counter * num_output_box_feature + 4] = box_dy;
    filtered_box[counter * num_output_box_feature + 5] = box_dz;
    filtered_box[counter * num_output_box_feature + 6] = box_ro;
    filtered_score[counter] = score;

    // Copy the class scores
    for (size_t i = 0; i < num_cls; i++) {
      multiclass_score[counter * num_cls + i] = class_score_cache[i];
    }

    // Decode the direction class specified in SecondNet: Sparsely Embedded Convolutional Detection
    int direction_label;
    if (dir_preds[tid * 2 + 0] < dir_preds[tid * 2 + 1]) {
      direction_label = 1;
    } else {
      direction_label = 0;
    }
    filtered_dir[counter] = direction_label;
    dev_filtered_class_id[counter] = class_id;

    // convert normal box(normal boxes: x, y, z, w, l, h, r) to box(xmin, ymin,
    // xmax, ymax) for nms calculation
    // First: dx, dy -> box(x0y0, x0y1, x1y0, x1y1)
    float corners[NUM_3D_BOX_CORNERS_MACRO] = {float(-0.5f * box_dx), float(-0.5f * box_dy),
                                               float(-0.5f * box_dx), float(0.5f * box_dy),
                                               float(0.5f * box_dx),  float(0.5f * box_dy),
                                               float(0.5f * box_dx),  float(-0.5f * box_dy)};

    // Second: Rotate, Offset and convert to point(xmin. ymin, xmax, ymax)
    float rotated_corners[NUM_3D_BOX_CORNERS_MACRO];
    float offset_corners[NUM_3D_BOX_CORNERS_MACRO];
    float sin_yaw = sycl::sin(box_ro);
    float cos_yaw = sycl::cos(box_ro);
    float xmin = float_max;
    float ymin = float_max;
    float xmax = float_min;
    float ymax = float_min;
    for (size_t i = 0; i < num_box_corners; i++) {
      rotated_corners[i * 2 + 0] = cos_yaw * corners[i * 2 + 0] - sin_yaw * corners[i * 2 + 1];
      rotated_corners[i * 2 + 1] = sin_yaw * corners[i * 2 + 0] + cos_yaw * corners[i * 2 + 1];

      offset_corners[i * 2 + 0] = rotated_corners[i * 2 + 0] + box_px;
      offset_corners[i * 2 + 1] = rotated_corners[i * 2 + 1] + box_py;

      xmin = sycl::fmin(xmin, offset_corners[i * 2 + 0]);
      ymin = sycl::fmin(ymin, offset_corners[i * 2 + 1]);
      // Fix: accumulate the running maximum in xmax. The original compared
      // against xmin, which produced an incorrect xmax (and therefore a
      // wrong axis-aligned box fed to NMS).
      xmax = sycl::fmax(xmax, offset_corners[i * 2 + 0]);
      ymax = sycl::fmax(ymax, offset_corners[i * 2 + 1]);
    }

    // Store the resulting box, box_for_nms(num_box, 4)
    box_for_nms[counter * num_box_corners + 0] = xmin;
    box_for_nms[counter * num_box_corners + 1] = ymin;
    box_for_nms[counter * num_box_corners + 2] = xmax;
    box_for_nms[counter * num_box_corners + 3] = ymax;
  }
}

// This Kernel uses a list of indices to sort the given input arrays.
void SortBoxesByIndexKernel(float *filtered_box, int *filtered_dir, int *filtered_class_id, float *dev_multiclass_score, float *box_for_nms, int *indexes, int filter_count, float *sorted_filtered_boxes, int *sorted_filtered_dir, int *sorted_filtered_class_id, float *dev_sorted_multiclass_score, float *sorted_box_for_nms, const size_t num_box_corners, const size_t num_output_box_feature, const size_t num_cls, sycl::nd_item<3> item_ct1) { int tid = item_ct1.get_local_id(2) + item_ct1.get_group(2) * item_ct1.get_local_range().get(2); if (tid < filter_count) { int sort_index = indexes[tid]; sorted_filtered_boxes[tid * num_output_box_feature + 0] = filtered_box[sort_index * num_output_box_feature + 0]; sorted_filtered_boxes[tid * num_output_box_feature + 1] = filtered_box[sort_index * num_output_box_feature + 1]; sorted_filtered_boxes[tid * num_output_box_feature + 2] = filtered_box[sort_index * num_output_box_feature + 2]; sorted_filtered_boxes[tid * num_output_box_feature + 3] = filtered_box[sort_index * num_output_box_feature + 3]; sorted_filtered_boxes[tid * num_output_box_feature + 4] = filtered_box[sort_index * num_output_box_feature + 4]; sorted_filtered_boxes[tid * num_output_box_feature + 5] = filtered_box[sort_index * num_output_box_feature + 5]; sorted_filtered_boxes[tid * num_output_box_feature + 6] = filtered_box[sort_index * num_output_box_feature + 6]; for (size_t i = 0; i < num_cls; ++i) { dev_sorted_multiclass_score[tid * num_cls + i] = dev_multiclass_score[sort_index * num_cls + i]; } sorted_filtered_dir[tid] = filtered_dir[sort_index]; sorted_filtered_class_id[tid] = filtered_class_id[sort_index]; sorted_box_for_nms[tid * num_box_corners + 0] = box_for_nms[sort_index * num_box_corners + 0]; sorted_box_for_nms[tid * num_box_corners + 1] = box_for_nms[sort_index * num_box_corners + 1]; sorted_box_for_nms[tid * num_box_corners + 2] = box_for_nms[sort_index * num_box_corners + 2]; sorted_box_for_nms[tid * num_box_corners + 3] = box_for_nms[sort_index * 
num_box_corners + 3]; } } PostProcess::PostProcess(const float float_min, const float float_max, const size_t num_anchor_x_inds, const size_t num_anchor_y_inds, const size_t num_anchor_r_inds, const size_t num_cls, const float score_threshold, const size_t num_threads, const float nms_overlap_threshold, const size_t num_box_corners, const size_t num_output_box_feature) : float_min_(float_min), float_max_(float_max), num_anchor_x_inds_(num_anchor_x_inds), num_anchor_y_inds_(num_anchor_y_inds), num_anchor_r_inds_(num_anchor_r_inds), num_cls_(num_cls), score_threshold_(score_threshold), num_threads_(num_threads), num_box_corners_(num_box_corners), num_output_box_feature_(num_output_box_feature) { nms_ptr_ = std::make_unique<NMS>(num_threads, num_box_corners, nms_overlap_threshold); } void PostProcess::DoPostProcess(const float *rpn_box_output, const float *rpn_cls_output, const float *rpn_dir_output, int *dev_anchor_mask, const float *dev_anchors_px, const float *dev_anchors_py, const float *dev_anchors_pz, const float *dev_anchors_dx, const float *dev_anchors_dy, const float *dev_anchors_dz, const float *dev_anchors_ro, float *dev_multiclass_score, float *dev_filtered_box, float *dev_filtered_score, int *dev_filtered_dir, int *dev_filtered_class_id, float *dev_box_for_nms, int *dev_filter_count, std::vector<ObjectDetection> &detections) { // filter objects by applying a class confidence threshold // Calculate number of boxes in the feature map const unsigned int length = num_anchor_x_inds_ * num_cls_ * num_anchor_r_inds_ * num_anchor_y_inds_; // Decode the output of the RegionProposalNetwork and store all the boxes with score above the threshold sycl::queue queue = devicemanager::GetCurrentQueue(); queue.submit([&](auto &h) { auto float_min_ct18 = float_min_; auto float_max_ct19 = float_max_; auto score_threshold_ct20 = score_threshold_; auto num_box_corners_ct21 = num_box_corners_; auto num_output_box_feature_ct22 = num_output_box_feature_; auto num_cls_ct23 = 
num_cls_; h.parallel_for(sycl::range<1>{length}, [=](sycl::item<1> it) { const int index = it[0]; FilterKernel(rpn_box_output, rpn_cls_output, rpn_dir_output, dev_anchor_mask, dev_anchors_px, dev_anchors_py, dev_anchors_pz, dev_anchors_dx, dev_anchors_dy, dev_anchors_dz, dev_anchors_ro, dev_filtered_box, dev_filtered_score, dev_multiclass_score, dev_filtered_dir, dev_filtered_class_id, dev_box_for_nms, dev_filter_count, float_min_ct18, float_max_ct19, score_threshold_ct20, num_box_corners_ct21, num_output_box_feature_ct22, num_cls_ct23, index); }); }); queue.wait(); int host_filter_count[1]; queue.memcpy(host_filter_count, dev_filter_count, sizeof(int)).wait(); if (host_filter_count[0] == 0) { return; } // Create variables to hold the sorted box arrays int *dev_indexes; float *dev_sorted_filtered_box, *dev_sorted_box_for_nms, *dev_sorted_multiclass_score; int *dev_sorted_filtered_dir, *dev_sorted_filtered_class_id; dev_indexes = sycl::malloc_device<int>(host_filter_count[0], queue); dev_sorted_filtered_box = sycl::malloc_device<float>(num_output_box_feature_ * host_filter_count[0], queue); dev_sorted_filtered_dir = sycl::malloc_device<int>(host_filter_count[0], queue); dev_sorted_filtered_class_id = sycl::malloc_device<int>(host_filter_count[0], queue); dev_sorted_box_for_nms = sycl::malloc_device<float>(num_box_corners_ * host_filter_count[0], queue); dev_sorted_multiclass_score = sycl::malloc_device<float>(num_cls_ * host_filter_count[0], queue); // Generate an array to hold the box indexes sycl::range<1> num_items{static_cast<std::size_t>(host_filter_count[0])}; auto e = queue.parallel_for(num_items, [=](auto i) { dev_indexes[i] = i; }); e.wait(); // Sort the box indexes according to the boxes score auto first = oneapi::dpl::make_zip_iterator(dev_filtered_score, dev_indexes); auto last = first + std::distance(dev_filtered_score, dev_filtered_score + size_t(host_filter_count[0])); std::sort(oneapi::dpl::execution::make_device_policy(queue), first, last, [](auto 
lhs, auto rhs) { return std::get<0>(lhs) > std::get<0>(rhs); }); const int num_blocks = DIVUP(host_filter_count[0], num_threads_); // Use the sorted indexes to sort the boxes and all other decoded information from the RPN queue.submit([&](auto &h) { auto host_filter_count_ct6 = host_filter_count[0]; auto num_box_corners_ct12 = num_box_corners_; auto num_output_box_feature_ct13 = num_output_box_feature_; auto num_cls_ct14 = num_cls_; h.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, num_threads_), sycl::range<3>(1, 1, num_threads_)), [=](sycl::nd_item<3> item_ct1) { SortBoxesByIndexKernel(dev_filtered_box, dev_filtered_dir, dev_filtered_class_id, dev_multiclass_score, dev_box_for_nms, dev_indexes, host_filter_count_ct6, dev_sorted_filtered_box, dev_sorted_filtered_dir, dev_sorted_filtered_class_id, dev_sorted_multiclass_score, dev_sorted_box_for_nms, num_box_corners_ct12, num_output_box_feature_ct13, num_cls_ct14, item_ct1); }); }); queue.wait(); // Apply NMS to the sorted boxes int keep_inds[host_filter_count[0]]; size_t out_num_objects = 0; nms_ptr_->DoNMS(host_filter_count[0], dev_sorted_box_for_nms, keep_inds, out_num_objects); // Create arrays to hold the detections in host memory float host_filtered_box[host_filter_count[0] * num_output_box_feature_]; float host_multiclass_score[host_filter_count[0] * num_cls_]; float host_filtered_score[host_filter_count[0]]; int host_filtered_dir[host_filter_count[0]]; int host_filtered_class_id[host_filter_count[0]]; // Copy memory to host queue.memcpy(host_filtered_box, dev_sorted_filtered_box, num_output_box_feature_ * host_filter_count[0] * sizeof(float)); queue.memcpy(host_multiclass_score, dev_sorted_multiclass_score, num_cls_ * host_filter_count[0] * sizeof(float)); queue.memcpy(host_filtered_class_id, dev_sorted_filtered_class_id, host_filter_count[0] * sizeof(int)); queue.memcpy(host_filtered_dir, dev_sorted_filtered_dir, host_filter_count[0] * sizeof(int)); 
queue.memcpy(host_filtered_score, dev_filtered_score, host_filter_count[0] * sizeof(float)); queue.wait(); // Convert the NMS filtered boxes defined by keep_inds to an array of ObjectDetection for (size_t i = 0; i < out_num_objects; i++) { ObjectDetection detection; detection.x = host_filtered_box[keep_inds[i] * num_output_box_feature_ + 0]; detection.y = host_filtered_box[keep_inds[i] * num_output_box_feature_ + 1]; detection.z = host_filtered_box[keep_inds[i] * num_output_box_feature_ + 2]; detection.length = host_filtered_box[keep_inds[i] * num_output_box_feature_ + 3]; detection.width = host_filtered_box[keep_inds[i] * num_output_box_feature_ + 4]; detection.height = host_filtered_box[keep_inds[i] * num_output_box_feature_ + 5]; detection.class_id = static_cast<float>(host_filtered_class_id[keep_inds[i]]); detection.likelihood = host_filtered_score[keep_inds[i]]; // Apply the direction label found by the direction classifier if (host_filtered_dir[keep_inds[i]] == 0) { detection.yaw = host_filtered_box[keep_inds[i] * num_output_box_feature_ + 6] + M_PI; } else { detection.yaw = host_filtered_box[keep_inds[i] * num_output_box_feature_ + 6]; } for (size_t k = 0; k < num_cls_; k++) { detection.class_probabilities.push_back(host_multiclass_score[keep_inds[i] * num_cls_ + k]); } detections.push_back(detection); } sycl::free(dev_indexes, queue); sycl::free(dev_sorted_filtered_box, queue); sycl::free(dev_sorted_filtered_dir, queue); sycl::free(dev_sorted_box_for_nms, queue); } } // namespace pointpillars
cpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/src/pointpillars/preprocess.cpp
/* * Copyright 2018-2019 Autoware Foundation. All rights reserved. * Copyright (c) 2019-2021 Intel Corporation (oneAPI modifications) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "pointpillars/preprocess.hpp" #include <sycl/sycl.hpp> #include <algorithm> #include <iostream> #include "devicemanager/devicemanager.hpp" #include "pointpillars/common.hpp" namespace pointpillars { // This kernel is called on each point of the Point Cloud. It calculates the coordinates of the point in the 2D pillar // map and adds it to the corresponding pillar. 
void MakePillarHistoKernel(const float *dev_points, float *dev_pillar_x_in_coors, float *dev_pillar_y_in_coors, float *dev_pillar_z_in_coors, float *dev_pillar_i_in_coors, int *pillar_count_histo, // holds the point count on the cell const int num_points, const int max_points_per_pillar, const int grid_x_size, const int grid_y_size, const int grid_z_size, const float min_x_range, const float min_y_range, const float min_z_range, const float pillar_x_size, const float pillar_y_size, const float pillar_z_size, sycl::nd_item<3> item_ct1) { int point_index = item_ct1.get_local_id(2) + item_ct1.get_group(2) * item_ct1.get_local_range().get(2); if (point_index >= num_points) { return; } // Indexes in the pillar map int xIndex = sycl::floor((float)((dev_points[point_index * 4 + 0] - min_x_range) / pillar_x_size)); int yIndex = sycl::floor((float)((dev_points[point_index * 4 + 1] - min_y_range) / pillar_y_size)); int zIndex = sycl::floor((float)((dev_points[point_index * 4 + 2] - min_z_range) / pillar_z_size)); // Check if it is within grid range if (xIndex >= 0 && xIndex < grid_x_size && yIndex >= 0 && yIndex < grid_y_size && zIndex >= 0 && zIndex < grid_z_size) { // increase the point count int count = AtomicFetchAdd(&pillar_count_histo[yIndex * grid_x_size + xIndex], 1); if (count < max_points_per_pillar) { // pillar index int pillarIndex = yIndex * grid_x_size * max_points_per_pillar + xIndex * max_points_per_pillar; // pointPillarIndex is the m-point in the n-pillar int pointPillarIndex = pillarIndex + count; // add point to pillar data dev_pillar_x_in_coors[pointPillarIndex] = dev_points[point_index * 4 + 0]; dev_pillar_y_in_coors[pointPillarIndex] = dev_points[point_index * 4 + 1]; dev_pillar_z_in_coors[pointPillarIndex] = dev_points[point_index * 4 + 2]; dev_pillar_i_in_coors[pointPillarIndex] = dev_points[point_index * 4 + 3]; } } } // This kernel is executed on a specific location in the pillar map. // It will test if the corresponding pillar has points. 
// In such case it will mark the pillar for use as input to the PillarFeatureExtraction // A pillar mask is also generated and can be used to optimize the decoding. void MakePillarIndexKernel(int *dev_pillar_count_histo, int *dev_counter, int *dev_pillar_count, int *dev_x_coors, int *dev_y_coors, float *dev_x_coors_for_sub, float *dev_y_coors_for_sub, float *dev_num_points_per_pillar, int *dev_sparse_pillar_map, const int max_pillars, const int max_points_per_pillar, const int grid_x_size, const float min_x_range, const float min_y_range, const float pillar_x_size, const float pillar_y_size, const int x, const int y) { int num_points_at_this_pillar = dev_pillar_count_histo[y * grid_x_size + x]; if (num_points_at_this_pillar == 0) { return; } int count = AtomicFetchAdd(dev_counter, 1); if (count < max_pillars) { AtomicFetchAdd(dev_pillar_count, 1); if (num_points_at_this_pillar >= max_points_per_pillar) { dev_num_points_per_pillar[count] = max_points_per_pillar; } else { dev_num_points_per_pillar[count] = num_points_at_this_pillar; } // grid coordinates of this pillar dev_x_coors[count] = x; dev_y_coors[count] = y; // metric position of this pillar dev_x_coors_for_sub[count] = x * pillar_x_size + 0.5f * pillar_x_size + min_x_range; dev_y_coors_for_sub[count] = y * pillar_y_size + 0.5f * pillar_y_size + min_y_range; // map of pillars with at least one point dev_sparse_pillar_map[y * grid_x_size + x] = 1; } } // This kernel generates the input feature map to the PillarFeatureExtraction network. // It takes the pillars that were marked for use and stores the first 4 features (x,y,z,i) in the input feature map. 
// Copies the (x, y, z, i) values of every selected pillar's points from the
// dense per-cell buffers into the compact per-pillar feature buffers.
// One work-group handles one pillar; each local work-item handles one point.
void MakePillarFeatureKernel(float *dev_pillar_x_in_coors, float *dev_pillar_y_in_coors, float *dev_pillar_z_in_coors,
                             float *dev_pillar_i_in_coors, float *dev_pillar_x, float *dev_pillar_y,
                             float *dev_pillar_z, float *dev_pillar_i, int *dev_x_coors, int *dev_y_coors,
                             float *dev_num_points_per_pillar, const int max_points, const int grid_x_size,
                             sycl::nd_item<3> item_ct1) {
  const int pillar = item_ct1.get_group(2);
  const int point = item_ct1.get_local_id(2);

  // Lanes past the number of points actually stored in this pillar do nothing.
  const int points_in_pillar = dev_num_points_per_pillar[pillar];
  if (point >= points_in_pillar) {
    return;
  }

  // Source index in the dense (cell_y, cell_x, point) layout...
  const int cell_x = dev_x_coors[pillar];
  const int cell_y = dev_y_coors[pillar];
  const int src = cell_y * grid_x_size * max_points + cell_x * max_points + point;
  // ...and destination index in the compact (pillar, point) layout.
  const int dst = pillar * max_points + point;

  dev_pillar_x[dst] = dev_pillar_x_in_coors[src];
  dev_pillar_y[dst] = dev_pillar_y_in_coors[src];
  dev_pillar_z[dst] = dev_pillar_z_in_coors[src];
  dev_pillar_i[dst] = dev_pillar_i_in_coors[src];
}

// This kernel takes the pillars that were marked for use and stores the features: (pillar_center_x, pillar_center_y,
// pillar_mask) in the input feature map.
//! Broadcasts each pillar's metric center (x, y) to all of its point slots and
//! builds the binary mask that marks which slots hold a real point.
//! One work-group per pillar, one work-item per point slot.
void MakeExtraNetworkInputKernel(float *dev_x_coors_for_sub, float *dev_y_coors_for_sub,
                                 float *dev_num_points_per_pillar, float *dev_x_coors_for_sub_shaped,
                                 float *dev_y_coors_for_sub_shaped, float *dev_pillar_feature_mask,
                                 const int max_num_points_per_pillar, sycl::nd_item<3> item_ct1) {
  int ith_pillar = item_ct1.get_group(2);
  int ith_point = item_ct1.get_local_id(2);
  float x = dev_x_coors_for_sub[ith_pillar];
  float y = dev_y_coors_for_sub[ith_pillar];
  int num_points_for_a_pillar = dev_num_points_per_pillar[ith_pillar];
  int ind = ith_pillar * max_num_points_per_pillar + ith_point;
  // Every slot gets the pillar center; the mask distinguishes real points from padding.
  dev_x_coors_for_sub_shaped[ind] = x;
  dev_y_coors_for_sub_shaped[ind] = y;

  if (ith_point < num_points_for_a_pillar) {
    dev_pillar_feature_mask[ind] = 1.0f;
  } else {
    dev_pillar_feature_mask[ind] = 0.0f;
  }
}

//! Stores the grid/pillar configuration and allocates the device-side buffers
//! reused across DoPreProcess calls.
PreProcess::PreProcess(const int max_num_pillars, const int max_points_per_pillar, const int grid_x_size,
                       const int grid_y_size, const int grid_z_size, const float pillar_x_size,
                       const float pillar_y_size, const float pillar_z_size, const float min_x_range,
                       const float min_y_range, const float min_z_range)
    : max_num_pillars_(max_num_pillars),
      max_num_points_per_pillar_(max_points_per_pillar),
      grid_x_size_(grid_x_size),
      grid_y_size_(grid_y_size),
      grid_z_size_(grid_z_size),
      pillar_x_size_(pillar_x_size),
      pillar_y_size_(pillar_y_size),
      pillar_z_size_(pillar_z_size),
      min_x_range_(min_x_range),
      min_y_range_(min_y_range),
      min_z_range_(min_z_range) {
  sycl::queue queue = devicemanager::GetCurrentQueue();

  // allocate memory
  // Dense per-cell point buffers: one slot per point per grid cell.
  dev_pillar_x_in_coors_ = sycl::malloc_device<float>(grid_y_size_ * grid_x_size_ * max_num_points_per_pillar_, queue);
  dev_pillar_y_in_coors_ = sycl::malloc_device<float>(grid_y_size_ * grid_x_size_ * max_num_points_per_pillar_, queue);
  dev_pillar_z_in_coors_ = sycl::malloc_device<float>(grid_y_size_ * grid_x_size_ * max_num_points_per_pillar_, queue);
  dev_pillar_i_in_coors_ = sycl::malloc_device<float>(grid_y_size_ * grid_x_size_ * max_num_points_per_pillar_, queue);
  // Per-cell point counter plus bookkeeping for the selected pillars.
  dev_pillar_count_histo_ = sycl::malloc_device<int>(grid_y_size_ * grid_x_size_, queue);
  dev_counter_ = sycl::malloc_device<int>(1, queue);
  dev_pillar_count_ = sycl::malloc_device<int>(1, queue);
  dev_x_coors_for_sub_ = sycl::malloc_device<float>(max_num_pillars_, queue);
  dev_y_coors_for_sub_ = sycl::malloc_device<float>(max_num_pillars_, queue);
}

//! Releases every device buffer owned by this object.
PreProcess::~PreProcess() {
  sycl::queue queue = devicemanager::GetCurrentQueue();
  sycl::free(dev_pillar_x_in_coors_, queue);
  sycl::free(dev_pillar_y_in_coors_, queue);
  sycl::free(dev_pillar_z_in_coors_, queue);
  sycl::free(dev_pillar_i_in_coors_, queue);
  sycl::free(dev_pillar_count_histo_, queue);
  sycl::free(dev_counter_, queue);
  sycl::free(dev_pillar_count_, queue);
  sycl::free(dev_x_coors_for_sub_, queue);
  sycl::free(dev_y_coors_for_sub_, queue);
}

//! Runs the full preprocessing pipeline: clears the scratch buffers, then
//! launches four kernels in sequence (histogram, pillar selection, point
//! features, extra network inputs). host_pillar_count[0] receives the number
//! of pillars actually used (at most max_num_pillars_).
void PreProcess::DoPreProcess(const float *dev_points, const int in_num_points, int *dev_x_coors, int *dev_y_coors,
                              float *dev_num_points_per_pillar, float *dev_pillar_x, float *dev_pillar_y,
                              float *dev_pillar_z, float *dev_pillar_i, float *dev_x_coors_for_sub_shaped,
                              float *dev_y_coors_for_sub_shaped, float *dev_pillar_feature_mask,
                              int *dev_sparse_pillar_map, int *host_pillar_count) {
  // Set Pillar input features to 0
  sycl::queue queue = devicemanager::GetCurrentQueue();

  // TODO: test queue.fill vs queue.memset
  queue.memset(dev_pillar_x_in_coors_, 0, grid_y_size_ * grid_x_size_ * max_num_points_per_pillar_ * sizeof(float));
  queue.memset(dev_pillar_y_in_coors_, 0, grid_y_size_ * grid_x_size_ * max_num_points_per_pillar_ * sizeof(float));
  queue.memset(dev_pillar_z_in_coors_, 0, grid_y_size_ * grid_x_size_ * max_num_points_per_pillar_ * sizeof(float));
  queue.memset(dev_pillar_i_in_coors_, 0, grid_y_size_ * grid_x_size_ * max_num_points_per_pillar_ * sizeof(float));
  queue.memset(dev_pillar_count_histo_, 0, grid_y_size_ * grid_x_size_ * sizeof(int));
  queue.memset(dev_counter_, 0, sizeof(int));
  queue.memset(dev_pillar_count_, 0, sizeof(int));
  queue.wait();

  // Use the point cloud data to generate the pillars
  // This will assign each point to the corresponding pillar in the grid. A maximum number of points can be
  // assigned to a single pillar.
  int num_block = DIVUP(in_num_points, 256);
  queue.submit([&](auto &h) {
    // Copy members into locals so the device lambda does not capture `this`.
    auto dev_pillar_x_in_coors_ct1 = dev_pillar_x_in_coors_;
    auto dev_pillar_y_in_coors_ct2 = dev_pillar_y_in_coors_;
    auto dev_pillar_z_in_coors_ct3 = dev_pillar_z_in_coors_;
    auto dev_pillar_i_in_coors_ct4 = dev_pillar_i_in_coors_;
    auto dev_pillar_count_histo_ct5 = dev_pillar_count_histo_;
    auto max_num_points_per_pillar_ct7 = max_num_points_per_pillar_;
    auto grid_x_size_ct8 = grid_x_size_;
    auto grid_y_size_ct9 = grid_y_size_;
    auto grid_z_size_ct10 = grid_z_size_;
    auto min_x_range_ct11 = min_x_range_;
    auto min_y_range_ct12 = min_y_range_;
    auto min_z_range_ct13 = min_z_range_;
    auto pillar_x_size_ct14 = pillar_x_size_;
    auto pillar_y_size_ct15 = pillar_y_size_;
    auto pillar_z_size_ct16 = pillar_z_size_;

    // One work-item per point, 256 work-items per group.
    h.parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, num_block) * sycl::range<3>(1, 1, 256), sycl::range<3>(1, 1, 256)),
        [=](sycl::nd_item<3> item_ct1) {
          MakePillarHistoKernel(dev_points, dev_pillar_x_in_coors_ct1, dev_pillar_y_in_coors_ct2,
                                dev_pillar_z_in_coors_ct3, dev_pillar_i_in_coors_ct4, dev_pillar_count_histo_ct5,
                                in_num_points, max_num_points_per_pillar_ct7, grid_x_size_ct8, grid_y_size_ct9,
                                grid_z_size_ct10, min_x_range_ct11, min_y_range_ct12, min_z_range_ct13,
                                pillar_x_size_ct14, pillar_y_size_ct15, pillar_z_size_ct16, item_ct1);
        });
  });
  queue.wait();

  // Check which pillars contain points and mark them for use during feature extraction.
  queue.submit([&](auto &h) {
    auto dev_pillar_count_histo_ct0 = dev_pillar_count_histo_;
    auto dev_counter_ct1 = dev_counter_;
    auto dev_pillar_count_ct2 = dev_pillar_count_;
    auto dev_x_coors_for_sub_ct5 = dev_x_coors_for_sub_;
    auto dev_y_coors_for_sub_ct6 = dev_y_coors_for_sub_;
    auto max_num_pillars_ct9 = max_num_pillars_;
    auto max_num_points_per_pillar_ct10 = max_num_points_per_pillar_;
    auto grid_x_size_ct11 = grid_x_size_;
    auto min_x_range_ct12 = min_x_range_;
    auto min_y_range_ct13 = min_y_range_;
    auto pillar_x_size_ct14 = pillar_x_size_;
    auto pillar_y_size_ct15 = pillar_y_size_;

    // One work-item per grid cell.
    h.parallel_for(sycl::range<2>{static_cast<unsigned long>(grid_x_size_), static_cast<unsigned long>(grid_y_size_)},
                   [=](sycl::id<2> it) {
                     const int x = it[0];
                     const int y = it[1];
                     MakePillarIndexKernel(dev_pillar_count_histo_ct0, dev_counter_ct1, dev_pillar_count_ct2,
                                           dev_x_coors, dev_y_coors, dev_x_coors_for_sub_ct5, dev_y_coors_for_sub_ct6,
                                           dev_num_points_per_pillar, dev_sparse_pillar_map, max_num_pillars_ct9,
                                           max_num_points_per_pillar_ct10, grid_x_size_ct11, min_x_range_ct12,
                                           min_y_range_ct13, pillar_x_size_ct14, pillar_y_size_ct15, x, y);
                   });
  });
  queue.wait();

  // Number of used pillars is needed on the host to size the next launch.
  queue.memcpy(host_pillar_count, dev_pillar_count_, sizeof(int)).wait();

  // Generate the first 4 pillar features in the input feature map.
  // This is a list of points up to max_num_points_per_pillar
  queue.submit([&](auto &h) {
    auto dev_pillar_x_in_coors_ct0 = dev_pillar_x_in_coors_;
    auto dev_pillar_y_in_coors_ct1 = dev_pillar_y_in_coors_;
    auto dev_pillar_z_in_coors_ct2 = dev_pillar_z_in_coors_;
    auto dev_pillar_i_in_coors_ct3 = dev_pillar_i_in_coors_;
    auto max_num_points_per_pillar_ct11 = max_num_points_per_pillar_;
    auto grid_x_size_ct12 = grid_x_size_;

    // One work-group per used pillar, one work-item per point slot.
    h.parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, host_pillar_count[0]) * sycl::range<3>(1, 1, max_num_points_per_pillar_),
                          sycl::range<3>(1, 1, max_num_points_per_pillar_)),
        [=](sycl::nd_item<3> item_ct1) {
          MakePillarFeatureKernel(dev_pillar_x_in_coors_ct0, dev_pillar_y_in_coors_ct1, dev_pillar_z_in_coors_ct2,
                                  dev_pillar_i_in_coors_ct3, dev_pillar_x, dev_pillar_y, dev_pillar_z, dev_pillar_i,
                                  dev_x_coors, dev_y_coors, dev_num_points_per_pillar, max_num_points_per_pillar_ct11,
                                  grid_x_size_ct12, item_ct1);
        });
  });
  queue.wait();

  // Generate the next features in the pillar input feature map: (pillar_center_x, pillar_center_y, pillar_mask)
  queue.submit([&](auto &h) {
    auto dev_x_coors_for_sub_ct0 = dev_x_coors_for_sub_;
    auto dev_y_coors_for_sub_ct1 = dev_y_coors_for_sub_;
    auto max_num_points_per_pillar_ct6 = max_num_points_per_pillar_;

    // Launched over all max_num_pillars_ (not just the used ones), so unused
    // pillar slots also get their mask written.
    h.parallel_for(
        sycl::nd_range<3>(sycl::range<3>(1, 1, max_num_pillars_) * sycl::range<3>(1, 1, max_num_points_per_pillar_),
                          sycl::range<3>(1, 1, max_num_points_per_pillar_)),
        [=](sycl::nd_item<3> item_ct1) {
          MakeExtraNetworkInputKernel(dev_x_coors_for_sub_ct0, dev_y_coors_for_sub_ct1, dev_num_points_per_pillar,
                                      dev_x_coors_for_sub_shaped, dev_y_coors_for_sub_shaped, dev_pillar_feature_mask,
                                      max_num_points_per_pillar_ct6, item_ct1);
        });
  });
  queue.wait();
}

}  // namespace pointpillars
cpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/src/pointpillars/pointpillars.cpp
/*
 * Copyright 2018-2019 Autoware Foundation. All rights reserved.
 * Copyright (c) 2019-2021 Intel Corporation (oneAPI modifications)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pointpillars/pointpillars.hpp"
#include <sys/stat.h>
#include <algorithm>
#include <cmath>
#include <iostream>
#include <limits>
#include <thread>
#include "devicemanager/devicemanager.hpp"
#include <sycl/backend/opencl.hpp>

namespace pointpillars {

//! Derives all pipeline dimensions from the configuration, builds the anchor
//! grid, allocates device memory and loads both OpenVINO networks (PFE + RPN).
PointPillars::PointPillars(const float score_threshold, const float nms_threshold, const PointPillarsConfig &config)
    : config_(config),
      score_threshold_(score_threshold),
      nms_overlap_threshold_(nms_threshold),
      pfe_model_file_(config.pfe_model_file),
      rpn_model_file_(config.rpn_model_file),
      max_num_pillars_(config.max_num_pillars),
      max_num_points_per_pillar_(config.max_num_points_per_pillar),
      pfe_output_size_(config.max_num_pillars * config.pillar_features),
      grid_x_size_(config.grid_x_size),
      grid_y_size_(config.grid_y_size),
      grid_z_size_(config.grid_z_size),
      rpn_input_size_(config.pillar_features * config.grid_x_size * config.grid_y_size),
      num_cls_(config.num_classes),
      num_anchor_x_inds_(config.grid_x_size * config.rpn_scale),
      num_anchor_y_inds_(config.grid_y_size * config.rpn_scale),
      num_anchor_r_inds_(2),
      num_anchor_(num_anchor_x_inds_ * num_anchor_y_inds_ * num_anchor_r_inds_ * num_cls_),
      rpn_box_output_size_(num_anchor_ * 7),         // feature score
      rpn_cls_output_size_(num_anchor_ * num_cls_),  // classification score
      rpn_dir_output_size_(num_anchor_ * 2),         // orientation score
      pillar_x_size_(config.pillar_x_size),
      pillar_y_size_(config.pillar_y_size),
      pillar_z_size_(config.pillar_z_size),
      min_x_range_(config.min_x_range),
      min_y_range_(config.min_y_range),
      min_z_range_(config.min_z_range),
      max_x_range_(config.max_x_range),
      max_y_range_(config.max_y_range),
      // NOTE(review): initialized from config.max_x_range — this looks like it
      // should be config.max_z_range; confirm before changing.
      max_z_range_(config.max_x_range),
      batch_size_(1),
      num_features_(64),  // number of pillar features
      num_threads_(64),
      num_box_corners_(4),
      num_output_box_feature_(7) {
  AnchorGridConfig anchor_grid_config;
  anchor_grid_config.min_x_range = config.min_x_range;
  anchor_grid_config.max_x_range = config.max_x_range;
  anchor_grid_config.min_y_range = config.min_y_range;
  anchor_grid_config.max_y_range = config.max_y_range;
  anchor_grid_config.min_z_range = config.min_z_range;
  anchor_grid_config.max_z_range = config.max_z_range;
  anchor_grid_config.x_stride = config.x_stride;
  anchor_grid_config.y_stride = config.y_stride;
  anchor_grid_config.anchors = config.anchors;
  // Two anchor rotations: 0 and 90 degrees.
  anchor_grid_config.rotations = {0.f, M_PI_2};
  anchor_grid_ptr_ = std::make_unique<AnchorGrid>(anchor_grid_config);
  InitComponents();
  DeviceMemoryMalloc();
  SetupPfeNetwork();
  SetupRpnNetwork(true);
}

PointPillars::~PointPillars() {
  // Upon destruction clear all SYCL memory
  sycl::queue queue = devicemanager::GetCurrentQueue();
  sycl::free(dev_x_coors_, queue);
  sycl::free(dev_y_coors_, queue);
  sycl::free(dev_num_points_per_pillar_, queue);
  sycl::free(dev_sparse_pillar_map_, queue);
  sycl::free(dev_pillar_x_, queue);
  sycl::free(dev_pillar_y_, queue);
  sycl::free(dev_pillar_z_, queue);
  sycl::free(dev_pillar_i_, queue);
  sycl::free(dev_x_coors_for_sub_shaped_, queue);
  sycl::free(dev_y_coors_for_sub_shaped_, queue);
  sycl::free(dev_pillar_feature_mask_, queue);
  sycl::free(dev_cumsum_workspace_, queue);
  sycl::free(dev_anchor_mask_, queue);
  sycl::free(dev_scattered_feature_, queue);
  sycl::free(dev_filtered_box_, queue);
  sycl::free(dev_filtered_score_, queue);
  sycl::free(dev_filtered_dir_, queue);
  sycl::free(dev_filtered_class_id_, queue);
  sycl::free(dev_box_for_nms_, queue);
  sycl::free(dev_filter_count_, queue);
}

//! Instantiates the non-network pipeline stages (anchors, preprocessing,
//! scatter, postprocessing) with the dimensions computed in the constructor.
void PointPillars::InitComponents() {
  // Setup anchor grid
  anchor_grid_ptr_->GenerateAnchors();
  // Setup preprocessing
  preprocess_points_ptr_ = std::make_unique<PreProcess>(max_num_pillars_, max_num_points_per_pillar_, grid_x_size_,
                                                        grid_y_size_, grid_z_size_, pillar_x_size_, pillar_y_size_,
                                                        pillar_z_size_, min_x_range_, min_y_range_, min_z_range_);
  // Setup scatter
  scatter_ptr_ = std::make_unique<Scatter>(num_features_, max_num_pillars_, grid_x_size_, grid_y_size_);
  const float float_min = std::numeric_limits<float>::lowest();
  const float float_max = std::numeric_limits<float>::max();
  // Setup postprocessing
  postprocess_ptr_ = std::make_unique<PostProcess>(float_min, float_max, num_anchor_x_inds_, num_anchor_y_inds_,
                                                   num_anchor_r_inds_, num_cls_, score_threshold_, num_threads_,
                                                   nms_overlap_threshold_, num_box_corners_, num_output_box_feature_);
}

//! Loads and compiles the PillarFeatureExtraction network with OpenVINO and
//! wires its input/output tensors directly to the SYCL USM buffers (zero-copy
//! via RemoteTensor when running on GPU).
void PointPillars::SetupPfeNetwork() {
  std::size_t num_threads = 1;
  std::string device = "CPU";
  // If the chosen execution device is GPU, also use this for OpenVINO
  if (devicemanager::GetCurrentDevice().is_gpu()) {
    device = "GPU";
  }
  // Enable model caching and read the model
  ov::Core core;
  core.set_property(ov::cache_dir("pointpillars_cache"));
  auto model = core.read_model(pfe_model_file_);
  // Configure input pre-processing
  ov::preprocess::PrePostProcessor ppp(model);
  for (auto input : model->inputs()) {
    auto& input_info = ppp.input(input.get_any_name());
    // All inputs are NCHW except the per-pillar point count, which is NC.
    ov::Layout layout = "NCHW";
    if (input.get_any_name() == "num_points_per_pillar") {
      layout = "NC";
    }
    input_info.tensor().set_element_type(ov::element::f32).set_layout(layout);
    input_info.model().set_layout(layout);
  }
  model = ppp.build();
  // Setup device configuration and load the network onto the execution device
  if (device == "CPU") {
    pfe_exe_network_ =
        core.compile_model(model, device, ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY));
  } else {
    assert(device == "GPU");
    // Share the SYCL OpenCL queue with OpenVINO so tensors can stay on device.
    auto queue = sycl::get_native<sycl::backend::opencl>(devicemanager::GetCurrentQueue());
    auto remote_context = ov::intel_gpu::ocl::ClContext(core, queue);
    pfe_exe_network_ =
        core.compile_model(model, remote_context, ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY));
  }
  // Create the inference request
  const auto if_req_t1 = std::chrono::high_resolution_clock::now();
  pfe_infer_request_ = pfe_exe_network_.create_infer_request();
  const auto if_req_t2 = std::chrono::high_resolution_clock::now();
  std::cout << " PFE InferRequest create "
            << std::chrono::duration_cast<std::chrono::milliseconds>(if_req_t2 - if_req_t1).count() << "ms\n";
  // create map of network inputs to memory objects
  pfe_input_map_.insert({"pillar_x", dev_pillar_x_});
  pfe_input_map_.insert({"pillar_y", dev_pillar_y_});
  pfe_input_map_.insert({"pillar_z", dev_pillar_z_});
  pfe_input_map_.insert({"pillar_i", dev_pillar_i_});
  pfe_input_map_.insert({"num_points_per_pillar", dev_num_points_per_pillar_});
  pfe_input_map_.insert({"x_sub_shaped", dev_x_coors_for_sub_shaped_});
  pfe_input_map_.insert({"y_sub_shaped", dev_y_coors_for_sub_shaped_});
  pfe_input_map_.insert({"mask", dev_pillar_feature_mask_});
  if (devicemanager::GetCurrentDevice().is_cpu()) {
    // CPU path: wrap the USM pointers in plain host tensors.
    for (auto &input : pfe_exe_network_.inputs()) {
      auto input_tensor = ov::Tensor(input.get_element_type(), input.get_shape(), pfe_input_map_[input.get_any_name()]);
      pfe_input_tensor_map_.insert({input.get_any_name(), input_tensor});
    }
    auto output = pfe_exe_network_.output();
    pfe_output_tensor_ = ov::Tensor(output.get_element_type(), output.get_shape(), pfe_output_);
  } else {
    // GPU path: create remote (device-resident) tensors over the same buffers.
    auto remote_context = pfe_exe_network_.get_context().as<ov::intel_gpu::ocl::ClContext>();
    for (auto &input : pfe_exe_network_.inputs()) {
      ov::RemoteTensor input_tensor = remote_context.create_tensor(input.get_element_type(), input.get_shape(),
                                                                   pfe_input_map_[input.get_any_name()]);
      pfe_input_tensor_map_.insert({input.get_any_name(), input_tensor});
    }
    auto output = pfe_exe_network_.output();
    pfe_output_tensor_ = remote_context.create_tensor(output.get_element_type(), output.get_shape(), pfe_output_);
  }
}

//! Loads and compiles the RegionProposalNetwork with OpenVINO; optionally
//! reshapes the model input to the configured grid size, then binds the
//! scattered-feature input and the three output heads (box, class, direction).
void PointPillars::SetupRpnNetwork(bool resize_input) {
  std::size_t num_threads = 1;
  std::string device = "CPU";
  // if the chosen exceution device is GPU, also use this for OpenVINO
  if (devicemanager::GetCurrentDevice().is_gpu()) {
    device = "GPU";
  }
  // Enable model caching and read the model
  ov::Core core;
  core.set_property(ov::cache_dir("pointpillars_cache"));
  auto model = core.read_model(rpn_model_file_);
  // Configure input pre-processing
  ov::preprocess::PrePostProcessor ppp(model);
  auto& input = ppp.input();
  input.tensor().set_element_type(ov::element::f32).set_layout("NCHW");
  input.model().set_layout("NCHW");
  if (resize_input) {
    model->reshape({1, config_.pillar_features, config_.grid_y_size, config_.grid_x_size});
  }
  model = ppp.build();
  // Setup device configuration and load the network onto the execution device
  if (device == "CPU") {
    rpn_exe_network_ =
        core.compile_model(model, device, ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY));
  } else {
    assert(device == "GPU");
    auto queue = sycl::get_native<sycl::backend::opencl>(devicemanager::GetCurrentQueue());
    auto remote_context = ov::intel_gpu::ocl::ClContext(core, queue);
    rpn_exe_network_ =
        core.compile_model(model, remote_context, ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY));
  }
  // Create the inference request
  const auto if_req_t1 = std::chrono::high_resolution_clock::now();
  rpn_infer_request_ = rpn_exe_network_.create_infer_request();
  const auto if_req_t2 = std::chrono::high_resolution_clock::now();
  std::cout << " RPN InferRequest create "
            << std::chrono::duration_cast<std::chrono::milliseconds>(if_req_t2 - if_req_t1).count() << "ms\n";
  if (devicemanager::GetCurrentDevice().is_cpu()) {
    auto input_info = rpn_exe_network_.input();
    scattered_feature_tensor_ =
        ov::Tensor(input_info.get_element_type(), input_info.get_shape(), dev_scattered_feature_);
    int i = 0;
    // Output order is assumed to match {box, class, direction} — see rpn_*_output_ sizing.
    float* outputs[] = {rpn_1_output_, rpn_2_output_, rpn_3_output_};
    for (auto& output : rpn_exe_network_.outputs()) {
      rpn_output_tensors_[i] = ov::Tensor(output.get_element_type(), output.get_shape(), outputs[i]);
      i++;
    }
  } else {
    auto remote_context = rpn_exe_network_.get_context().as<ov::intel_gpu::ocl::ClContext>();
    auto input_info = rpn_exe_network_.input();
    scattered_feature_tensor_ =
        remote_context.create_tensor(input_info.get_element_type(), input_info.get_shape(), dev_scattered_feature_);
    int i = 0;
    float* outputs[] = {rpn_1_output_, rpn_2_output_, rpn_3_output_};
    for (auto& output : rpn_exe_network_.outputs()) {
      rpn_output_tensors_[i] = remote_context.create_tensor(output.get_element_type(), output.get_shape(), outputs[i]);
      i++;
    }
  }
}

//! Allocates every SYCL buffer used across the pipeline. Buffers the OpenVINO
//! networks read/write directly are malloc_shared; the rest are device-only.
void PointPillars::DeviceMemoryMalloc() {
  sycl::queue queue = devicemanager::GetCurrentQueue();
  // Allocate all device memory vector
  dev_x_coors_ = sycl::malloc_device<int>(max_num_pillars_, queue);
  dev_y_coors_ = sycl::malloc_device<int>(max_num_pillars_, queue);
  dev_num_points_per_pillar_ = sycl::malloc_shared<float>(max_num_pillars_, queue);
  dev_sparse_pillar_map_ = sycl::malloc_device<int>(grid_y_size_ * grid_x_size_, queue);
  dev_pillar_x_ = sycl::malloc_shared<float>(max_num_pillars_ * max_num_points_per_pillar_, queue);
  dev_pillar_y_ = sycl::malloc_shared<float>(max_num_pillars_ * max_num_points_per_pillar_, queue);
  dev_pillar_z_ = sycl::malloc_shared<float>(max_num_pillars_ * max_num_points_per_pillar_, queue);
  dev_pillar_i_ = sycl::malloc_shared<float>(max_num_pillars_ * max_num_points_per_pillar_, queue);
  dev_x_coors_for_sub_shaped_ = sycl::malloc_shared<float>(max_num_pillars_ * max_num_points_per_pillar_, queue);
  dev_y_coors_for_sub_shaped_ = sycl::malloc_shared<float>(max_num_pillars_ * max_num_points_per_pillar_, queue);
  dev_pillar_feature_mask_ = sycl::malloc_shared<float>(max_num_pillars_ * max_num_points_per_pillar_, queue);
  // cumsum kernel
  dev_cumsum_workspace_ = sycl::malloc_device<int>(grid_y_size_ * grid_x_size_, queue);
  // for make anchor mask kernel
  dev_anchor_mask_ = sycl::malloc_device<int>(num_anchor_, queue);
  // for scatter kernel
  dev_scattered_feature_ = sycl::malloc_device<float>(num_features_ * grid_y_size_ * grid_x_size_, queue);
  // for filter
  dev_filtered_box_ = sycl::malloc_device<float>(num_anchor_ * num_output_box_feature_, queue);
  dev_filtered_score_ = sycl::malloc_device<float>(num_anchor_, queue);
  dev_multiclass_score_ = sycl::malloc_device<float>(num_anchor_ * num_cls_, queue);
  dev_filtered_dir_ = sycl::malloc_device<int>(num_anchor_, queue);
  dev_filtered_class_id_ = sycl::malloc_device<int>(num_anchor_, queue);
  dev_box_for_nms_ = sycl::malloc_device<float>(num_anchor_ * num_box_corners_, queue);
  // NOTE(review): malloc_device<int> takes an element count; passing
  // sizeof(int) allocates 4 ints where 1 seems intended (harmless, but confirm).
  dev_filter_count_ = sycl::malloc_device<int>(sizeof(int), queue);
  // CNN outputs
  pfe_output_ = sycl::malloc_device<float>(pfe_output_size_, queue);
  rpn_1_output_ = sycl::malloc_device<float>(rpn_box_output_size_, queue);
  rpn_2_output_ = sycl::malloc_device<float>(rpn_cls_output_size_, queue);
  rpn_3_output_ = sycl::malloc_device<float>(rpn_dir_output_size_, queue);
}

//! Copies the raw point cloud to the device, zeroes all per-frame scratch
//! buffers, then runs PreProcess::DoPreProcess to build the pillar inputs.
void PointPillars::PreProcessing(const float *in_points_array, const int in_num_points) {
  float *dev_points;
  sycl::queue queue = devicemanager::GetCurrentQueue();
  // Before starting the PreProcessing, the device memory has to be reset
  dev_points = sycl::malloc_device<float>(in_num_points * num_box_corners_, queue);
  queue.memcpy(dev_points, in_points_array, in_num_points * num_box_corners_ * sizeof(float));
  if (!devicemanager::GetCurrentDevice().is_gpu()) {
    queue.memset(dev_sparse_pillar_map_, 0, grid_y_size_ * grid_x_size_ * sizeof(int));
    queue.memset(dev_pillar_x_, 0, max_num_pillars_ * max_num_points_per_pillar_ * sizeof(float));
    queue.memset(dev_pillar_y_, 0, max_num_pillars_ * max_num_points_per_pillar_ * sizeof(float));
    queue.memset(dev_pillar_z_, 0, max_num_pillars_ * max_num_points_per_pillar_ * sizeof(float));
    queue.memset(dev_pillar_i_, 0, max_num_pillars_ * max_num_points_per_pillar_ * sizeof(float));
    queue.memset(dev_x_coors_, 0, max_num_pillars_ * sizeof(int));
    queue.memset(dev_y_coors_, 0, max_num_pillars_ * sizeof(int));
    queue.memset(dev_num_points_per_pillar_, 0, max_num_pillars_ * sizeof(float));
    queue.memset(dev_anchor_mask_, 0, num_anchor_ * sizeof(int));
    queue.memset(dev_cumsum_workspace_, 0, grid_y_size_ * grid_x_size_ * sizeof(int));
    queue.memset(dev_x_coors_for_sub_shaped_, 0, max_num_pillars_ * max_num_points_per_pillar_ * sizeof(float));
    queue.memset(dev_y_coors_for_sub_shaped_, 0, max_num_pillars_ * max_num_points_per_pillar_ * sizeof(float));
    queue.memset(dev_pillar_feature_mask_, 0, max_num_pillars_ * max_num_points_per_pillar_ * sizeof(float));
    // wait until all memory operations were completed
    queue.wait();
  } else {
    // For GPU, the queue.memset waste time, using GPU kernel to assign the value use less.
    auto e = queue.submit([&](auto &h){
      auto dev_pillar_x_auto = dev_pillar_x_;
      auto dev_pillar_y_auto = dev_pillar_y_;
      auto dev_pillar_z_auto = dev_pillar_z_;
      auto dev_pillar_i_auto = dev_pillar_i_;
      auto dev_x_coors_for_sub_shaped_auto = dev_x_coors_for_sub_shaped_;
      auto dev_y_coors_for_sub_shaped_auto = dev_y_coors_for_sub_shaped_;
      auto dev_pillar_feature_mask_auto = dev_pillar_feature_mask_;
      auto x = max_num_pillars_;
      auto y = max_num_points_per_pillar_;
      // One work-item per (pillar, point) slot clears all seven buffers.
      h.parallel_for(sycl::range<1>(y*x),[=](auto index) {
        dev_pillar_x_auto[index] = 0;
        dev_pillar_y_auto[index] = 0;
        dev_pillar_z_auto[index] = 0;
        dev_pillar_i_auto[index] = 0;
        dev_x_coors_for_sub_shaped_auto[index] = 0;
        dev_y_coors_for_sub_shaped_auto[index] = 0;
        dev_pillar_feature_mask_auto[index] = 0;
      });
    });
    e.wait();
    queue.memset(dev_sparse_pillar_map_, 0, grid_y_size_ * grid_x_size_ * sizeof(int));
    queue.memset(dev_x_coors_, 0, max_num_pillars_ * sizeof(int));
    queue.memset(dev_y_coors_, 0, max_num_pillars_ * sizeof(int));
    queue.memset(dev_num_points_per_pillar_, 0, max_num_pillars_ * sizeof(float));
    queue.memset(dev_anchor_mask_, 0, num_anchor_ * sizeof(int));
    queue.memset(dev_cumsum_workspace_, 0, grid_y_size_ * grid_x_size_ * sizeof(int));
    queue.wait();
  }
  // Run the PreProcessing operations and generate the input feature map
  preprocess_points_ptr_->DoPreProcess(dev_points, in_num_points, dev_x_coors_, dev_y_coors_,
                                       dev_num_points_per_pillar_, dev_pillar_x_, dev_pillar_y_, dev_pillar_z_,
                                       dev_pillar_i_, dev_x_coors_for_sub_shaped_, dev_y_coors_for_sub_shaped_,
                                       dev_pillar_feature_mask_, dev_sparse_pillar_map_, host_pillar_count_);
  // remove no longer required memory
  sycl::free(dev_points, devicemanager::GetCurrentQueue());
}

//! Full detection pipeline entry point: preprocessing, anchor mask, PFE
//! inference, scatter, RPN inference, postprocessing (continues past this chunk).
void PointPillars::Detect(const float *in_points_array, const int in_num_points,
                          std::vector<ObjectDetection> &detections) {
  // Run the PointPillar detection algorthim
  // reset the detections
  detections.clear();
  sycl::queue queue = devicemanager::GetCurrentQueue();
  std::cout << "Starting PointPillars\n";
  std::cout << " PreProcessing";
  // First run the preprocessing to convert the LiDAR pointcloud into the required pillar format
  const auto t0 = std::chrono::high_resolution_clock::now();
  PreProcessing(in_points_array, in_num_points);
  const auto t1 = std::chrono::high_resolution_clock::now();
  std::cout << " - " << std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0).count() << "ms\n";
  // 2nd step is to create the anchor mask used to optimize the decoding of the RegionProposalNetwork output
  std::cout << " AnchorMask";
  anchor_grid_ptr_->CreateAnchorMask(dev_sparse_pillar_map_, grid_y_size_, grid_x_size_, pillar_x_size_,
                                     pillar_y_size_, dev_anchor_mask_, dev_cumsum_workspace_);
  const auto t2 = std::chrono::high_resolution_clock::now();
  std::cout << " - " << std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count() << "ms\n";
  // 3rd step is to execture the PillarFeatureExtraction (PFE) network
  // The required input data has to transfered from the SYCL device to be accessible by OpenVINO
  // Next, the inference can be excuted
  // At last, the results have to be copied back to the SYCL device
  std::cout << " PFE Inference";
  // Fill input tensor with data
  for (auto &input : pfe_exe_network_.inputs()) {
    // Make PFE input data be accessible by OpenVINO
    // TODO: test inefficiency in GPU & CPU
    pfe_infer_request_.set_tensor(input.get_any_name(), pfe_input_tensor_map_[input.get_any_name()]);
  }
  auto pfe_output_name = pfe_exe_network_.output().get_any_name();
  pfe_infer_request_.set_tensor(pfe_output_name, pfe_output_tensor_);
  // Launch the inference
  pfe_infer_request_.start_async();
  cl_command_queue q = sycl::get_native<sycl::backend::opencl>(devicemanager::GetCurrentQueue());
  //clEnqueueBarrierWithWaitList(q, 0, nullptr, nullptr);
  // Wait for the inference to finish
  pfe_infer_request_.wait();
  const auto t3 = std::chrono::high_resolution_clock::now();
  std::cout << " - " << std::chrono::duration_cast<std::chrono::milliseconds>(t3 - t2).count() <<
"ms\n"; // get PFE inference output // auto output_name = pfe_exe_network_.output().get_any_name(); // auto output_tensor = pfe_infer_request_.get_tensor(pfe_output_name); // auto output_blob = output_tensor.data<const float>(); // 4th step: Perform scatter operation, i.e. convert from pillar features to top view image-like features std::cout << " Scattering"; // queue.memcpy(pfe_output_, output_blob, pfe_output_size_ * sizeof(float)); if (!devicemanager::GetCurrentDevice().is_gpu()) { queue.memset(dev_scattered_feature_, 0, rpn_input_size_ * sizeof(float)); queue.wait(); } else { // For GPU, the queue.memset waste time, using GPU kernel to assign the value use less. auto e = queue.submit([&](auto &h){ auto x = rpn_input_size_; auto dev_scattered_feature_auto = dev_scattered_feature_; h.parallel_for(sycl::range<1>(x),[=](sycl::id<1> id) { dev_scattered_feature_auto[id] = 0; }); }); e.wait(); } scatter_ptr_->DoScatter(host_pillar_count_[0], dev_x_coors_, dev_y_coors_, pfe_output_, dev_scattered_feature_); const auto t4 = std::chrono::high_resolution_clock::now(); std::cout << " - " << std::chrono::duration_cast<std::chrono::milliseconds>(t4 - t3).count() << "ms\n"; std::cout << " RPN Inference"; // 5th step is to execute the RegionProposal (RPN) network // Therefore, first an inference request using OpenVINO has to be created // Then, the required input data has to transfered from the SYCL device to be accessible by OpenVINO // Next, the inference can be excuted // At last, the results have to be copied back to the SYCL device // Fill input tensor with data auto input = rpn_infer_request_.get_compiled_model().input(); rpn_infer_request_.set_input_tensor(input.get_index(), scattered_feature_tensor_); int output_index = 0; for (auto &output : rpn_exe_network_.outputs()) { rpn_infer_request_.set_tensor(output.get_any_name(), rpn_output_tensors_[output_index++]); } // Start the inference and wait for the results rpn_infer_request_.start_async(); 
rpn_infer_request_.wait(); queue.memset(dev_filter_count_, 0, sizeof(int)); queue.wait(); const auto t5 = std::chrono::high_resolution_clock::now(); std::cout << " - " << std::chrono::duration_cast<std::chrono::milliseconds>(t5 - t4).count() << "ms\n"; std::cout << " Postprocessing"; // Last step is to run the PostProcessing operation postprocess_ptr_->DoPostProcess( rpn_1_output_, rpn_2_output_, rpn_3_output_, dev_anchor_mask_, anchor_grid_ptr_->dev_anchors_px_, anchor_grid_ptr_->dev_anchors_py_, anchor_grid_ptr_->dev_anchors_pz_, anchor_grid_ptr_->dev_anchors_dx_, anchor_grid_ptr_->dev_anchors_dy_, anchor_grid_ptr_->dev_anchors_dz_, anchor_grid_ptr_->dev_anchors_ro_, dev_multiclass_score_, dev_filtered_box_, dev_filtered_score_, dev_filtered_dir_, dev_filtered_class_id_, dev_box_for_nms_, dev_filter_count_, detections); const auto t6 = std::chrono::high_resolution_clock::now(); std::cout << " - " << std::chrono::duration_cast<std::chrono::milliseconds>(t6 - t5).count() << "ms\n"; std::cout << "Done\n"; } } // namespace pointpillars
cpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/src/pointpillars/anchorgrid.cpp
/*
 * Copyright 2018-2019 Autoware Foundation. All rights reserved.
 * Copyright (c) 2019-2021 Intel Corporation (oneAPI modifications)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pointpillars/anchorgrid.hpp"

#include <sycl/sycl.hpp>

#include <algorithm>

#include "devicemanager/devicemanager.hpp"
#include "pointpillars/common.hpp"
#include "pointpillars/scan.hpp"

namespace pointpillars {

// Constructs the anchor grid from the given configuration.
// All host/device anchor arrays start as nullptr; they are only allocated by
// GenerateAnchors() / MoveAnchorsToDevice().
AnchorGrid::AnchorGrid(AnchorGridConfig &config)
    : config_{config},
      dev_anchors_px_{nullptr},
      dev_anchors_py_{nullptr},
      dev_anchors_pz_{nullptr},
      dev_anchors_dx_{nullptr},
      dev_anchors_dy_{nullptr},
      dev_anchors_dz_{nullptr},
      dev_anchors_ro_{nullptr},
      dev_anchors_rad_{nullptr},
      host_anchors_px_{nullptr},
      host_anchors_py_{nullptr},
      host_anchors_pz_{nullptr},
      host_anchors_dx_{nullptr},
      host_anchors_dy_{nullptr},
      host_anchors_dz_{nullptr},
      host_anchors_ro_{nullptr},
      host_anchors_rad_{nullptr} {
  // mc_: number of anchor classes, mr_: number of anchor rotations
  mc_ = config_.anchors.size();
  mr_ = config_.rotations.size();

  // In the anchor map the Width is along y and the Height along x
  mh_ = static_cast<std::size_t>((config_.max_x_range - config_.min_x_range) / config_.x_stride);
  mw_ = static_cast<std::size_t>((config_.max_y_range - config_.min_y_range) / config_.y_stride);

  // One anchor per (grid cell, class, rotation) combination
  num_anchors_ = mw_ * mh_ * mc_ * mr_;
}

// Releases any device and host memory still owned by the grid.
AnchorGrid::~AnchorGrid() {
  ClearDeviceMemory();
  ClearHostMemory();
}

// Fills the host-side anchor arrays for every (y, x, class, rotation)
// combination and then uploads them to the device. After the upload the host
// copies are released again (see MoveAnchorsToDevice()).
void AnchorGrid::GenerateAnchors() {
  AllocateHostMemory();

  // Minimum (x, y) anchor grid coordinates + pillar center offset
  float x_offset = config_.min_x_range + 0.5f * config_.x_stride;
  float y_offset = config_.min_y_range + 0.5f * config_.y_stride;

  // In the anchor map the Width is along y and the Height along x, c is the class, r is the rotation
  for (size_t y = 0; y < mw_; y++) {
    for (size_t x = 0; x < mh_; x++) {
      for (size_t c = 0; c < mc_; c++) {
        for (size_t r = 0; r < mr_; r++) {
          // Flattened index in [y][x][c][r] order — must match the kernels below
          std::size_t index = y * mh_ * mc_ * mr_ + x * mc_ * mr_ + c * mr_ + r;

          // Set anchor grid locations at the center of the pillar
          host_anchors_px_[index] = static_cast<float>(x) * config_.x_stride + x_offset;
          host_anchors_py_[index] = static_cast<float>(y) * config_.y_stride + y_offset;

          // Assign z as dz
          host_anchors_pz_[index] = config_.anchors[c].dz;

          // Assign current anchor rotation r
          host_anchors_ro_[index] = config_.rotations[r];

          // Assign anchors sizes for the given class c
          host_anchors_dx_[index] = config_.anchors[c].x;
          host_anchors_dy_[index] = config_.anchors[c].y;
          host_anchors_dz_[index] = config_.anchors[c].z;
        }
      }
    }
  }

  // host_anchors_rad_ is used to optimize the decoding by precalculating an effective radius around the anchor
  for (std::size_t c = 0; c < mc_; c++) {
    host_anchors_rad_[c] = std::min(config_.anchors[c].x, config_.anchors[c].y);
  }

  MoveAnchorsToDevice();
}

// Allocates and zero-initializes all host-side anchor arrays.
void AnchorGrid::AllocateHostMemory() {
  host_anchors_px_ = new float[num_anchors_];
  host_anchors_py_ = new float[num_anchors_];
  host_anchors_pz_ = new float[num_anchors_];
  host_anchors_dx_ = new float[num_anchors_];
  host_anchors_dy_ = new float[num_anchors_];
  host_anchors_dz_ = new float[num_anchors_];
  host_anchors_ro_ = new float[num_anchors_];
  for (std::size_t i = 0; i < num_anchors_; i++) {
    host_anchors_px_[i] = 0.f;
    host_anchors_py_[i] = 0.f;
    host_anchors_pz_[i] = 0.f;
    host_anchors_dx_[i] = 0.f;
    host_anchors_dy_[i] = 0.f;
    host_anchors_dz_[i] = 0.f;
    host_anchors_ro_[i] = 0.f;
  }

  // One effective radius per class (not per anchor)
  host_anchors_rad_ = new float[mc_];
  for (std::size_t i = 0; i < mc_; i++) {
    host_anchors_rad_[i] = 0.f;
  }
}

// Frees the host-side anchor arrays and resets the pointers.
// Safe to call when the arrays were never allocated (delete[] nullptr is a no-op).
void AnchorGrid::ClearHostMemory() {
  delete[] host_anchors_px_;
  delete[] host_anchors_py_;
  delete[] host_anchors_pz_;
  delete[] host_anchors_dx_;
  delete[] host_anchors_dy_;
  delete[] host_anchors_dz_;
  delete[] host_anchors_ro_;
  delete[] host_anchors_rad_;

  host_anchors_px_ = nullptr;
  host_anchors_py_ = nullptr;
  host_anchors_pz_ = nullptr;
  host_anchors_dx_ = nullptr;
  host_anchors_dy_ = nullptr;
  host_anchors_dz_ = nullptr;
  host_anchors_ro_ = nullptr;
  host_anchors_rad_ = nullptr;
}

// Frees the device-side anchor arrays and resets the pointers.
// sycl::free on a null pointer is a no-op, so this is safe before allocation.
void AnchorGrid::ClearDeviceMemory() {
  sycl::queue queue = devicemanager::GetCurrentQueue();
  sycl::free(dev_anchors_px_, queue);
  sycl::free(dev_anchors_py_, queue);
  sycl::free(dev_anchors_pz_, queue);
  sycl::free(dev_anchors_dx_, queue);
  sycl::free(dev_anchors_dy_, queue);
  sycl::free(dev_anchors_dz_, queue);
  sycl::free(dev_anchors_ro_, queue);
  sycl::free(dev_anchors_rad_, queue);

  dev_anchors_px_ = nullptr;
  dev_anchors_py_ = nullptr;
  dev_anchors_pz_ = nullptr;
  dev_anchors_dx_ = nullptr;
  dev_anchors_dy_ = nullptr;
  dev_anchors_dz_ = nullptr;
  dev_anchors_ro_ = nullptr;
  dev_anchors_rad_ = nullptr;
}

// Allocates the device-side anchor arrays on the current SYCL queue.
void AnchorGrid::AllocateDeviceMemory() {
  sycl::queue queue = devicemanager::GetCurrentQueue();
  dev_anchors_px_ = sycl::malloc_device<float>(num_anchors_, queue);
  dev_anchors_py_ = sycl::malloc_device<float>(num_anchors_, queue);
  dev_anchors_pz_ = sycl::malloc_device<float>(num_anchors_, queue);
  dev_anchors_dx_ = sycl::malloc_device<float>(num_anchors_, queue);
  dev_anchors_dy_ = sycl::malloc_device<float>(num_anchors_, queue);
  dev_anchors_dz_ = sycl::malloc_device<float>(num_anchors_, queue);
  dev_anchors_ro_ = sycl::malloc_device<float>(num_anchors_, queue);
  dev_anchors_rad_ = sycl::malloc_device<float>(mc_, queue);
}

// Copies all host anchor arrays to the device (blocking), then releases the
// host copies — after this call only the dev_* arrays are valid.
void AnchorGrid::MoveAnchorsToDevice() {
  AllocateDeviceMemory();

  sycl::queue queue = devicemanager::GetCurrentQueue();
  queue.memcpy(dev_anchors_px_, host_anchors_px_, num_anchors_ * sizeof(float));
  queue.memcpy(dev_anchors_py_, host_anchors_py_, num_anchors_ * sizeof(float));
  queue.memcpy(dev_anchors_pz_, host_anchors_pz_, num_anchors_ * sizeof(float));
  queue.memcpy(dev_anchors_dx_, host_anchors_dx_, num_anchors_ * sizeof(float));
  queue.memcpy(dev_anchors_dy_, host_anchors_dy_, num_anchors_ * sizeof(float));
  queue.memcpy(dev_anchors_dz_, host_anchors_dz_, num_anchors_ * sizeof(float));
  queue.memcpy(dev_anchors_ro_, host_anchors_ro_, num_anchors_ * sizeof(float));
  queue.memcpy(dev_anchors_rad_, host_anchors_rad_, mc_ * sizeof(float));
  queue.wait();

  ClearHostMemory();
}

// Builds the anchor mask: anchors whose effective radius overlaps at least one
// occupied pillar are marked 1, all others 0.
// NOTE(review): the caller passes grid_y_size_ as pillar_map_w and grid_x_size_
// as pillar_map_h — the w/h naming here is swapped relative to the grid; keep
// the argument order when modifying call sites.
void AnchorGrid::CreateAnchorMask(int *dev_pillar_map, const int pillar_map_w, const int pillar_map_h,
                                  const float pillar_size_x, const float pillar_size_y, int *dev_anchor_mask,
                                  int *dev_pillar_workspace) {
  // Calculate the cumulative sum over the 2D grid dev_pillar_map in both X and Y

  // Calculate an N greater than the current pillar map size enough to hold the cummulative sum matrix
  const std::size_t n = NextPower(static_cast<std::size_t>(std::max(pillar_map_h, pillar_map_w)));

  // Calculate the cumulative sum (ScanX writes into the workspace, ScanY back into the map)
  ScanX(dev_pillar_workspace, dev_pillar_map, pillar_map_h, pillar_map_w, n);
  ScanY(dev_pillar_map, dev_pillar_workspace, pillar_map_h, pillar_map_w, n);

  // Mask anchors only where input data is found
  MaskAnchors(dev_anchors_px_, dev_anchors_py_, dev_pillar_map, dev_anchor_mask, dev_anchors_rad_, config_.min_x_range,
              config_.min_y_range, pillar_size_x, pillar_size_y, pillar_map_h, pillar_map_w, mc_, mr_, mh_, mw_);
}

// ND-range kernel: decides for one anchor whether any occupied pillar lies
// inside the axis-aligned box spanned by the anchor's effective radius, using
// the 2D cumulative sum in pillar_map (summed-area-table trick).
void MaskAnchorsKernel(const float *anchors_px, const float *anchors_py, const int *pillar_map, int *anchor_mask,
                       const float *anchors_rad, const float min_x_range, const float min_y_range,
                       const float pillar_x_size, const float pillar_y_size, const int grid_x_size,
                       const int grid_y_size, sycl::nd_item<3> item_ct1) {
  // Recover the launch geometry: local range carries (H, R), group range carries (W, C)
  const int H = item_ct1.get_local_range().get(2);
  const int R = item_ct1.get_local_range().get(1);
  const int C = item_ct1.get_group_range(1);

  const int x = item_ct1.get_local_id(2);
  const int r = item_ct1.get_local_id(1);
  const int y = item_ct1.get_group(2);
  const int c = item_ct1.get_group(1);

  // Same [y][x][c][r] flattening as in GenerateAnchors()
  int index = y * H * C * R + x * C * R + c * R + r;

  float rad = anchors_rad[c];

  float x_anchor = anchors_px[index];
  float y_anchor = anchors_py[index];

  // Grid-cell bounding box covered by the anchor radius, clamped to the grid
  int anchor_coordinates_min_x = (x_anchor - rad - min_x_range) / pillar_x_size;
  int anchor_coordinates_min_y = (y_anchor - rad - min_y_range) / pillar_y_size;
  int anchor_coordinates_max_x = (x_anchor + rad - min_x_range) / pillar_x_size;
  int anchor_coordinates_max_y = (y_anchor + rad - min_y_range) / pillar_y_size;

  anchor_coordinates_min_x = sycl::max(anchor_coordinates_min_x, 0);
  anchor_coordinates_min_y = sycl::max(anchor_coordinates_min_y, 0);
  anchor_coordinates_max_x = sycl::min(anchor_coordinates_max_x, (int)(grid_x_size - 1));
  anchor_coordinates_max_y = sycl::min(anchor_coordinates_max_y, (int)(grid_y_size - 1));

  // cumulative sum difference
  int bottom_left = pillar_map[anchor_coordinates_max_y * grid_x_size + anchor_coordinates_min_x];
  int top_left = pillar_map[anchor_coordinates_min_y * grid_x_size + anchor_coordinates_min_x];
  int bottom_right = pillar_map[anchor_coordinates_max_y * grid_x_size + anchor_coordinates_max_x];
  int top_right = pillar_map[anchor_coordinates_min_y * grid_x_size + anchor_coordinates_max_x];

  // Area calculation
  int area = bottom_right - top_right - bottom_left + top_left;

  if (area >= 1) {
    anchor_mask[index] = 1;
  } else {
    anchor_mask[index] = 0;
  }
}

// Flat-index variant of MaskAnchorsKernel used by the 1D launch in
// MaskAnchors(); index and class c are precomputed by the caller.
void MaskAnchorsSimpleKernel(const float *anchors_px, const float *anchors_py, const int *pillar_map, int *anchor_mask,
                             const float *anchors_rad, const float min_x_range, const float min_y_range,
                             const float pillar_x_size, const float pillar_y_size, const int grid_x_size,
                             const int grid_y_size, const int index, const int c) {
  float rad = anchors_rad[c];

  float x_anchor = anchors_px[index];
  float y_anchor = anchors_py[index];

  // Grid-cell bounding box covered by the anchor radius, clamped to the grid
  int anchor_coordinates_min_x = (x_anchor - rad - min_x_range) / pillar_x_size;
  int anchor_coordinates_min_y = (y_anchor - rad - min_y_range) / pillar_y_size;
  int anchor_coordinates_max_x = (x_anchor + rad - min_x_range) / pillar_x_size;
  int anchor_coordinates_max_y = (y_anchor + rad - min_y_range) / pillar_y_size;

  anchor_coordinates_min_x = sycl::max(anchor_coordinates_min_x, 0);
  anchor_coordinates_min_y = sycl::max(anchor_coordinates_min_y, 0);
  anchor_coordinates_max_x = sycl::min(anchor_coordinates_max_x, (int)(grid_x_size - 1));
  anchor_coordinates_max_y = sycl::min(anchor_coordinates_max_y, (int)(grid_y_size - 1));

  // cumulative sum difference
  int bottom_left = pillar_map[anchor_coordinates_max_y * grid_x_size + anchor_coordinates_min_x];
  int top_left = pillar_map[anchor_coordinates_min_y * grid_x_size + anchor_coordinates_min_x];
  int bottom_right = pillar_map[anchor_coordinates_max_y * grid_x_size + anchor_coordinates_max_x];
  int top_right = pillar_map[anchor_coordinates_min_y * grid_x_size + anchor_coordinates_max_x];

  // Area calculation
  int area = bottom_right - top_right - bottom_left + top_left;
  if (area >= 1) {
    anchor_mask[index] = 1;
  } else {
    anchor_mask[index] = 0;
  }
}

// Launches the anchor-mask computation. Non-GPU devices use an nd_range launch
// with an (H, R) work-group; GPUs use a flat 1D launch where the [y][x][c][r]
// decomposition is recomputed per work-item (blocking: waits for completion).
void AnchorGrid::MaskAnchors(const float *dev_anchors_px, const float *dev_anchors_py, const int *dev_pillar_map,
                             int *dev_anchor_mask, const float *dev_anchors_rad, const float min_x_range,
                             const float min_y_range, const float pillar_x_size, const float pillar_y_size,
                             const int grid_x_size, const int grid_y_size, const int C, const int R, const int H,
                             const int W) {
  sycl::queue queue = devicemanager::GetCurrentQueue();

  if (!devicemanager::GetCurrentDevice().is_gpu()) {
    sycl::range<3> block(H, R, 1);
    sycl::range<3> grid(W, C, 1);
    queue.submit([&](auto &h) {
      auto range = grid * block;
      // Note the index reversal: SYCL ranges are constructed here from
      // component 2 down to 0 of the (grid * block) product.
      h.parallel_for(sycl::nd_range<3>(sycl::range<3>(range.get(2), range.get(1), range.get(0)),
                                       sycl::range<3>(block.get(2), block.get(1), block.get(0))),
                     [=](sycl::nd_item<3> item_ct1) {
                       MaskAnchorsKernel(dev_anchors_px, dev_anchors_py, dev_pillar_map, dev_anchor_mask,
                                         dev_anchors_rad, min_x_range, min_y_range, pillar_x_size, pillar_y_size,
                                         grid_x_size, grid_y_size, item_ct1);
                     });
    });
  } else {
    const unsigned int length = H * W * C * R;
    queue.submit([&](auto &h) {
      h.parallel_for(sycl::range<1>{length}, [=](sycl::id<1> it) {
        // Decompose the flat index into (y, x, c); r is implicit in index
        const int index = it[0];
        const int y = index / (H * C * R);
        const int x = (index - y * H * C * R) / (C * R);
        const int c = (index - y * H * C * R - x * C * R) / R;
        MaskAnchorsSimpleKernel(dev_anchors_px, dev_anchors_py, dev_pillar_map, dev_anchor_mask, dev_anchors_rad,
                                min_x_range, min_y_range, pillar_x_size, pillar_y_size, grid_x_size, grid_y_size,
                                index, c);
      });
    });
  }
  queue.wait();
}

}  // namespace pointpillars
cpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/src/pointpillars/scan.cpp
/*
 * Copyright 2018-2019 Autoware Foundation. All rights reserved.
 * Copyright (c) 2019-2021 Intel Corporation (oneAPI modifications)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pointpillars/scan.hpp"

#include <sycl/sycl.hpp>

#include <vector>

#include "devicemanager/devicemanager.hpp"

namespace pointpillars {

// Work-efficient Blelloch scan along the rows (X direction).
// Each work-group scans one row of 2*bdim elements held in local memory
// ('local'); the exclusive scan result is shifted on write-out so that the
// output is the inclusive cumulative sum of the row.
void ScanXKernel(int *output, const int *input, int n, sycl::nd_item<3> item_ct1, uint8_t *local) {
  auto temp = (int *)local;  // allocated on invocation
  int thid = item_ct1.get_local_id(2);
  int bid = item_ct1.get_group(2);
  int bdim = item_ct1.get_local_range().get(2);
  int offset = 1;

  temp[2 * thid] = input[bid * bdim * 2 + 2 * thid];  // load input into shared memory
  temp[2 * thid + 1] = input[bid * bdim * 2 + 2 * thid + 1];
  for (int d = n >> 1; d > 0; d >>= 1) {  // build sum in place up the tree
    item_ct1.barrier();
    if (thid < d) {
      int ai = offset * (2 * thid + 1) - 1;
      int bi = offset * (2 * thid + 2) - 1;
      temp[bi] += temp[ai];
    }
    offset *= 2;
  }
  if (thid == 0) {
    temp[n - 1] = 0;
  }                                   // clear the last element
  for (int d = 1; d < n; d *= 2) {    // traverse down tree & build scan
    offset >>= 1;
    item_ct1.barrier();
    if (thid < d) {
      int ai = offset * (2 * thid + 1) - 1;
      int bi = offset * (2 * thid + 2) - 1;
      int t = temp[ai];
      temp[ai] = temp[bi];
      temp[bi] += t;
    }
  }
  item_ct1.barrier();
  output[bid * bdim * 2 + 2 * thid] = temp[2 * thid + 1];  // write results to device memory
  if (thid + 1 == bdim) {
    // Last element: exclusive scan + its own input value gives the inclusive sum
    output[bid * bdim * 2 + 2 * thid + 1] = temp[2 * thid + 1] + input[bid * bdim * 2 + 2 * thid + 1];
  } else {
    output[bid * bdim * 2 + 2 * thid + 1] = temp[2 * thid + 2];
  }
}

// Work-efficient Blelloch scan along the columns (Y direction).
// Identical structure to ScanXKernel, but elements of one column are strided
// by the number of work-groups (gdim) in memory.
void ScanYKernel(int *output, const int *input, int n, sycl::nd_item<3> item_ct1, uint8_t *local) {
  auto temp = (int *)local;  // allocated on invocation
  int thid = item_ct1.get_local_id(2);
  int bid = item_ct1.get_group(2);
  int bdim = item_ct1.get_local_range().get(2);
  int gdim = item_ct1.get_group_range(2);
  int offset = 1;

  temp[2 * thid] = input[bid + 2 * thid * gdim];  // load input into shared memory
  temp[2 * thid + 1] = input[bid + 2 * thid * gdim + gdim];
  for (int d = n >> 1; d > 0; d >>= 1) {  // build sum in place up the tree
    item_ct1.barrier();
    if (thid < d) {
      int ai = offset * (2 * thid + 1) - 1;
      int bi = offset * (2 * thid + 2) - 1;
      temp[bi] += temp[ai];
    }
    offset *= 2;
  }
  if (thid == 0) {
    temp[n - 1] = 0;
  }                                   // clear the last element
  for (int d = 1; d < n; d *= 2) {    // traverse down tree & build scan
    offset >>= 1;
    item_ct1.barrier();
    if (thid < d) {
      int ai = offset * (2 * thid + 1) - 1;
      int bi = offset * (2 * thid + 2) - 1;
      int t = temp[ai];
      temp[ai] = temp[bi];
      temp[bi] += t;
    }
  }
  item_ct1.barrier();
  output[bid + 2 * thid * gdim] = temp[2 * thid + 1];  // write results to device memory
  int second_ind = 2 * thid + 2;
  if (second_ind == bdim * 2) {
    // Last element of the column: shift to an inclusive result
    output[bid + 2 * thid * gdim + gdim] = temp[2 * thid + 1] + input[bid + 2 * thid * gdim + gdim];
  } else {
    output[bid + 2 * thid * gdim + gdim] = temp[2 * thid + 2];
  }
}

// Row-wise (X) inclusive cumulative sum of a w*h grid.
// @param[out] dev_output device buffer receiving the scanned grid
// @param[in]  dev_input  device buffer holding the input grid
// @param[in]  w, h       grid extents (note: the caller passes them in
//                        (pillar_map_h, pillar_map_w) order — verify at call sites)
// @param[in]  n          power-of-two >= max(w, h); size of the local scan buffer
// Blocking: waits for completion before returning.
void ScanX(int *dev_output, const int *dev_input, int w, int h, int n) {
  sycl::queue queue = devicemanager::GetCurrentQueue();

  if (!devicemanager::GetCurrentDevice().is_cpu()) {
    // For host and GPU (due to worker limitations) we use a sequential
    // implementation. std::vector replaces the original raw new[] buffers,
    // which were never freed (memory leak on every call).
    std::vector<int> host_input(w * h);
    std::vector<int> host_output(w * h);

    queue.memcpy(host_input.data(), dev_input, w * h * sizeof(int)).wait();

    for (int i = 0; i < h; i++) {
      for (int j = 0; j < w; j++) {
        if (j == 0) {
          host_output[i * w + j] = host_input[i * w + j];
        } else {
          host_output[i * w + j] = host_input[i * w + j] + host_output[i * w + j - 1];
        }
      }
    }
    queue.memcpy(dev_output, host_output.data(), w * h * sizeof(int)).wait();
  } else {
    queue.submit([&](sycl::handler &cgh) {
      // Local scratch for the in-group scan: n ints per work-group
      sycl::accessor<uint8_t, 1, sycl::access::mode::read_write, sycl::access::target::local> local_acc_ct1(
          sycl::range<1>(n * sizeof(int)), cgh);
      cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, h) * sycl::range<3>(1, 1, w / 2),
                                         sycl::range<3>(1, 1, w / 2)),
                       [=](sycl::nd_item<3> item_ct1) {
                         ScanXKernel(dev_output, dev_input, n, item_ct1, local_acc_ct1.get_pointer());
                       });
    });
  }

  queue.wait();
}

// Column-wise (Y) inclusive cumulative sum of a w*h grid.
// Parameters as in ScanX; blocking.
void ScanY(int *dev_output, const int *dev_input, int w, int h, int n) {
  sycl::queue queue = devicemanager::GetCurrentQueue();

  if (!devicemanager::GetCurrentDevice().is_cpu()) {
    // For host and GPU (due to worker limitations) we use a sequential
    // implementation. std::vector replaces the original leaking new[] buffers.
    std::vector<int> host_input(w * h);
    std::vector<int> host_output(w * h);

    queue.memcpy(host_input.data(), dev_input, w * h * sizeof(int)).wait();

    for (int i = 0; i < w; i++) {
      for (int j = 0; j < h; j++) {
        if (j == 0) {
          host_output[i + j * w] = host_input[i + j * w];
        } else {
          host_output[i + j * w] = host_input[i + j * w] + host_output[i + (j - 1) * w];
        }
      }
    }
    queue.memcpy(dev_output, host_output.data(), w * h * sizeof(int)).wait();
  } else {
    queue.submit([&](sycl::handler &cgh) {
      // Local scratch for the in-group scan: n ints per work-group
      sycl::accessor<uint8_t, 1, sycl::access::mode::read_write, sycl::access::target::local> local_acc_ct1(
          sycl::range<1>(n * sizeof(int)), cgh);
      cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, w) * sycl::range<3>(1, 1, h / 2),
                                         sycl::range<3>(1, 1, h / 2)),
                       [=](sycl::nd_item<3> item_ct1) {
                         ScanYKernel(dev_output, dev_input, n, item_ct1, local_acc_ct1.get_pointer());
                       });
    });
  }

  queue.wait();
}

}  // namespace pointpillars
cpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/include/pointpillars/pointpillars.hpp
/*
 * Copyright 2018-2019 Autoware Foundation. All rights reserved.
 * Copyright (c) 2019-2021 Intel Corporation (oneAPI modifications)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include <numeric>
#include <iomanip>
#include <boost/filesystem.hpp>
#include <openvino/openvino.hpp>
#include "pointpillars/anchorgrid.hpp"
#include "pointpillars/pointpillars_config.hpp"
#include "pointpillars/pointpillars_util.hpp"
#include "pointpillars/postprocess.hpp"
#include "pointpillars/preprocess.hpp"
#include "pointpillars/scatter.hpp"
#include <openvino/runtime/intel_gpu/ocl/ocl.hpp>

namespace pointpillars {

/**
 * PointPillar's Main Class
 *
 * This class encapsulates the complete end-to-end
 * implementation of PointPillars.
 *
 * Users only need to create an object and call 'Detect'.
 */
class PointPillars {
 protected:
  PointPillarsConfig config_;

  // Detection thresholds (fixed at construction)
  const float score_threshold_;
  const float nms_overlap_threshold_;

  // Paths of the two OpenVINO model files (PFE and RPN networks)
  const std::string pfe_model_file_;
  const std::string rpn_model_file_;

  // Pillar grid / network dimensioning constants
  const int max_num_pillars_;
  const int max_num_points_per_pillar_;
  const int pfe_output_size_;
  const int grid_x_size_;
  const int grid_y_size_;
  const int grid_z_size_;
  const int rpn_input_size_;
  const int num_cls_;
  const int num_anchor_x_inds_;
  const int num_anchor_y_inds_;
  const int num_anchor_r_inds_;
  const int num_anchor_;
  const int rpn_box_output_size_;
  const int rpn_cls_output_size_;
  const int rpn_dir_output_size_;

  // Physical pillar size and detection range (in input point-cloud units)
  const float pillar_x_size_;
  const float pillar_y_size_;
  const float pillar_z_size_;
  const float min_x_range_;
  const float min_y_range_;
  const float min_z_range_;
  const float max_x_range_;
  const float max_y_range_;
  const float max_z_range_;

  const int batch_size_;
  const int num_features_;
  const int num_threads_;
  const int num_box_corners_;
  const int num_output_box_feature_;

  // Number of non-empty pillars found by preprocessing (host-side, single entry)
  int host_pillar_count_[1];

  int *dev_x_coors_;                  // Array that holds the coordinates of corresponding pillar in x
  int *dev_y_coors_;                  // Array that holds the coordinates of corresponding pillar in y
  float *dev_num_points_per_pillar_;  // Array that stores the number of points in the corresponding pillar
  int *dev_sparse_pillar_map_;  // Mask with values 0 or 1 that specifies if the corresponding pillar has points or not
  int *dev_cumsum_workspace_;   // Device variable used as temporary storage of the cumulative sum during the anchor
                                // mask creation

  // variables to store the pillar's points
  float *dev_pillar_x_;
  float *dev_pillar_y_;
  float *dev_pillar_z_;
  float *dev_pillar_i_;

  // variables to store the pillar coordinates in the pillar grid
  float *dev_x_coors_for_sub_shaped_;
  float *dev_y_coors_for_sub_shaped_;

  // Pillar mask used to ignore the features generated with empty pillars
  float *dev_pillar_feature_mask_;

  // Mask used to filter the anchors in regions with input points
  int *dev_anchor_mask_;

  // Device memory used to store the RPN input feature map after Scatter
  float *dev_scattered_feature_;

  // Device memory locations to store the object detections
  float *dev_filtered_box_;
  float *dev_filtered_score_;
  float *dev_multiclass_score_;
  int *dev_filtered_dir_;
  int *dev_filtered_class_id_;
  float *dev_box_for_nms_;
  int *dev_filter_count_;

  // Pipeline stages (owned by this class)
  std::unique_ptr<PreProcess> preprocess_points_ptr_;
  std::unique_ptr<Scatter> scatter_ptr_;
  std::unique_ptr<PostProcess> postprocess_ptr_;
  std::unique_ptr<AnchorGrid> anchor_grid_ptr_;

 public:
  PointPillars() = delete;

  /**
   * @brief Constructor
   * @param[in] score_threshold Score threshold for filtering output
   * @param[in] nms_threshold IOU threshold for NMS
   * @param[in] config PointPillars net configuration file
   */
  PointPillars(const float score_threshold, const float nms_threshold, const PointPillarsConfig &config);

  ~PointPillars();

  /**
   * @brief Call PointPillars to perform the end-to-end object detection chain
   * @param[in] in_points_array Pointcloud array
   * @param[in] in_num_points Number of points
   * @param[out] detections Network output bounding box list
   * @details This is the main public interface to run the algorithm
   */
  void Detect(const float *in_points_array, const int in_num_points, std::vector<ObjectDetection> &detections);

 private:
  // OpenVINO networks, inference requests and tensors for the two models
  ov::CompiledModel pfe_exe_network_;
  std::map<std::string, float *> pfe_input_map_;
  ov::CompiledModel rpn_exe_network_;
  ov::InferRequest pfe_infer_request_;
  ov::InferRequest rpn_infer_request_;
  std::map<std::string, ov::Tensor> pfe_input_tensor_map_;
  ov::Tensor pfe_output_tensor_;
  ov::Tensor scattered_feature_tensor_;
  ov::Tensor rpn_output_tensors_[3];

  // Device-side buffers holding the raw network outputs
  float *pfe_output_;
  float *rpn_1_output_;
  float *rpn_2_output_;
  float *rpn_3_output_;

  void InitComponents();

  /**
   * @brief Memory allocation for device memory
   * @details Called in the constructor
   */
  void DeviceMemoryMalloc();

  /**
   * @brief Preprocess points
   * @param[in] in_points_array pointcloud array
   * @param[in] in_num_points Number of points
   * @details Call oneAPI preprocess
   */
  void PreProcessing(const float *in_points_array, const int in_num_points);

  /**
   * @brief Setup the PFE executable network
   * @details Setup the PFE network
   */
  void SetupPfeNetwork();

  /**
   * @brief Setup the RPN executable network
   * @param[in] resize_input If false, the network is not adapted to input size changes
   * @details Setup the RPN network
   */
  void SetupRpnNetwork(bool resize_input);
};

}  // namespace pointpillars
hpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/include/pointpillars/pointpillars_util.hpp
//==============================================================
// Copyright © 2020-2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#pragma once

#include <vector>

namespace pointpillars {

/**
 * 3D-Object representation
 *
 * Plain value type describing one detected object: position (x, y, z),
 * box dimensions (length, width, height), heading angle (yaw), the
 * predicted class id and its likelihood, plus optional per-class scores.
 */
struct ObjectDetection {
  // Default-constructed detections sit at the origin with unit-sized box,
  // class 0 and full likelihood (values supplied by the member initializers).
  ObjectDetection() = default;

  /**
   * @brief Construct a fully specified detection
   * @param[in] _x          Position along x
   * @param[in] _y          Position along y
   * @param[in] _z          Position along z
   * @param[in] _l          Box length
   * @param[in] _w          Box width
   * @param[in] _h          Box height
   * @param[in] _yaw        Heading angle
   * @param[in] _class_id   Predicted class index
   * @param[in] _likelihood Detection confidence
   */
  ObjectDetection(float _x, float _y, float _z, float _l, float _w, float _h, float _yaw, int _class_id,
                  float _likelihood)
      : x(_x),
        y(_y),
        z(_z),
        length(_l),
        width(_w),
        height(_h),
        yaw(_yaw),
        class_id(_class_id),
        likelihood(_likelihood) {}

  float x{0.f};
  float y{0.f};
  float z{0.f};
  float length{1.f};
  float width{1.f};
  float height{1.f};
  float yaw{1.f};
  int class_id{0};
  float likelihood{1.f};

  // Optional per-class probability scores; empty unless filled by the caller.
  std::vector<float> class_probabilities;
};

}  // namespace pointpillars
hpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/include/pointpillars/nms.hpp
/* * Copyright 2018-2019 Autoware Foundation. All rights reserved. * Copyright (c) 2019-2021 Intel Corporation (oneAPI modifications) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <sycl/sycl.hpp> #include "pointpillars/common.hpp" namespace pointpillars { /** * Non-Maximum-Suppression * * Non-maximum suppression (NMS) is a way to eliminate points that do not lie in the important edges * of detected data. Here NMS is used to filter out overlapping object detections. Therefore, an * intersection-over-union (IOU) approach is used to caculate the overlap of two objects. At the end, * only the most relevant objects are kept. 
*/ class NMS { private: const int num_threads_; // Number of threads used to execute the NMS kernel const int num_box_corners_; // Number of corners of a 2D box const float nms_overlap_threshold_; // Threshold below which objects are discarded public: /** * @brief Constructor * @param[in] num_threads Number of threads when launching kernel * @param[in] num_box_corners Number of corners for 2D box * @param[in] nms_overlap_threshold IOU threshold for NMS */ NMS(const int num_threads, const int num_box_corners, const float nms_overlap_threshold); /** * @brief Execute Non-Maximum Suppresion for network output * @param[in] host_filter_count Number of filtered output * @param[in] dev_sorted_box_for_nms Bounding box output sorted by score * @param[out] out_keep_inds Indexes of selected bounding box * @param[out] out_num_to_keep Number of kept bounding boxes */ void DoNMS(const size_t host_filter_count, float *dev_sorted_box_for_nms, int *out_keep_inds, size_t &out_num_to_keep); private: /** * @brief Parallel Non-Maximum Suppresion for network output using SYCL GPU * @details Parallel NMS and postprocessing for selecting box */ void ParallelNMS(const size_t host_filter_count, float *dev_sorted_box_for_nms, int *out_keep_inds, size_t &out_num_to_keep); /** * @brief Sequential Non-Maximum Suppresion for network output in SYCL CPU or Host device */ void SequentialNMS(const size_t host_filter_count, float *dev_sorted_box_for_nms, int *out_keep_inds, size_t &out_num_to_keep); }; } // namespace pointpillars
hpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/include/pointpillars/postprocess.hpp
/* * Copyright 2018-2019 Autoware Foundation. All rights reserved. * Copyright (c) 2019-2021 Intel Corporation (oneAPI modifications) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <sycl/sycl.hpp> #include <memory> #include <vector> #include "pointpillars/nms.hpp" #include "pointpillars/pointpillars_util.hpp" namespace pointpillars { /** * PointPillar's PostProcessing * * Use the output of the RegionProposalNetwork and the * anchors generated by the AnchorGrid to decode the * object position, dimension and class, filter out * redundant/clutter objects using NMS and sort them * according to likelihood. Finally convert into * object representation. 
*/ class PostProcess { private: const float float_min_; const float float_max_; const size_t num_anchor_x_inds_; const size_t num_anchor_y_inds_; const size_t num_anchor_r_inds_; const size_t num_cls_; const float score_threshold_; const size_t num_threads_; const size_t num_box_corners_; const size_t num_output_box_feature_; std::unique_ptr<NMS> nms_ptr_; public: /** * @brief Constructor * @param[in] float_min The lowest float value * @param[in] float_max The maximum float value * @param[in] num_anchor_x_inds Number of x-indexes for anchors * @param[in] num_anchor_y_inds Number of y-indexes for anchors * @param[in] num_anchor_r_inds Number of rotation-indexes for anchors * @param[in] score_threshold Score threshold for filtering output * @param[in] num_threads Number of threads when launching kernel * @param[in] nms_overlap_threshold IOU threshold for NMS * @param[in] num_box_corners Number of box's corner * @param[in] num_output_box_feature Number of output box's feature */ PostProcess(const float float_min, const float float_max, const size_t num_anchor_x_inds, const size_t num_anchor_y_inds, const size_t num_anchor_r_inds, const size_t num_cls, const float score_threshold, const size_t num_threads, const float nms_overlap_threshold, const size_t num_box_corners, const size_t num_output_box_feature); /** * @brief Postprocessing for the network output * @param[in] rpn_box_output Box predictions from the network output * @param[in] rpn_cls_output Class predictions from the network output * @param[in] rpn_dir_output Direction predictions from the network output * @param[in] dev_anchor_mask Anchor mask for filtering the network output * @param[in] dev_anchors_px X-coordinate values for corresponding anchors * @param[in] dev_anchors_py Y-coordinate values for corresponding anchors * @param[in] dev_anchors_pz Z-coordinate values for corresponding anchors * @param[in] dev_anchors_dx X-dimension values for corresponding anchors * @param[in] dev_anchors_dy Y-dimension 
values for corresponding anchors * @param[in] dev_anchors_dz Z-dimension values for corresponding anchors * @param[in] dev_anchors_ro Rotation values for corresponding anchors * @param[in] dev_filtered_box Filtered box predictions * @param[in] dev_filtered_score Filtered score predictions * @param[in] dev_filtered_dir Filtered direction predictions * @param[in] dev_box_for_nms Decoded boxes in min_x min_y max_x max_y represenation from pose and dimension * @param[in] dev_filter_count The number of filtered output * @param[out] out_detection Output bounding boxes * @details dev_* represents device memory allocated variables */ void DoPostProcess(const float *rpn_box_output, const float *rpn_cls_output, const float *rpn_dir_output, int *dev_anchor_mask, const float *dev_anchors_px, const float *dev_anchors_py, const float *dev_anchors_pz, const float *dev_anchors_dx, const float *dev_anchors_dy, const float *dev_anchors_dz, const float *dev_anchors_ro, float *dev_multiclass_score, float *dev_filtered_box, float *dev_filtered_score, int *dev_filtered_dir, int *dev_filtered_class_id, float *dev_box_for_nms, int *dev_filter_count, std::vector<ObjectDetection> &detections); }; } // namespace pointpillars
hpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/include/pointpillars/pointpillars_config.hpp
//==============================================================
// Copyright © 2020-2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#pragma once

#include <cmath>
#include <string>
#include <vector>

namespace pointpillars {

/**
 * Anchor representation
 *
 * Describes an anchor box by its size along x/y/z and (optionally) its
 * position in z. Anchors are replicated across the grid by the AnchorGrid.
 */
struct Anchor {
  /**
   * @brief Constructor (anchor at default z-position)
   * @param[in] _x Size along x
   * @param[in] _y Size along y
   * @param[in] _z Size along z
   */
  Anchor(float _x, float _y, float _z) : x(_x), y(_y), z(_z), dz(0.){};

  /**
   * @brief Constructor
   * @param[in] _x Size along x
   * @param[in] _y Size along y
   * @param[in] _z Size along z
   * @param[in] _dz Position in z
   */
  Anchor(float _x, float _y, float _z, float _dz) : x(_x), y(_y), z(_z), dz(_dz){};

  float x{1.f};   // size along x
  float y{1.f};   // size along y
  float z{1.f};   // size along z
  float dz{0.f};  // position in z
};

/**
 * Configuration of the anchor grid generation.
 */
struct AnchorGridConfig {
  float min_x_range{0.0f};  // defines the area covered
  float max_x_range{1.0f};  // defines the area covered
  float min_y_range{0.0f};  // defines the area covered
  float max_y_range{1.0f};  // defines the area covered
  float min_z_range{0.0f};  // defines the area covered
  float max_z_range{1.0f};  // defines the area covered

  float x_stride{0.01f};  // spacing between anchors along x
  float y_stride{0.01f};  // spacing between anchors along y

  // Anchor shapes to replicate at every grid location
  std::vector<Anchor> anchors = {Anchor(1.0f, 2.0f, 1.5f)};

  std::vector<float> rotations = {0.f, M_PI_2};  // The set of rotations to in which the anchors should be generated
};

/**
 * Configuration of the whole PointPillars pipeline:
 * model files, covered area, voxelization/pillar geometry and
 * network input/output dimensions.
 */
struct PointPillarsConfig {
  std::string pfe_model_file{"pfe"};  // Pillar Feature Extractor model file (base name)
  std::string rpn_model_file{"rpn"};  // Region Proposal Network model file (base name)
  float min_x_range{0.f};        // defines the area covered by the algorithm
  float max_x_range{69.12f};     // defines the area covered by the algorithm
  float min_y_range{-39.68f};    // defines the area covered by the algorithm
  float max_y_range{39.68f};     // defines the area covered by the algorithm
  float min_z_range{-3.0f};      // defines the area covered by the algorithm
  float max_z_range{1.0f};       // defines the area covered by the algorithm
  float rpn_scale{0.5f};         // The scaling factor that the RPN is applying = 1 / (final convolution stride)
  float pillar_x_size{0.16f};    // pillar voxelization size along X
  float pillar_y_size{0.16f};    // pillar voxelization size along Y
  float pillar_z_size{4.0f};     // pillar voxelization size along Z
  float x_stride{0.32f};         // spacing between pillars along x
  float y_stride{0.32f};         // spacing between pillars along y
  std::size_t max_num_pillars{12000};   // maximum number of pillars fed to the network
  std::size_t num_classes{1};           // number of detectable object classes
  std::vector<Anchor> anchors = {Anchor(1.6f, 3.9f, 1.56f)};  // anchor shapes, one per class
  std::vector<std::string> classes = {"Car"};                 // class names, parallel to anchors
  std::size_t max_num_points_per_pillar{100};
  std::size_t pillar_features{64};  // number of learned features per pillar
  std::size_t grid_x_size{432};  // (max_x_range - min_x_range) / pillar_x_size
  std::size_t grid_y_size{496};  // (max_y_range - min_y_range) / pillar_y_size
  std::size_t grid_z_size{1};    // (max_z_range - min_z_range) / pillar_z_size
};

}  // namespace pointpillars
hpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/include/pointpillars/scatter.hpp
/*
 * Copyright 2018-2019 Autoware Foundation. All rights reserved.
 * Copyright (c) 2019-2021 Intel Corporation (oneAPI modifications)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <sycl/sycl.hpp>

namespace pointpillars {

/**
 * PointPillar's Scatter.
 * Converts learned features (output from PFE network = 1st CNN) from dense tensors
 * to sparse pseudo image.
 */
class Scatter {
 private:
  const int num_features_;     // The number of features per pillar
  const int max_num_pillars_;  // Maximum number of pillars
  const int grid_x_size_;      // Number of pillars in x-coordinate
  const int grid_y_size_;      // Number of pillars in y-coordinate

 public:
  /**
   * @brief Constructor
   * @param[in] num_features The number of features per pillar
   * @param[in] max_num_pillars Maximum number of pillars
   * @param[in] grid_x_size Number of pillars in x-coordinate
   * @param[in] grid_y_size Number of pillars in y-coordinate
   */
  Scatter(const int num_features, const int max_num_pillars, const int grid_x_size, const int grid_y_size);

  /**
   * @brief Call scatter kernel
   * @param[in] pillar_count The valid number of pillars
   * @param[in] x_coors X-coordinate indexes for corresponding pillars
   * @param[in] y_coors Y-coordinate indexes for corresponding pillars
   * @param[in] pfe_output Output from Pillar Feature Extractor
   * @param[out] scattered_feature Gridmap representation for pillars' feature
   * @details Allocate pillars in gridmap based on index(coordinates) information
   */
  void DoScatter(const int pillar_count, int *x_coors, int *y_coors, float *pfe_output, float *scattered_feature);
};

}  // namespace pointpillars
hpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/include/pointpillars/scan.hpp
/* * Copyright 2018-2019 Autoware Foundation. All rights reserved. * Copyright (c) 2019-2021 Intel Corporation (oneAPI modifications) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <sycl/sycl.hpp> #include <cstdint> namespace pointpillars { // Prefix sum in 2D coordinates // // These functions calculate the cumulative sum along X or Y in a 2D array // // X---> // W // Y o------------- // | | // | H | // v | // | // // For details about the algorithm please check: // Sengupta, Shubhabrata & Lefohn, Aaron & Owens, John. (2006). A Work-Efficient Step-Efficient Prefix Sum Algorithm. // // Prefix in x-direction, calculates the cumulative sum along x void ScanX(int *dev_output, const int *dev_input, int w, int h, int n); // Prefix in y-direction, calculates the cumulative sum along y void ScanY(int *dev_output, const int *dev_input, int w, int h, int n); } // namespace pointpillars
hpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/include/pointpillars/common.hpp
/* * Copyright 2018-2019 Autoware Foundation. All rights reserved. * Copyright (c) 2019-2021 Intel Corporation (oneAPI modifications) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <sycl/sycl.hpp> // using MACRO to allocate memory inside kernel #define NUM_3D_BOX_CORNERS_MACRO 8 #define NUM_2D_BOX_CORNERS_MACRO 4 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) // Performs atomic fetch and add operation using SYCL // Calls add on SYCL atomic object inline int AtomicFetchAdd(int *addr, int operand) { sycl::atomic<int, sycl::access::address_space::global_space> obj( (sycl::multi_ptr<int, sycl::access::address_space::global_space>(addr))); return sycl::atomic_fetch_add(obj, operand, sycl::memory_order::relaxed); } // Returns the next power of 2 for a given number uint32_t inline NextPower(uint32_t v) { v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; v++; return v; }
hpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/include/pointpillars/preprocess.hpp
/*
 * Copyright 2018-2019 Autoware Foundation. All rights reserved.
 * Copyright (c) 2019-2021 Intel Corporation (oneAPI modifications)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <sycl/sycl.hpp>

namespace pointpillars {

/**
 * PointPillar's PreProcessing
 *
 * Convert 3D point cloud data into 2D-grid/pillar form
 * to be able to feed it into the PillarFeatureNetwork
 */
class PreProcess {
 private:
  // initializer list
  const int max_num_pillars_;             // Maximum number of pillars
  const int max_num_points_per_pillar_;   // Maximum number of points kept per pillar
  const int grid_x_size_;                 // Number of pillars in x-coordinate
  const int grid_y_size_;                 // Number of pillars in y-coordinate
  const int grid_z_size_;                 // Number of pillars in z-coordinate
  const float pillar_x_size_;             // Size of a pillar along x
  const float pillar_y_size_;             // Size of a pillar along y
  const float pillar_z_size_;             // Size of a pillar along z
  const float min_x_range_;               // Minimum x value of the covered area
  const float min_y_range_;               // Minimum y value of the covered area
  const float min_z_range_;               // Minimum z value of the covered area
  // end initializer list

  // Device memory working buffers (allocated in the constructor, freed in the destructor)
  float *dev_pillar_x_in_coors_;    // Per-grid-cell x values of the assigned points
  float *dev_pillar_y_in_coors_;    // Per-grid-cell y values of the assigned points
  float *dev_pillar_z_in_coors_;    // Per-grid-cell z values of the assigned points
  float *dev_pillar_i_in_coors_;    // Per-grid-cell intensity values of the assigned points
  int *dev_pillar_count_histo_;     // Histogram of point counts per grid cell
  int *dev_counter_;                // Atomic counter used while building pillars
  int *dev_pillar_count_;           // Number of valid pillars found
  float *dev_x_coors_for_sub_;      // Pillar-center x values used for coordinate subtraction
  float *dev_y_coors_for_sub_;      // Pillar-center y values used for coordinate subtraction

 public:
  /**
   * @brief Constructor
   * @param[in] max_num_pillars Maximum number of pillars
   * @param[in] max_points_per_pillar Maximum number of points per pillar
   * @param[in] grid_x_size Number of pillars in x-coordinate
   * @param[in] grid_y_size Number of pillars in y-coordinate
   * @param[in] grid_z_size Number of pillars in z-coordinate
   * @param[in] pillar_x_size Size of x-dimension for a pillar
   * @param[in] pillar_y_size Size of y-dimension for a pillar
   * @param[in] pillar_z_size Size of z-dimension for a pillar
   * @param[in] min_x_range Minimum x value for pointcloud
   * @param[in] min_y_range Minimum y value for pointcloud
   * @param[in] min_z_range Minimum z value for pointcloud
   */
  PreProcess(const int max_num_pillars, const int max_points_per_pillar, const int grid_x_size,
             const int grid_y_size, const int grid_z_size, const float pillar_x_size, const float pillar_y_size,
             const float pillar_z_size, const float min_x_range, const float min_y_range, const float min_z_range);

  ~PreProcess();

  /**
   * @brief Preprocessing for input pointcloud
   * @param[in] dev_points Pointcloud array
   * @param[in] in_num_points The number of points
   * @param[in] dev_x_coors X-coordinate indexes for corresponding pillars
   * @param[in] dev_y_coors Y-coordinate indexes for corresponding pillars
   * @param[in] dev_num_points_per_pillar Number of points in corresponding pillars
   * @param[in] dev_pillar_x X-coordinate values for points in each pillar
   * @param[in] dev_pillar_y Y-coordinate values for points in each pillar
   * @param[in] dev_pillar_z Z-coordinate values for points in each pillar
   * @param[in] dev_pillar_i Intensity values for points in each pillar
   * @param[in] dev_x_coors_for_sub_shaped Array for x subtraction in the network
   * @param[in] dev_y_coors_for_sub_shaped Array for y subtraction in the network
   * @param[in] dev_pillar_feature_mask Mask to make pillars' feature zero where no points in the pillars
   * @param[in] dev_sparse_pillar_map Grid map representation for pillar-occupancy
   * @param[in] host_pillar_count The number of valid pillars for an input pointcloud
   * @details Convert pointcloud to pillar representation
   */
  void DoPreProcess(const float *dev_points, const int in_num_points, int *dev_x_coors, int *dev_y_coors,
                    float *dev_num_points_per_pillar, float *dev_pillar_x, float *dev_pillar_y, float *dev_pillar_z,
                    float *dev_pillar_i, float *dev_x_coors_for_sub_shaped, float *dev_y_coors_for_sub_shaped,
                    float *dev_pillar_feature_mask, int *dev_sparse_pillar_map, int *host_pillar_count);
};

}  // namespace pointpillars
hpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/include/pointpillars/anchorgrid.hpp
/* * Copyright 2018-2019 Autoware Foundation. All rights reserved. * Copyright (c) 2019-2021 Intel Corporation (oneAPI modifications) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <sycl/sycl.hpp> #include <cmath> #include <string> #include <vector> #include "pointpillars/pointpillars_config.hpp" namespace pointpillars { /** * AnchorGrid * * The AnchorGrid class generates anchors in different sizes and orientations for every location in the grid. * Anchor based methods are used in object detection in which a list of predefined boxes are refined by a CNN. 
* */ class AnchorGrid { public: /** * @brief Constructor * * Class used to generate the anchor grid which is used as a prior box list during object detection * * @param[in] config Configuration used to generate anchors */ AnchorGrid(AnchorGridConfig &config); ~AnchorGrid(); AnchorGridConfig config_; // Pointers to device memory locations for the anchors float *dev_anchors_px_{nullptr}; float *dev_anchors_py_{nullptr}; float *dev_anchors_pz_{nullptr}; float *dev_anchors_dx_{nullptr}; float *dev_anchors_dy_{nullptr}; float *dev_anchors_dz_{nullptr}; float *dev_anchors_ro_{nullptr}; // Get size/number of anchors std::size_t size() { return num_anchors_; } // Generate default anchors void GenerateAnchors(); // Creates an anchor mask that can be used to ignore anchors in regions without points // Input is the current pillar map (map, width, height, size in x, size in y, size in z) // Output are the created anchors void CreateAnchorMask(int *dev_pillar_map, const int pillar_map_w, const int pillar_map_h, const float pillar_size_x, const float pillar_size_y, int *dev_anchor_mask, int *dev_pillar_workspace); private: std::size_t num_anchors_{0u}; std::size_t mh_{0u}; std::size_t mw_{0u}; std::size_t mc_{0u}; std::size_t mr_{0u}; // Anchor pointers on the host // Only required for initialization float *dev_anchors_rad_{nullptr}; float *host_anchors_px_{nullptr}; float *host_anchors_py_{nullptr}; float *host_anchors_pz_{nullptr}; float *host_anchors_dx_{nullptr}; float *host_anchors_dy_{nullptr}; float *host_anchors_dz_{nullptr}; float *host_anchors_ro_{nullptr}; float *host_anchors_rad_; // Clear host memory void ClearHostMemory(); // Clear device memory void ClearDeviceMemory(); // Allocate host memory void AllocateHostMemory(); // Allocate device memory void AllocateDeviceMemory(); // Move anchors from the host system to the target execution device void MoveAnchorsToDevice(); // Internal function to create anchor mask void MaskAnchors(const float *dev_anchors_px, const 
float *dev_anchors_py, const int *dev_pillar_map, int *dev_anchor_mask, const float *dev_anchors_rad, const float min_x_range, const float min_y_range, const float pillar_x_size, const float pillar_y_size, const int grid_x_size, const int grid_y_size, const int c, const int r, const int h, const int w); }; } // namespace pointpillars
hpp
oneAPI-samples
data/projects/oneAPI-samples/AI-and-Analytics/End-to-end-Workloads/LidarObjectDetection-PointPillars/include/devicemanager/devicemanager.hpp
//==============================================================
// Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#pragma once

#include <sycl/sycl.hpp>

#include <iostream>

namespace devicemanager {

// Report all available SYCL devices
// Prints one line per device to stdout, grouped by device type.
inline void GetDevices() {
  std::cout << "Available devices: \n";
  // Query all SYCL devices in the system
  for (const auto &device : sycl::device::get_devices()) {
    switch (device.get_info<sycl::info::device::device_type>()) {
      case sycl::info::device_type::cpu:
        std::cout << " CPU: " << device.get_info<sycl::info::device::name>() << "\n";
        break;
      case sycl::info::device_type::gpu:
        std::cout << " GPU: " << device.get_info<sycl::info::device::name>() << "\n";
        break;
      case sycl::info::device_type::host:
        std::cout << " Host (single-threaded CPU)\n";
        break;
      case sycl::info::device_type::accelerator:
        // The kernels were not tested for accelerators, only for Host, CPU and GPU
        std::cout << " Accelerator (not supported): " << device.get_info<sycl::info::device::name>() << "\n";
        break;
      default:
        std::cout << " Unknown (not supported): " << device.get_info<sycl::info::device::name>() << "\n";
        break;
    }
  }
}

// Singleton DeviceManager
// Ensures consistent use of same SYCL device and SYCL queue among all kernels and subroutines
// Allows user transparent device selection via command line
// NOTE(review): not synchronized — assumes device selection happens once,
// before concurrent use; confirm against callers.
class DeviceManager {
 public:
  // get the currently active device
  sycl::device &GetCurrentDevice() { return current_device_; }

  // get the currently used device queue
  sycl::queue &GetCurrentQueue() { return current_queue_; }

  // select a new device and queue
  // @return true on success, false otherwise
  // @details currently only SYCL Host device, or SYCL CPU/GPU device are supported
  bool SelectDevice(const sycl::info::device_type &device_type) {
    // loop over all SYCL devices and choose the required one (if available)
    // (when several devices match, the last one enumerated wins)
    for (const auto &device : sycl::device::get_devices()) {
      if (device.get_info<sycl::info::device::device_type>() == device_type) {
        current_device_ = device;
      }
    }
    // if the desired device was not chosen, provide a warning
    if (current_device_.get_info<sycl::info::device::device_type>() != device_type) {
      std::cout << "Requested device not available \n";
      GetDevices();
      return false;  // current_device_ keeps its previous value
    } else {
      if (current_device_.is_host()) {
        std::cout << "Using Host device (single-threaded CPU)\n";
      } else {
        std::cout << "Using " << current_device_.get_info<sycl::info::device::name>() << "\n";
      }
      // recreate the shared queue so subsequent kernels run on the new device
      current_queue_ = sycl::queue(current_device_);
      return true;
    }
  }

  // Returns the instance of device manager singleton.
  static DeviceManager &instance() {
    static DeviceManager device_manager;
    return device_manager;
  }

  // DeviceManager is a singleton
  // remove all constructors
  DeviceManager(const DeviceManager &) = delete;
  DeviceManager &operator=(const DeviceManager &) = delete;
  DeviceManager(DeviceManager &&) = delete;
  DeviceManager &operator=(DeviceManager &&) = delete;

 private:
  // Start with the runtime's default device; callers may override via SelectDevice().
  DeviceManager() { current_device_ = sycl::device(sycl::default_selector{}); }

  sycl::device current_device_;  // SYCL device used by all kernels/operations
  sycl::queue current_queue_;    // SYCL queue used by all kernels/operations
};

// Get current queue for current device
inline sycl::queue &GetCurrentQueue() { return DeviceManager::instance().GetCurrentQueue(); }

// Get current device
inline sycl::device &GetCurrentDevice() { return DeviceManager::instance().GetCurrentDevice(); }

// Select a different device
inline bool SelectDevice(const sycl::info::device_type &device_type) {
  return DeviceManager::instance().SelectDevice(device_type);
}

}  // namespace devicemanager
hpp
oneAPI-samples
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/memory-movement/vec-buffer-host.cpp
//============================================================== // Copyright © 2022 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <array> #include <chrono> #include <iostream> #include <CL/sycl.hpp> #include "align.hpp" template <typename T> using VectorAllocator = AlignedAllocator<T>; template <typename T> using AlignedVector = std::vector<T, VectorAllocator<T>>; constexpr size_t array_size = (1 << 15); // Snippet1 Begin int VectorAdd0(sycl::queue &q, AlignedVector<int> &a, AlignedVector<int> &b, AlignedVector<int> &sum, int iter) { sycl::range num_items{a.size()}; const sycl::property_list props = {sycl::property::buffer::use_host_ptr()}; for (int i = 0; i < iter; i++) { sycl::buffer a_buf(a, props); sycl::buffer b_buf(b, props); sycl::buffer sum_buf(sum.data(), num_items, props); { sycl::host_accessor a_host_acc(a_buf); std::cout << "add0: buff memory address =" << a_host_acc.get_pointer() << "\n"; std::cout << "add0: address of vector a = " << a.data() << "\n"; } q.submit([&](auto &h) { // Input accessors sycl::accessor a_acc(a_buf, h, sycl::read_only); sycl::accessor b_acc(b_buf, h, sycl::read_only); // Output accessor sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init); sycl::stream out(1024 * 1024, 1 * 128, h); h.parallel_for(num_items, [=](auto i) { if (i[0] == 0) out << "add0: dev addr = " << a_acc.get_pointer() << "\n"; sum_acc[i] = a_acc[i] + b_acc[i]; }); }); } q.wait(); return (0); } // Snippet1 End // Snippet2 Begin int VectorAdd1(sycl::queue &q, const AlignedVector<int> &a, const AlignedVector<int> &b, AlignedVector<int> &sum, int iter) { sycl::range num_items{a.size()}; const sycl::property_list props = {sycl::property::buffer::use_host_ptr()}; for (int i = 0; i < iter; i++) { sycl::buffer a_buf(a, props); sycl::buffer b_buf(b, props); sycl::buffer sum_buf(sum.data(), num_items, props); { sycl::host_accessor a_host_acc(a_buf); std::cout << "add1: buff 
memory address =" << a_host_acc.get_pointer() << "\n"; std::cout << "add1: address of vector aa = " << a.data() << "\n"; } q.submit([&](auto &h) { // Input accessors sycl::accessor a_acc(a_buf, h, sycl::read_only); sycl::accessor b_acc(b_buf, h, sycl::read_only); // Output accessor sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init); sycl::stream out(16 * 1024, 16 * 1024, h); h.parallel_for(num_items, [=](auto i) { if (i[0] == 0) out << "add1: dev addr = " << a_acc.get_pointer() << "\n"; sum_acc[i] = a_acc[i] + b_acc[i]; }); }); } q.wait(); return (0); } // Snippet2 End // Snippet3 Begin int VectorAdd2(sycl::queue &q, AlignedVector<int> &a, AlignedVector<int> &b, AlignedVector<int> &sum, int iter) { sycl::range num_items{a.size()}; const sycl::property_list props = {sycl::property::buffer::use_host_ptr()}; auto start = std::chrono::steady_clock::now(); for (int i = 0; i < iter; i++) { sycl::buffer a_buf(a, props); sycl::buffer b_buf(b, props); sycl::buffer sum_buf(sum.data(), num_items, props); q.submit([&](auto &h) { // Input accessors sycl::accessor a_acc(a_buf, h, sycl::read_only); sycl::accessor b_acc(b_buf, h, sycl::read_only); // Output accessor sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init); h.parallel_for(num_items, [=](auto i) { sum_acc[i] = a_acc[i] + b_acc[i]; }); }); } q.wait(); auto end = std::chrono::steady_clock::now(); std::cout << "Vector add2 completed on device - took " << (end - start).count() << " u-secs\n"; return ((end - start).count()); } // Snippet3 End // Snippet4 Begin int VectorAdd3(sycl::queue &q, const AlignedVector<int> &a, const AlignedVector<int> &b, AlignedVector<int> &sum, int iter) { sycl::range num_items{a.size()}; auto start = std::chrono::steady_clock::now(); for (int i = 0; i < iter; i++) { sycl::buffer a_buf(a); sycl::buffer b_buf(b); sycl::buffer sum_buf(sum.data(), num_items); auto e = q.submit([&](auto &h) { // Input accessors sycl::accessor a_acc(a_buf, h, sycl::read_only); 
sycl::accessor b_acc(b_buf, h, sycl::read_only); // Output accessor sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init); h.parallel_for(num_items, [=](auto i) { sum_acc[i] = a_acc[i] + b_acc[i]; }); }); } q.wait(); auto end = std::chrono::steady_clock::now(); std::cout << "Vector add3 completed on device - took " << (end - start).count() << " u-secs\n"; return ((end - start).count()); } // Snippet4 End void InitializeArray(AlignedVector<int> &a) { for (size_t i = 0; i < a.size(); i++) a[i] = i; } void Initialize(AlignedVector<int> &a) { for (size_t i = 0; i < a.size(); i++) a[i] = 0; } int main() { sycl::queue q(sycl::default_selector_v); VectorAllocator<int> alloc; AlignedVector<int> a(array_size, alloc); AlignedVector<int> b(array_size, alloc); AlignedVector<int> sum(array_size, alloc); InitializeArray(a); InitializeArray(b); std::cout << "Running on device: " << q.get_device().get_info<sycl::info::device::name>() << "\n"; std::cout << "Vector size: " << a.size() << "\n"; // jit the code VectorAdd1(q, a, b, sum, 1); // check results Initialize(sum); VectorAdd1(q, a, b, sum, 1); for (size_t i = 0; i < a.size(); i++) if (sum[i] != static_cast<int>(2 * i)) { std::cout << "add1 Did not match\n"; } Initialize(sum); VectorAdd0(q, a, b, sum, 1); for (size_t i = 0; i < a.size(); i++) if (sum[i] != static_cast<int>(2 * i)) { std::cout << "add0 Did not match\n"; } Initialize(sum); VectorAdd2(q, a, b, sum, 1000); Initialize(sum); VectorAdd3(q, a, b, sum, 1000); return 0; }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/memory-movement/align.hpp
//============================================================== // Copyright © 2022 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #ifndef __ALIGN #define __ALIGN 1 enum class Alignment : size_t { Normal = sizeof(void *), SSE = 16, AVX = 32, PAGE = 4096, }; namespace detail { void *allocate_aligned_memory(size_t align, size_t size); void deallocate_aligned_memory(void *ptr) noexcept; } // namespace detail template <typename T, Alignment Align = Alignment::PAGE> class AlignedAllocator; template <Alignment Align> class AlignedAllocator<void, Align> { public: typedef void *pointer; typedef const void *const_pointer; typedef void value_type; template <class U> struct rebind { typedef AlignedAllocator<U, Align> other; }; }; template <typename T, Alignment Align> class AlignedAllocator { public: typedef T value_type; typedef T *pointer; typedef const T *const_pointer; typedef T &reference; typedef const T &const_reference; typedef size_t size_type; typedef ptrdiff_t difference_type; typedef std::true_type propagate_on_container_move_assignment; template <class U> struct rebind { typedef AlignedAllocator<U, Align> other; }; public: AlignedAllocator() noexcept {} template <class U> AlignedAllocator(const AlignedAllocator<U, Align> &) noexcept {} size_type max_size() const noexcept { return (size_type(~0) - size_type(Align)) / sizeof(T); } pointer address(reference x) const noexcept { return std::addressof(x); } const_pointer address(const_reference x) const noexcept { return std::addressof(x); } pointer allocate(size_type n, typename AlignedAllocator<void, Align>::const_pointer = 0) { const size_type alignment = static_cast<size_type>(Align); void *ptr = detail::allocate_aligned_memory(alignment, n * sizeof(T)); if (ptr == nullptr) { throw std::bad_alloc(); } return reinterpret_cast<pointer>(ptr); } void deallocate(pointer p, size_type) noexcept { return detail::deallocate_aligned_memory(p); } template 
<class U, class... Args> void construct(U *p, Args &&...args) { ::new (reinterpret_cast<void *>(p)) U(std::forward<Args>(args)...); } void destroy(pointer p) { p->~T(); } }; template <typename T, Alignment Align> class AlignedAllocator<const T, Align> { public: typedef T value_type; typedef const T *pointer; typedef const T *const_pointer; typedef const T &reference; typedef const T &const_reference; typedef size_t size_type; typedef ptrdiff_t difference_type; typedef std::true_type propagate_on_container_move_assignment; template <class U> struct rebind { typedef AlignedAllocator<U, Align> other; }; public: AlignedAllocator() noexcept {} template <class U> AlignedAllocator(const AlignedAllocator<U, Align> &) noexcept {} size_type max_size() const noexcept { return (size_type(~0) - size_type(Align)) / sizeof(T); } const_pointer address(const_reference x) const noexcept { return std::addressof(x); } pointer allocate(size_type n, typename AlignedAllocator<void, Align>::const_pointer = 0) { const size_type alignment = static_cast<size_type>(Align); void *ptr = detail::allocate_aligned_memory(alignment, n * sizeof(T)); if (ptr == nullptr) { throw std::bad_alloc(); } return reinterpret_cast<pointer>(ptr); } void deallocate(pointer p, size_type) noexcept { return detail::deallocate_aligned_memory(p); } template <class U, class... 
Args> void construct(U *p, Args &&...args) { ::new (reinterpret_cast<void *>(p)) U(std::forward<Args>(args)...); } void destroy(pointer p) { p->~T(); } }; template <typename T, Alignment TAlign, typename U, Alignment UAlign> inline bool operator==(const AlignedAllocator<T, TAlign> &, const AlignedAllocator<U, UAlign> &) noexcept { return TAlign == UAlign; } template <typename T, Alignment TAlign, typename U, Alignment UAlign> inline bool operator!=(const AlignedAllocator<T, TAlign> &, const AlignedAllocator<U, UAlign> &) noexcept { return TAlign != UAlign; } void *detail::allocate_aligned_memory(size_t align, size_t size) { assert(align >= sizeof(void *)); // assert(nail::is_power_of_two(align)); if (size == 0) { return nullptr; } void *ptr = nullptr; int rc = posix_memalign(&ptr, align, size); if (rc != 0) { return nullptr; } return ptr; } void detail::deallocate_aligned_memory(void *ptr) noexcept { return free(ptr); } #endif
hpp
oneAPI-samples
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/host-device-memory/mem-move.cpp
//============================================================== // Copyright © 2022 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <array> #include <chrono> #include <iostream> #include <CL/sycl.hpp> #include "align.hpp" sycl::default_selector d_selector; template <typename T> using VectorAllocator = AlignedAllocator<T>; template <typename T> using AlignedVector = std::vector<T, VectorAllocator<T>>; constexpr size_t array_size = (10 * (1 << 20)); class Timer { public: Timer() : start_(std::chrono::steady_clock::now()) {} double Elapsed() { auto now = std::chrono::steady_clock::now(); return std::chrono::duration_cast<Duration>(now - start_).count(); } private: using Duration = std::chrono::duration<double>; std::chrono::steady_clock::time_point start_; }; int check_res(AlignedVector<int> &v) { for (int i = 0; i < v.size(); i += 2) if (v[i] != 24 || v[i + 1] != 2) return 0; return 1; } double myFunc1(sycl::queue &q, AlignedVector<int> &a, AlignedVector<int> &b, AlignedVector<int> &c, AlignedVector<int> &d, AlignedVector<int> &res, int iter) { sycl::range num_items{a.size()}; VectorAllocator<int> alloc; AlignedVector<int> sum(a.size(), alloc); const sycl::property_list props = {sycl::property::buffer::use_host_ptr()}; sycl::buffer a_buf(a, props); sycl::buffer b_buf(b, props); sycl::buffer c_buf(b, props); sycl::buffer d_buf(b, props); sycl::buffer res_buf(res, props); sycl::buffer sum_buf(sum.data(), num_items, props); Timer timer; for (int i = 0; i < iter; i++) { // kernel1 q.submit([&](auto &h) { // Input accessors sycl::accessor a_acc(a_buf, h, sycl::read_only); sycl::accessor b_acc(b_buf, h, sycl::read_only); // Output accessor sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init); h.parallel_for(num_items, [=](auto id) { sum_acc[id] = a_acc[id] + b_acc[id]; }); }); { sycl::host_accessor h_acc(sum_buf); for (int j = 0; j < a.size(); j++) if (h_acc[j] > 10) h_acc[j] = 
1; else h_acc[j] = 0; } // kernel2 q.submit([&](auto &h) { // Input accessors sycl::accessor sum_acc(sum_buf, h, sycl::read_only); sycl::accessor c_acc(c_buf, h, sycl::read_only); sycl::accessor d_acc(d_buf, h, sycl::read_only); // Output accessor sycl::accessor res_acc(res_buf, h, sycl::write_only, sycl::no_init); h.parallel_for(num_items, [=](auto id) { res_acc[id] = sum_acc[id] * c_acc[id] + d_acc[id]; }); }); q.wait(); } double elapsed = timer.Elapsed() / iter; return (elapsed); } // end myFunc1 double myFunc2(sycl::queue &q, AlignedVector<int> &a, AlignedVector<int> &b, AlignedVector<int> &c, AlignedVector<int> &d, AlignedVector<int> &res, int iter) { sycl::range num_items{a.size()}; VectorAllocator<int> alloc; AlignedVector<int> sum(a.size(), alloc); const sycl::property_list props = {sycl::property::buffer::use_host_ptr()}; sycl::buffer a_buf(a, props); sycl::buffer b_buf(b, props); sycl::buffer c_buf(b, props); sycl::buffer d_buf(b, props); sycl::buffer res_buf(res, props); sycl::buffer sum_buf(sum.data(), num_items, props); Timer timer; for (int i = 0; i < iter; i++) { // kernel1 q.submit([&](auto &h) { // Input accessors sycl::accessor a_acc(a_buf, h, sycl::read_only); sycl::accessor b_acc(b_buf, h, sycl::read_only); // Output accessor sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init); h.parallel_for(num_items, [=](auto i) { sum_acc[i] = a_acc[i] + b_acc[i]; }); }); // kernel3 q.submit([&](auto &h) { sycl::accessor sum_acc(sum_buf, h, sycl::read_write); h.parallel_for(num_items, [=](auto id) { if (sum_acc[id] > 10) sum_acc[id] = 1; else sum_acc[id] = 0; }); }); // kernel2 q.submit([&](auto &h) { // Input accessors sycl::accessor sum_acc(sum_buf, h, sycl::read_only); sycl::accessor c_acc(c_buf, h, sycl::read_only); sycl::accessor d_acc(d_buf, h, sycl::read_only); // Output accessor sycl::accessor res_acc(res_buf, h, sycl::write_only, sycl::no_init); h.parallel_for(num_items, [=](auto i) { res_acc[i] = sum_acc[i] * c_acc[i] + d_acc[i]; }); 
}); q.wait(); } double elapsed = timer.Elapsed() / iter; return (elapsed); } // end myFunc2 double myFunc3(sycl::queue &q, AlignedVector<int> &a, AlignedVector<int> &b, AlignedVector<int> &c, AlignedVector<int> &d, AlignedVector<int> &res, int iter) { sycl::range num_items{a.size()}; VectorAllocator<int> alloc; AlignedVector<int> sum(a.size(), alloc); const sycl::property_list props = {sycl::property::buffer::use_host_ptr()}; sycl::buffer a_buf(a, props); sycl::buffer b_buf(b, props); sycl::buffer c_buf(b, props); sycl::buffer d_buf(b, props); sycl::buffer res_buf(res, props); sycl::buffer sum_buf(sum.data(), num_items, props); Timer timer; for (int i = 0; i < iter; i++) { // kernel1 q.submit([&](auto &h) { // Input accessors sycl::accessor a_acc(a_buf, h, sycl::read_only); sycl::accessor b_acc(b_buf, h, sycl::read_only); // Output accessor sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init); h.parallel_for(num_items, [=](auto i) { int t = a_acc[i] + b_acc[i]; if (t > 10) sum_acc[i] = 1; else sum_acc[i] = 0; }); }); // kernel2 q.submit([&](auto &h) { // Input accessors sycl::accessor sum_acc(sum_buf, h, sycl::read_only); sycl::accessor c_acc(c_buf, h, sycl::read_only); sycl::accessor d_acc(d_buf, h, sycl::read_only); // Output accessor sycl::accessor res_acc(res_buf, h, sycl::write_only, sycl::no_init); h.parallel_for(num_items, [=](auto i) { res_acc[i] = sum_acc[i] * c_acc[i] + d_acc[i]; }); }); q.wait(); } double elapsed = timer.Elapsed() / iter; return (elapsed); } // end myFunc3 double myFunc4(sycl::queue &q, AlignedVector<int> &a, AlignedVector<int> &b, AlignedVector<int> &c, AlignedVector<int> &d, AlignedVector<int> &res, int iter) { sycl::range num_items{a.size()}; VectorAllocator<int> alloc; const sycl::property_list props = {sycl::property::buffer::use_host_ptr()}; sycl::buffer a_buf(a, props); sycl::buffer b_buf(b, props); sycl::buffer c_buf(b, props); sycl::buffer d_buf(b, props); sycl::buffer res_buf(res, props); Timer timer; for (int i 
= 0; i < iter; i++) { // kernel1 q.submit([&](auto &h) { // Input accessors sycl::accessor a_acc(a_buf, h, sycl::read_only); sycl::accessor b_acc(b_buf, h, sycl::read_only); sycl::accessor c_acc(c_buf, h, sycl::read_only); sycl::accessor d_acc(d_buf, h, sycl::read_only); // Output accessor sycl::accessor res_acc(res_buf, h, sycl::write_only, sycl::no_init); h.parallel_for(num_items, [=](auto i) { int t = a_acc[i] + b_acc[i]; if (t > 10) res_acc[i] = c_acc[i] + d_acc[i]; else res_acc[i] = d_acc[i]; }); }); q.wait(); } double elapsed = timer.Elapsed() / iter; return (elapsed); } // end myFunc4 void InitializeArray(AlignedVector<int> &a) { for (size_t i = 0; i < a.size(); i += 2) a[i] = 12; for (size_t i = 1; i < a.size(); i += 2) a[i] = 2; } void Initialize(AlignedVector<int> &a) { for (size_t i = 0; i < a.size(); i++) a[i] = 0; } int main() { sycl::queue q(d_selector); VectorAllocator<int> alloc; AlignedVector<int> a(array_size, alloc); AlignedVector<int> b(array_size, alloc); AlignedVector<int> c(array_size, alloc); AlignedVector<int> d(array_size, alloc); AlignedVector<int> res(array_size, alloc); InitializeArray(a); InitializeArray(b); InitializeArray(c); InitializeArray(d); std::cout << "Running on device: " << q.get_device().get_info<sycl::info::device::name>() << "\n"; std::cout << "Vector size: " << a.size() << "\n"; // jit the code myFunc1(q, a, b, c, d, res, 1); // check results Initialize(res); double elapsed = myFunc1(q, a, b, c, d, res, 1); if (check_res(res)) std::cout << "SUCCESS: Time myFunc1 = " << elapsed << "s\n"; else std::cout << "ERROR: myFunc1 result did not match expected result\n"; elapsed = myFunc2(q, a, b, c, d, res, 1); if (check_res(res)) std::cout << "SUCCESS: Time myFunc2 = " << elapsed << "s\n"; else std::cout << "ERROR: myFunc1 result did not match expected result\n"; elapsed = myFunc3(q, a, b, c, d, res, 1); if (check_res(res)) std::cout << "SUCCESS: Time myFunc3 = " << elapsed << "s\n"; else std::cout << "ERROR: myFunc1 result did 
not match expected result\n"; elapsed = myFunc4(q, a, b, c, d, res, 1); if (check_res(res)) std::cout << "SUCCESS: Time myFunc4 = " << elapsed << "s\n"; else std::cout << "ERROR: myFunc1 result did not match expected result\n"; return 0; }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/host-device-memory/align.hpp
//============================================================== // Copyright © 2022 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #ifndef __ALIGN #define __ALIGN 1 enum class Alignment : size_t { Normal = sizeof(void *), SSE = 16, AVX = 32, PAGE = 4096, }; namespace detail { void *allocate_aligned_memory(size_t align, size_t size); void deallocate_aligned_memory(void *ptr) noexcept; } // namespace detail template <typename T, Alignment Align = Alignment::PAGE> class AlignedAllocator; template <Alignment Align> class AlignedAllocator<void, Align> { public: typedef void *pointer; typedef const void *const_pointer; typedef void value_type; template <class U> struct rebind { typedef AlignedAllocator<U, Align> other; }; }; template <typename T, Alignment Align> class AlignedAllocator { public: typedef T value_type; typedef T *pointer; typedef const T *const_pointer; typedef T &reference; typedef const T &const_reference; typedef size_t size_type; typedef ptrdiff_t difference_type; typedef std::true_type propagate_on_container_move_assignment; template <class U> struct rebind { typedef AlignedAllocator<U, Align> other; }; public: AlignedAllocator() noexcept {} template <class U> AlignedAllocator(const AlignedAllocator<U, Align> &) noexcept {} size_type max_size() const noexcept { return (size_type(~0) - size_type(Align)) / sizeof(T); } pointer address(reference x) const noexcept { return std::addressof(x); } const_pointer address(const_reference x) const noexcept { return std::addressof(x); } pointer allocate(size_type n, typename AlignedAllocator<void, Align>::const_pointer = 0) { const size_type alignment = static_cast<size_type>(Align); void *ptr = detail::allocate_aligned_memory(alignment, n * sizeof(T)); if (ptr == nullptr) { throw std::bad_alloc(); } return reinterpret_cast<pointer>(ptr); } void deallocate(pointer p, size_type) noexcept { return detail::deallocate_aligned_memory(p); } template 
<class U, class... Args> void construct(U *p, Args &&...args) { ::new (reinterpret_cast<void *>(p)) U(std::forward<Args>(args)...); } void destroy(pointer p) { p->~T(); } }; template <typename T, Alignment Align> class AlignedAllocator<const T, Align> { public: typedef T value_type; typedef const T *pointer; typedef const T *const_pointer; typedef const T &reference; typedef const T &const_reference; typedef size_t size_type; typedef ptrdiff_t difference_type; typedef std::true_type propagate_on_container_move_assignment; template <class U> struct rebind { typedef AlignedAllocator<U, Align> other; }; public: AlignedAllocator() noexcept {} template <class U> AlignedAllocator(const AlignedAllocator<U, Align> &) noexcept {} size_type max_size() const noexcept { return (size_type(~0) - size_type(Align)) / sizeof(T); } const_pointer address(const_reference x) const noexcept { return std::addressof(x); } pointer allocate(size_type n, typename AlignedAllocator<void, Align>::const_pointer = 0) { const size_type alignment = static_cast<size_type>(Align); void *ptr = detail::allocate_aligned_memory(alignment, n * sizeof(T)); if (ptr == nullptr) { throw std::bad_alloc(); } return reinterpret_cast<pointer>(ptr); } void deallocate(pointer p, size_type) noexcept { return detail::deallocate_aligned_memory(p); } template <class U, class... 
Args> void construct(U *p, Args &&...args) { ::new (reinterpret_cast<void *>(p)) U(std::forward<Args>(args)...); } void destroy(pointer p) { p->~T(); } }; template <typename T, Alignment TAlign, typename U, Alignment UAlign> inline bool operator==(const AlignedAllocator<T, TAlign> &, const AlignedAllocator<U, UAlign> &) noexcept { return TAlign == UAlign; } template <typename T, Alignment TAlign, typename U, Alignment UAlign> inline bool operator!=(const AlignedAllocator<T, TAlign> &, const AlignedAllocator<U, UAlign> &) noexcept { return TAlign != UAlign; } void *detail::allocate_aligned_memory(size_t align, size_t size) { assert(align >= sizeof(void *)); // assert(nail::is_power_of_two(align)); if (size == 0) { return nullptr; } void *ptr = nullptr; int rc = posix_memalign(&ptr, align, size); if (rc != 0) { return nullptr; } return ptr; } void detail::deallocate_aligned_memory(void *ptr) noexcept { return free(ptr); } #endif
hpp
oneAPI-samples
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/restrict/vec-add-restrict.cpp
//============================================================== // Copyright © 2022 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <CL/sycl.hpp> #include <array> #include <chrono> #include <iostream> // Array type and data size for this example. constexpr size_t array_size = 3 * 5 * 7 * (1 << 17); typedef std::array<int, array_size> IntArray; #define mysize (1 << 17) size_t VectorAdd(sycl::queue &q, const IntArray &a, const IntArray &b, IntArray &sum, int iter) { sycl::range num_items{a.size()}; sycl::buffer a_buf(a); sycl::buffer b_buf(b); sycl::buffer sum_buf(sum.data(), num_items); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < iter; i++) { auto e = q.submit([&](auto &h) { // Input accessors sycl::accessor a_acc(a_buf, h, sycl::read_only); sycl::accessor b_acc(b_buf, h, sycl::read_only); // Output accessor sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init); h.parallel_for(num_items, [=](auto i) { sum_acc[i] = a_acc[i] + b_acc[i]; }); }); } q.wait(); auto end = std::chrono::steady_clock::now(); std::cout << "Vector add completed on device - took " << (end - start).count() << " u-secs\n"; return ((end - start).count()); } // end VectorAdd size_t VectorAdd1(sycl::queue &q, const IntArray &a, const IntArray &b, IntArray &sum, int iter) { sycl::range num_items{a.size()}; sycl::buffer a_buf(a); sycl::buffer b_buf(b); sycl::buffer sum_buf(sum.data(), num_items); size_t num_groups = 1; size_t wg_size = 16; auto start = std::chrono::steady_clock::now(); for (int i = 0; i < iter; i++) { q.submit([&](auto &h) { // Input accessors sycl::accessor a_acc(a_buf, h, sycl::read_only); sycl::accessor b_acc(b_buf, h, sycl::read_only); // Output accessor sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init); h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size), [=](sycl::nd_item<1> index) [[intel::reqd_sub_group_size(16)]] { // no unrolling 
size_t loc_id = index.get_local_id(); for (size_t i = loc_id; i < mysize; i += wg_size) { sum_acc[i] = a_acc[i] + b_acc[i]; } }); }); } q.wait(); auto end = std::chrono::steady_clock::now(); std::cout << "Vector add1 completed on device - took " << (end - start).count() << " u-secs\n"; return ((end - start).count()); } // end VectorAdd1 size_t VectorAdd2(sycl::queue &q, const IntArray &a, const IntArray &b, IntArray &sum, int iter) { sycl::range num_items{a.size()}; sycl::buffer a_buf(a); sycl::buffer b_buf(b); sycl::buffer sum_buf(sum.data(), num_items); // size_t num_groups = // q.get_device().get_info<sycl::info::device::max_compute_units>(); size_t // wg_size = // q.get_device().get_info<sycl::info::device::max_work_group_size>(); size_t num_groups = 1; size_t wg_size = 16; auto start = std::chrono::steady_clock::now(); for (int i = 0; i < iter; i++) { q.submit([&](auto &h) { // Input accessors sycl::accessor a_acc(a_buf, h, sycl::read_only); sycl::accessor b_acc(b_buf, h, sycl::read_only); // Output accessor sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init); h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size), [=](sycl::nd_item<1> index) [[intel::reqd_sub_group_size( 16)]] [[intel::kernel_args_restrict]] { size_t loc_id = index.get_local_id(); // unroll with a directive #pragma unroll(2) for (size_t i = loc_id; i < mysize; i += wg_size) { sum_acc[i] = a_acc[i] + b_acc[i]; } }); }); } q.wait(); auto end = std::chrono::steady_clock::now(); std::cout << "Vector add2 completed on device - took " << (end - start).count() << " u-secs\n"; return ((end - start).count()); } // end VectorAdd2 size_t VectorAdd3(sycl::queue &q, const IntArray &a, const IntArray &b, IntArray &sum, int iter) { sycl::range num_items{a.size()}; sycl::buffer a_buf(a); sycl::buffer b_buf(b); sycl::buffer sum_buf(sum.data(), num_items); size_t num_groups = 1; size_t wg_size = 16; auto start = std::chrono::steady_clock::now(); for (int i = 0; i < iter; i++) { 
q.submit([&](auto &h) { // Input accessors sycl::accessor a_acc(a_buf, h, sycl::read_only); sycl::accessor b_acc(b_buf, h, sycl::read_only); // Output accessor sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init); h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size), [=](sycl::nd_item<1> index) [[intel::reqd_sub_group_size(16)]] { // Manual unrolling size_t loc_id = index.get_local_id(); for (size_t i = loc_id; i < mysize; i += 32) { sum_acc[i] = a_acc[i] + b_acc[i]; sum_acc[i + 16] = a_acc[i + 16] + b_acc[i + 16]; } }); }); } q.wait(); auto end = std::chrono::steady_clock::now(); std::cout << "Vector add3 completed on device - took " << (end - start).count() << " u-secs\n"; return ((end - start).count()); } // end VectorAdd3 size_t VectorAdd4(sycl::queue &q, const IntArray &a, const IntArray &b, IntArray &sum, int iter) { sycl::range num_items{a.size()}; sycl::buffer a_buf(a); sycl::buffer b_buf(b); sycl::buffer sum_buf(sum.data(), num_items); size_t num_groups = 1; size_t wg_size = 16; auto start = std::chrono::steady_clock::now(); for (int i = 0; i < iter; i++) { q.submit([&](auto &h) { // Input accessors sycl::accessor a_acc(a_buf, h, sycl::read_only); sycl::accessor b_acc(b_buf, h, sycl::read_only); // Output accessor sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init); h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size), [=](sycl::nd_item<1> index) [[intel::reqd_sub_group_size(16)]] { // Manual unrolling size_t loc_id = index.get_local_id(); for (size_t i = loc_id; i < mysize; i += 32) { int t1 = a_acc[i]; int t2 = b_acc[i]; int t3 = a_acc[i + 16]; int t4 = b_acc[i + 16]; sum_acc[i] = t1 + t2; sum_acc[i + 16] = t3 + t4; } }); }); } q.wait(); auto end = std::chrono::steady_clock::now(); std::cout << "Vector add4 completed on device - took " << (end - start).count() << " u-secs\n"; return ((end - start).count()); } // end VectorAdd4 size_t VectorAdd5(sycl::queue &q, const IntArray &a, const IntArray &b, 
IntArray &sum, int iter) { sycl::range num_items{a.size()}; sycl::buffer a_buf(a); sycl::buffer b_buf(b); sycl::buffer sum_buf(sum.data(), num_items); size_t num_groups = 1; size_t wg_size = 16; auto start = std::chrono::steady_clock::now(); for (int i = 0; i < iter; i++) { q.submit([&](auto &h) { // Input accessors sycl::accessor a_acc(a_buf, h, sycl::read_only); sycl::accessor b_acc(b_buf, h, sycl::read_only); // Output accessor sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init); h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size), [=](sycl::nd_item<1> index) [[intel::reqd_sub_group_size( 16)]] [[intel::kernel_args_restrict]] { // compiler needs to hoist the loads size_t loc_id = index.get_local_id(); for (size_t i = loc_id; i < mysize; i += 32) { sum_acc[i] = a_acc[i] + b_acc[i]; sum_acc[i + 16] = a_acc[i + 16] + b_acc[i + 16]; } }); }); } q.wait(); auto end = std::chrono::steady_clock::now(); std::cout << "Vector add5 completed on device - took " << (end - start).count() << " u-secs\n"; return ((end - start).count()); } // end VectorAdd5 void InitializeArray(IntArray &a) { for (size_t i = 0; i < a.size(); i++) a[i] = i; } void Initialize(IntArray &a) { for (size_t i = 0; i < a.size(); i++) a[i] = 0; } IntArray a, b, sum; int main() { sycl::queue q(sycl::default_selector_v); InitializeArray(a); InitializeArray(b); std::cout << "Running on device: " << q.get_device().get_info<sycl::info::device::name>() << "\n"; std::cout << "Vector size: " << a.size() << "\n"; // jit the code VectorAdd(q, a, b, sum, 1000); // check results Initialize(sum); VectorAdd1(q, a, b, sum, 1); for (int i = 0; i < mysize; i++) if (sum[i] != 2 * i) { std::cout << "add1 Did not match\n"; } Initialize(sum); VectorAdd2(q, a, b, sum, 1); for (int i = 0; i < mysize; i++) if (sum[i] != 2 * i) { std::cout << "add2 Did not match\n"; } // time the kernels Initialize(sum); VectorAdd3(q, a, b, sum, 1); for (int i = 0; i < mysize; i++) if (sum[i] != 2 * i) { std::cout << 
"add3 Did not match " << "sum[" << i << "]=" << sum[i] << "\n"; } Initialize(sum); VectorAdd5(q, a, b, sum, 1); for (int i = 0; i < mysize; i++) if (sum[i] != 2 * i) { std::cout << "add5 Did not match " << "sum[" << i << "]=" << sum[i] << "\n"; } Initialize(sum); VectorAdd1(q, a, b, sum, 1000); Initialize(sum); VectorAdd2(q, a, b, sum, 1000); Initialize(sum); VectorAdd3(q, a, b, sum, 1000); Initialize(sum); VectorAdd4(q, a, b, sum, 1000); Initialize(sum); VectorAdd5(q, a, b, sum, 1000); return 0; }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/sub-group/sub-group-0.cpp
//============================================================== // Copyright © 2022 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <CL/sycl.hpp> #include <iostream> int main() { sycl::queue q{sycl::gpu_selector_v}; std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n"; // Snippet begin q.submit([&](auto &h) { sycl::stream out(65536, 256, h); h.parallel_for(sycl::nd_range(sycl::range{32}, sycl::range{32}), [=](sycl::nd_item<1> it) { int groupId = it.get_group(0); int globalId = it.get_global_linear_id(); auto sg = it.get_sub_group(); int sgSize = sg.get_local_range()[0]; int sgGroupId = sg.get_group_id()[0]; int sgId = sg.get_local_id()[0]; out << "globalId = " << sycl::setw(2) << globalId << " groupId = " << groupId << " sgGroupId = " << sgGroupId << " sgId = " << sgId << " sgSize = " << sycl::setw(2) << sgSize << sycl::endl; }); }); // Snippet end q.wait(); return 0; }
cpp
oneAPI-samples
data/projects/oneAPI-samples/Publications/GPU-Opt-Guide/sub-group/sub-group-4.cpp
//============================================================== // Copyright © 2022 Intel Corporation // // SPDX-License-Identifier: MIT // ============================================================= #include <CL/sycl.hpp> #include <iostream> int main() { sycl::queue q{sycl::gpu_selector_v, sycl::property::queue::enable_profiling{}}; std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << std::endl; // Snippet begin constexpr int N = 1024 * 1024; int *data = sycl::malloc_shared<int>(N, q); int *data2 = sycl::malloc_shared<int>(N, q); memset(data2, 0xFF, sizeof(int) * N); auto e = q.submit([&](auto &h) { h.parallel_for(sycl::nd_range(sycl::range{N / 16}, sycl::range{32}), [=](sycl::nd_item<1> it) { int i = it.get_global_linear_id(); i = i * 16; for (int j = i; j < (i + 16); j++) { data[j] = data2[j]; } }); }); // Snippet end q.wait(); std::cout << "Kernel time = " << (e.template get_profiling_info< sycl::info::event_profiling::command_end>() - e.template get_profiling_info< sycl::info::event_profiling::command_start>()) << " ns" << std::endl; return 0; }
cpp