Dataset columns:
  repo             string   (lengths 1 to 152)
  file             string   (lengths 15 to 205)
  code             string   (lengths 0 to 41.6M)
  file_length      int64    (0 to 41.6M)
  avg_line_length  float64  (0 to 1.81M)
  max_line_length  int64    (0 to 12.7M)
  extension_type   string   (90 values)
repo: null
file: NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/core/valgrind/helgrind.h
/* ---------------------------------------------------------------- Notice that the above BSD-style license applies to this one file (helgrind.h) only. The entire rest of Valgrind is licensed under the terms of the GNU General Public License, version 2. See the COPYING file in the source distribution for details. ---------------------------------------------------------------- This file is part of Helgrind, a Valgrind tool for detecting errors in threaded programs. Copyright (C) 2007-2017 OpenWorks LLP [email protected] Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------- Notice that the above BSD-style license applies to this one file (helgrind.h) only. The entire rest of Valgrind is licensed under the terms of the GNU General Public License, version 2. See the COPYING file in the source distribution for details. ---------------------------------------------------------------- */ #ifndef __HELGRIND_H #define __HELGRIND_H #include "valgrind.h" /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! This enum comprises an ABI exported by Valgrind to programs which use client requests. DO NOT CHANGE THE ORDER OF THESE ENTRIES, NOR DELETE ANY -- add new ones at the end. */ typedef enum { VG_USERREQ__HG_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'), /* The rest are for Helgrind's internal use. Not for end-user use. Do not use them unless you are a Valgrind developer. */ /* Notify the tool what this thread's pthread_t is. 
*/ _VG_USERREQ__HG_SET_MY_PTHREAD_T = VG_USERREQ_TOOL_BASE('H','G') + 256, _VG_USERREQ__HG_PTH_API_ERROR, /* char*, int */ _VG_USERREQ__HG_PTHREAD_JOIN_POST, /* pthread_t of quitter */ _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST, /* pth_mx_t*, long mbRec */ _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE, /* pth_mx_t*, long isInit */ _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE, /* pth_mx_t* */ _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST, /* pth_mx_t* */ _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE, /* void*, long isTryLock */ _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST, /* void* */ _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE, /* pth_cond_t* */ _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE, /* pth_cond_t* */ _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE, /* pth_cond_t*, pth_mx_t* */ _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST, /* pth_cond_t*, pth_mx_t* */ _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE, /* pth_cond_t*, long isInit */ _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST, /* pth_rwlk_t* */ _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, /* pth_rwlk_t* */ _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE, /* pth_rwlk_t*, long isW */ _VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED, /* void*, long isW */ _VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED, /* void* */ _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST, /* pth_rwlk_t* */ _VG_USERREQ__HG_POSIX_SEM_INIT_POST, /* sem_t*, ulong value */ _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE, /* sem_t* */ _VG_USERREQ__HG_POSIX_SEM_RELEASED, /* void* */ _VG_USERREQ__HG_POSIX_SEM_ACQUIRED, /* void* */ _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE, /* pth_bar_t*, ulong, ulong */ _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE, /* pth_bar_t* */ _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, /* pth_bar_t* */ _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE, /* pth_slk_t* */ _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST, /* pth_slk_t* */ _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE, /* pth_slk_t* */ _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST, /* pth_slk_t* */ _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE, /* pth_slk_t* */ _VG_USERREQ__HG_CLIENTREQ_UNIMP, /* char* */ _VG_USERREQ__HG_USERSO_SEND_PRE, /* arbitrary UWord SO-tag */ _VG_USERREQ__HG_USERSO_RECV_POST, /* arbitrary UWord SO-tag */ _VG_USERREQ__HG_USERSO_FORGET_ALL, /* arbitrary UWord SO-tag */ _VG_USERREQ__HG_RESERVED2, /* Do not use */ _VG_USERREQ__HG_RESERVED3, /* Do not use */ _VG_USERREQ__HG_RESERVED4, /* Do not use */ _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED, /* Addr a, ulong len */ _VG_USERREQ__HG_ARANGE_MAKE_TRACKED, /* Addr a, ulong len */ _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE, /* pth_bar_t*, ulong */ _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK, /* Addr start_of_block */ _VG_USERREQ__HG_PTHREAD_COND_INIT_POST, /* pth_cond_t*, pth_cond_attr_t*/ _VG_USERREQ__HG_GNAT_MASTER_HOOK, /* void*d,void*m,Word ml */ _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK, /* void*s,Word ml */ _VG_USERREQ__HG_GET_ABITS, /* Addr a,Addr abits, ulong len */ _VG_USERREQ__HG_PTHREAD_CREATE_BEGIN, _VG_USERREQ__HG_PTHREAD_CREATE_END, _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE, /* pth_mx_t*,long isTryLock */ _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST, /* pth_mx_t *,long tookLock */ _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST, /* pth_rwlk_t*,long isW,long */ _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE, /* pth_rwlk_t* */ _VG_USERREQ__HG_POSIX_SEM_POST_PRE, /* sem_t* */ _VG_USERREQ__HG_POSIX_SEM_POST_POST, /* sem_t* */ _VG_USERREQ__HG_POSIX_SEM_WAIT_PRE, /* sem_t* */ _VG_USERREQ__HG_POSIX_SEM_WAIT_POST, /* sem_t*, long tookLock */ _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST, /* pth_cond_t* */ _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST,/* pth_cond_t* */ 
_VG_USERREQ__HG_RTLD_BIND_GUARD, /* int flags */ _VG_USERREQ__HG_RTLD_BIND_CLEAR, /* int flags */ _VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN /* void*d, void*m */ } Vg_TCheckClientRequest; /*----------------------------------------------------------------*/ /*--- ---*/ /*--- Implementation-only facilities. Not for end-user use. ---*/ /*--- For end-user facilities see below (the next section in ---*/ /*--- this file.) ---*/ /*--- ---*/ /*----------------------------------------------------------------*/ /* Do a client request. These are macros rather than a functions so as to avoid having an extra frame in stack traces. NB: these duplicate definitions in hg_intercepts.c. But here, we have to make do with weaker typing (no definition of Word etc) and no assertions, whereas in helgrind.h we can use those facilities. Obviously it's important the two sets of definitions are kept in sync. The commented-out asserts should actually hold, but unfortunately they can't be allowed to be visible here, because that would require the end-user code to #include <assert.h>. */ #define DO_CREQ_v_W(_creqF, _ty1F,_arg1F) \ do { \ long int _arg1; \ /* assert(sizeof(_ty1F) == sizeof(long int)); */ \ _arg1 = (long int)(_arg1F); \ VALGRIND_DO_CLIENT_REQUEST_STMT( \ (_creqF), \ _arg1, 0,0,0,0); \ } while (0) #define DO_CREQ_W_W(_resF, _dfltF, _creqF, _ty1F,_arg1F) \ do { \ long int _arg1; \ /* assert(sizeof(_ty1F) == sizeof(long int)); */ \ _arg1 = (long int)(_arg1F); \ _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR( \ (_dfltF), \ (_creqF), \ _arg1, 0,0,0,0); \ _resF = _qzz_res; \ } while (0) #define DO_CREQ_v_WW(_creqF, _ty1F,_arg1F, _ty2F,_arg2F) \ do { \ long int _arg1, _arg2; \ /* assert(sizeof(_ty1F) == sizeof(long int)); */ \ /* assert(sizeof(_ty2F) == sizeof(long int)); */ \ _arg1 = (long int)(_arg1F); \ _arg2 = (long int)(_arg2F); \ VALGRIND_DO_CLIENT_REQUEST_STMT( \ (_creqF), \ _arg1,_arg2,0,0,0); \ } while (0) #define DO_CREQ_v_WWW(_creqF, _ty1F,_arg1F, \ _ty2F,_arg2F, _ty3F, _arg3F) \ do { \ long int _arg1, _arg2, _arg3; \ /* assert(sizeof(_ty1F) == sizeof(long int)); */ \ /* assert(sizeof(_ty2F) == sizeof(long int)); */ \ /* assert(sizeof(_ty3F) == sizeof(long int)); */ \ _arg1 = (long int)(_arg1F); \ _arg2 = (long int)(_arg2F); \ _arg3 = (long int)(_arg3F); \ VALGRIND_DO_CLIENT_REQUEST_STMT( \ (_creqF), \ _arg1,_arg2,_arg3,0,0); \ } while (0) #define DO_CREQ_W_WWW(_resF, _dfltF, _creqF, _ty1F,_arg1F, \ _ty2F,_arg2F, _ty3F, _arg3F) \ do { \ long int _qzz_res; \ long int _arg1, _arg2, _arg3; \ /* assert(sizeof(_ty1F) == sizeof(long int)); */ \ _arg1 = (long int)(_arg1F); \ _arg2 = (long int)(_arg2F); \ _arg3 = (long int)(_arg3F); \ /* \ * XXX: here PMDK's version deviates from upstream;\ * without the fix, this code generates \ * a sign-conversion warning, which PMDK's \ * "awesome" build system promotes to an error \ */ \ _qzz_res = (long)VALGRIND_DO_CLIENT_REQUEST_EXPR( \ (_dfltF), \ (_creqF), \ _arg1,_arg2,_arg3,0,0); \ _resF = _qzz_res; \ } while (0) #define _HG_CLIENTREQ_UNIMP(_qzz_str) \ DO_CREQ_v_W(_VG_USERREQ__HG_CLIENTREQ_UNIMP, \ (char*),(_qzz_str)) /*----------------------------------------------------------------*/ /*--- ---*/ /*--- Helgrind-native requests. These allow access to ---*/ /*--- the same set of annotation primitives that are used ---*/ /*--- to build the POSIX pthread wrappers. ---*/ /*--- ---*/ /*----------------------------------------------------------------*/ /* ---------------------------------------------------------- For describing ordinary mutexes (non-rwlocks). 
For rwlock descriptions see ANNOTATE_RWLOCK_* below. ---------------------------------------------------------- */ /* Notify here immediately after mutex creation. _mbRec == 0 for a non-recursive mutex, 1 for a recursive mutex. */ #define VALGRIND_HG_MUTEX_INIT_POST(_mutex, _mbRec) \ DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST, \ void*,(_mutex), long,(_mbRec)) /* Notify here immediately before mutex acquisition. _isTryLock == 0 for a normal acquisition, 1 for a "try" style acquisition. */ #define VALGRIND_HG_MUTEX_LOCK_PRE(_mutex, _isTryLock) \ DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE, \ void*,(_mutex), long,(_isTryLock)) /* Notify here immediately after a successful mutex acquisition. */ #define VALGRIND_HG_MUTEX_LOCK_POST(_mutex) \ DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST, \ void*,(_mutex)) /* Notify here immediately before a mutex release. */ #define VALGRIND_HG_MUTEX_UNLOCK_PRE(_mutex) \ DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE, \ void*,(_mutex)) /* Notify here immediately after a mutex release. */ #define VALGRIND_HG_MUTEX_UNLOCK_POST(_mutex) \ DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST, \ void*,(_mutex)) /* Notify here immediately before mutex destruction. */ #define VALGRIND_HG_MUTEX_DESTROY_PRE(_mutex) \ DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE, \ void*,(_mutex)) /* ---------------------------------------------------------- For describing semaphores. ---------------------------------------------------------- */ /* Notify here immediately after semaphore creation. */ #define VALGRIND_HG_SEM_INIT_POST(_sem, _value) \ DO_CREQ_v_WW(_VG_USERREQ__HG_POSIX_SEM_INIT_POST, \ void*, (_sem), unsigned long, (_value)) /* Notify here immediately after a semaphore wait (an acquire-style operation) */ #define VALGRIND_HG_SEM_WAIT_POST(_sem) \ DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_ACQUIRED, \ void*,(_sem)) /* Notify here immediately before semaphore post (a release-style operation) */ #define VALGRIND_HG_SEM_POST_PRE(_sem) \ DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_RELEASED, \ void*,(_sem)) /* Notify here immediately before semaphore destruction. */ #define VALGRIND_HG_SEM_DESTROY_PRE(_sem) \ DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE, \ void*, (_sem)) /* ---------------------------------------------------------- For describing barriers. ---------------------------------------------------------- */ /* Notify here immediately before barrier creation. _count is the capacity. _resizable == 0 means the barrier may not be resized, 1 means it may be. */ #define VALGRIND_HG_BARRIER_INIT_PRE(_bar, _count, _resizable) \ DO_CREQ_v_WWW(_VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE, \ void*,(_bar), \ unsigned long,(_count), \ unsigned long,(_resizable)) /* Notify here immediately before arrival at a barrier. */ #define VALGRIND_HG_BARRIER_WAIT_PRE(_bar) \ DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE, \ void*,(_bar)) /* Notify here immediately before a resize (change of barrier capacity). If _newcount >= the existing capacity, then there is no change in the state of any threads waiting at the barrier. If _newcount < the existing capacity, and >= _newcount threads are currently waiting at the barrier, then this notification is considered to also have the effect of telling the checker that all waiting threads have now moved past the barrier. (I can't think of any other sane semantics.) 
*/ #define VALGRIND_HG_BARRIER_RESIZE_PRE(_bar, _newcount) \ DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE, \ void*,(_bar), \ unsigned long,(_newcount)) /* Notify here immediately before barrier destruction. */ #define VALGRIND_HG_BARRIER_DESTROY_PRE(_bar) \ DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, \ void*,(_bar)) /* ---------------------------------------------------------- For describing memory ownership changes. ---------------------------------------------------------- */ /* Clean memory state. This makes Helgrind forget everything it knew about the specified memory range. Effectively this announces that the specified memory range now "belongs" to the calling thread, so that: (1) the calling thread can access it safely without synchronisation, and (2) all other threads must sync with this one to access it safely. This is particularly useful for memory allocators that wish to recycle memory. */ #define VALGRIND_HG_CLEAN_MEMORY(_qzz_start, _qzz_len) \ DO_CREQ_v_WW(VG_USERREQ__HG_CLEAN_MEMORY, \ void*,(_qzz_start), \ unsigned long,(_qzz_len)) /* The same, but for the heap block starting at _qzz_blockstart. This allows painting when we only know the address of an object, but not its size, which is sometimes the case in C++ code involving inheritance, and in which RTTI is not, for whatever reason, available. Returns the number of bytes painted, which can be zero for a zero-sized block. Hence, return values >= 0 indicate success (the block was found), and the value -1 indicates block not found, and -2 is returned when not running on Helgrind. */ #define VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(_qzz_blockstart) \ (__extension__ \ ({long int _npainted; \ DO_CREQ_W_W(_npainted, (-2)/*default*/, \ _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK, \ void*,(_qzz_blockstart)); \ _npainted; \ })) /* ---------------------------------------------------------- For error control. ---------------------------------------------------------- */ /* Tell H that an address range is not to be "tracked" until further notice. This puts it in the NOACCESS state, in which case we ignore all reads and writes to it. Useful for ignoring ranges of memory where there might be races we don't want to see. If the memory is subsequently reallocated via malloc/new/stack allocation, then it is put back in the trackable state. Hence it is safe in the situation where checking is disabled, the containing area is deallocated and later reallocated for some other purpose. */ #define VALGRIND_HG_DISABLE_CHECKING(_qzz_start, _qzz_len) \ DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED, \ void*,(_qzz_start), \ unsigned long,(_qzz_len)) /* And put it back into the normal "tracked" state, that is, make it once again subject to the normal race-checking machinery. This puts it in the same state as new memory allocated by this thread -- that is, basically owned exclusively by this thread. */ #define VALGRIND_HG_ENABLE_CHECKING(_qzz_start, _qzz_len) \ DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_TRACKED, \ void*,(_qzz_start), \ unsigned long,(_qzz_len)) /* Checks the accessibility bits for addresses [zza..zza+zznbytes-1]. If zzabits array is provided, copy the accessibility bits in zzabits. Return values: -2 if not running on helgrind -1 if any parts of zzabits is not addressable >= 0 : success. When success, it returns the nr of addressable bytes found. 
So, to check that a whole range is addressable, check VALGRIND_HG_GET_ABITS(addr,NULL,len) == len In addition, if you want to examine the addressability of each byte of the range, you need to provide a non NULL ptr as second argument, pointing to an array of unsigned char of length len. Addressable bytes are indicated with 0xff. Non-addressable bytes are indicated with 0x00. */ #define VALGRIND_HG_GET_ABITS(zza,zzabits,zznbytes) \ (__extension__ \ ({long int _res; \ /* \ * XXX: here PMDK's version deviates from upstream; \ * without the fix, this macro doesn't return \ * the default value correctly \ */ \ DO_CREQ_W_WWW(_res, (-2LL)/*default*/, \ _VG_USERREQ__HG_GET_ABITS, \ void*,(zza), void*,(zzabits), \ unsigned long,(zznbytes)); \ _res; \ })) /* End-user request for Ada applications compiled with GNAT. Helgrind understands the Ada concept of Ada task dependencies and terminations. See Ada Reference Manual section 9.3 "Task Dependence - Termination of Tasks". However, in some cases, the master of (terminated) tasks completes only when the application exits. An example of this is dynamically allocated tasks with an access type defined at Library Level. By default, the state of such tasks in Helgrind will be 'exited but join not done yet'. Many tasks in such a state are however causing Helgrind CPU and memory to increase significantly. VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN can be used to indicate to Helgrind that a not yet completed master has however already 'seen' the termination of a dependent : this is conceptually the same as a pthread_join and causes the cleanup of the dependent as done by Helgrind when a master completes. This allows to avoid the overhead in helgrind caused by such tasks. A typical usage for a master to indicate it has done conceptually a join with a dependent task before the master completes is: while not Dep_Task'Terminated loop ... do whatever to wait for Dep_Task termination. end loop; VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN (Dep_Task'Identity, Ada.Task_Identification.Current_Task); Note that VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN should be a binding to a C function built with the below macro. */ #define VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN(_qzz_dep, _qzz_master) \ DO_CREQ_v_WW(_VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN, \ void*,(_qzz_dep), \ void*,(_qzz_master)) /*----------------------------------------------------------------*/ /*--- ---*/ /*--- ThreadSanitizer-compatible requests ---*/ /*--- (mostly unimplemented) ---*/ /*--- ---*/ /*----------------------------------------------------------------*/ /* A quite-broad set of annotations, as used in the ThreadSanitizer project. This implementation aims to be a (source-level) compatible implementation of the macros defined in: http://code.google.com/p/data-race-test/source /browse/trunk/dynamic_annotations/dynamic_annotations.h (some of the comments below are taken from the above file) The implementation here is very incomplete, and intended as a starting point. Many of the macros are unimplemented. Rather than allowing unimplemented macros to silently do nothing, they cause an assertion. Intention is to implement them on demand. The major use of these macros is to make visible to race detectors, the behaviour (effects) of user-implemented synchronisation primitives, that the detectors could not otherwise deduce from the normal observation of pthread etc calls. Some of the macros are no-ops in Helgrind. 
That's because Helgrind is a pure happens-before detector, whereas ThreadSanitizer uses a hybrid lockset and happens-before scheme, which requires more accurate annotations for correct operation. The macros are listed in the same order as in dynamic_annotations.h (URL just above). I should point out that I am less than clear about the intended semantics of quite a number of them. Comments and clarifications welcomed! */ /* ---------------------------------------------------------------- These four allow description of user-level condition variables, apparently in the style of POSIX's pthread_cond_t. Currently unimplemented and will assert. ---------------------------------------------------------------- */ /* Report that wait on the condition variable at address CV has succeeded and the lock at address LOCK is now held. CV and LOCK are completely arbitrary memory addresses which presumably mean something to the application, but are meaningless to Helgrind. */ #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_LOCK_WAIT") /* Report that wait on the condition variable at CV has succeeded. Variant w/o lock. */ #define ANNOTATE_CONDVAR_WAIT(cv) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_WAIT") /* Report that we are about to signal on the condition variable at address CV. */ #define ANNOTATE_CONDVAR_SIGNAL(cv) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL") /* Report that we are about to signal_all on the condition variable at CV. */ #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL_ALL") /* ---------------------------------------------------------------- Create completely arbitrary happens-before edges between threads. If threads T1 .. Tn all do ANNOTATE_HAPPENS_BEFORE(obj) and later (w.r.t. some notional global clock for the computation) thread Tm does ANNOTATE_HAPPENS_AFTER(obj), then Helgrind will regard all memory accesses done by T1 .. Tn before the ..BEFORE.. call as happening-before all memory accesses done by Tm after the ..AFTER.. call. Hence Helgrind won't complain about races if Tm's accesses afterwards are to the same locations as accesses before by any of T1 .. Tn. OBJ is a machine word (unsigned long, or void*), is completely arbitrary, and denotes the identity of some synchronisation object you're modelling. You must do the _BEFORE call just before the real sync event on the signaller's side, and _AFTER just after the real sync event on the waiter's side. If none of the rest of these macros make sense to you, at least take the time to understand these two. They form the very essence of describing arbitrary inter-thread synchronisation events to Helgrind. You can get a long way just with them alone. See also, extensive discussion on semantics of this in https://bugs.kde.org/show_bug.cgi?id=243935 ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) is interim until such time as bug 243935 is fully resolved. It instructs Helgrind to forget about any ANNOTATE_HAPPENS_BEFORE calls on the specified object, in effect putting it back in its original state. Once in that state, a use of ANNOTATE_HAPPENS_AFTER on it has no effect on the calling thread. An implementation may optionally release resources it has associated with 'obj' when ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) happens. Users are recommended to use ANNOTATE_HAPPENS_BEFORE_FORGET_ALL to indicate when a synchronisation object is no longer needed, so as to avoid potential indefinite resource leaks. 
---------------------------------------------------------------- */ #define ANNOTATE_HAPPENS_BEFORE(obj) \ DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_SEND_PRE, void*,(obj)) #define ANNOTATE_HAPPENS_AFTER(obj) \ DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_RECV_POST, void*,(obj)) #define ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) \ DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_FORGET_ALL, void*,(obj)) /* ---------------------------------------------------------------- Memory publishing. The TSan sources say: Report that the bytes in the range [pointer, pointer+size) are about to be published safely. The race checker will create a happens-before arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to subsequent accesses to this memory. I'm not sure I understand what this means exactly, nor whether it is relevant for a pure h-b detector. Leaving unimplemented for now. ---------------------------------------------------------------- */ #define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_PUBLISH_MEMORY_RANGE") /* DEPRECATED. Don't use it. */ /* #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) */ /* DEPRECATED. Don't use it. */ /* #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) */ /* ---------------------------------------------------------------- TSan sources say: Instruct the tool to create a happens-before arc between MU->Unlock() and MU->Lock(). This annotation may slow down the race detector; normally it is used only when it would be difficult to annotate each of the mutex's critical sections individually using the annotations above. If MU is a posix pthread_mutex_t then Helgrind will do this anyway. In any case, leave as unimp for now. I'm unsure about the intended behaviour. ---------------------------------------------------------------- */ #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX") /* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */ /* #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) */ /* ---------------------------------------------------------------- TSan sources say: Annotations useful when defining memory allocators, or when memory that was protected in one way starts to be protected in another. Report that a new memory at "address" of size "size" has been allocated. This might be used when the memory has been retrieved from a free list and is about to be reused, or when a the locking discipline for a variable changes. AFAICS this is the same as VALGRIND_HG_CLEAN_MEMORY. ---------------------------------------------------------------- */ #define ANNOTATE_NEW_MEMORY(address, size) \ VALGRIND_HG_CLEAN_MEMORY((address), (size)) /* ---------------------------------------------------------------- TSan sources say: Annotations useful when defining FIFO queues that transfer data between threads. All unimplemented. Am not claiming to understand this (yet). ---------------------------------------------------------------- */ /* Report that the producer-consumer queue object at address PCQ has been created. The ANNOTATE_PCQ_* annotations should be used only for FIFO queues. For non-FIFO queues use ANNOTATE_HAPPENS_BEFORE (for put) and ANNOTATE_HAPPENS_AFTER (for get). */ #define ANNOTATE_PCQ_CREATE(pcq) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_CREATE") /* Report that the queue at address PCQ is about to be destroyed. */ #define ANNOTATE_PCQ_DESTROY(pcq) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_DESTROY") /* Report that we are about to put an element into a FIFO queue at address PCQ. 
*/ #define ANNOTATE_PCQ_PUT(pcq) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_PUT") /* Report that we've just got an element from a FIFO queue at address PCQ. */ #define ANNOTATE_PCQ_GET(pcq) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_GET") /* ---------------------------------------------------------------- Annotations that suppress errors. It is usually better to express the program's synchronization using the other annotations, but these can be used when all else fails. Currently these are all unimplemented. I can't think of a simple way to implement them without at least some performance overhead. ---------------------------------------------------------------- */ /* Report that we may have a benign race at "pointer", with size "sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the point where "pointer" has been allocated, preferably close to the point where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. XXX: what's this actually supposed to do? And what's the type of DESCRIPTION? When does the annotation stop having an effect? */ #define ANNOTATE_BENIGN_RACE(pointer, description) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE") /* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to the memory range [address, address+size). */ #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ VALGRIND_HG_DISABLE_CHECKING(address, size) /* Request the analysis tool to ignore all reads in the current thread until ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey reads, while still checking other reads and all writes. */ #define ANNOTATE_IGNORE_READS_BEGIN() \ _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_BEGIN") /* Stop ignoring reads. */ #define ANNOTATE_IGNORE_READS_END() \ _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_END") /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */ #define ANNOTATE_IGNORE_WRITES_BEGIN() \ _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_BEGIN") /* Stop ignoring writes. */ #define ANNOTATE_IGNORE_WRITES_END() \ _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_END") /* Start ignoring all memory accesses (reads and writes). */ #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ do { \ ANNOTATE_IGNORE_READS_BEGIN(); \ ANNOTATE_IGNORE_WRITES_BEGIN(); \ } while (0) /* Stop ignoring all memory accesses. */ #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ do { \ ANNOTATE_IGNORE_WRITES_END(); \ ANNOTATE_IGNORE_READS_END(); \ } while (0) /* ---------------------------------------------------------------- Annotations useful for debugging. Again, so for unimplemented, partly for performance reasons. ---------------------------------------------------------------- */ /* Request to trace every access to ADDRESS. */ #define ANNOTATE_TRACE_MEMORY(address) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_TRACE_MEMORY") /* Report the current thread name to a race detector. */ #define ANNOTATE_THREAD_NAME(name) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_THREAD_NAME") /* ---------------------------------------------------------------- Annotations for describing behaviour of user-implemented lock primitives. In all cases, the LOCK argument is a completely arbitrary machine word (unsigned long, or void*) and can be any value which gives a unique identity to the lock objects being modelled. We just pretend they're ordinary posix rwlocks. That'll probably give some rather confusing wording in error messages, claiming that the arbitrary LOCK values are pthread_rwlock_t*'s, when in fact they are not. Ah well. 
---------------------------------------------------------------- */ /* Report that a lock has just been created at address LOCK. */ #define ANNOTATE_RWLOCK_CREATE(lock) \ DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST, \ void*,(lock)) /* Report that the lock at address LOCK is about to be destroyed. */ #define ANNOTATE_RWLOCK_DESTROY(lock) \ DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, \ void*,(lock)) /* Report that the lock at address LOCK has just been acquired. is_w=1 for writer lock, is_w=0 for reader lock. */ #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED, \ void*,(lock), unsigned long,(is_w)) /* Report that the lock at address LOCK is about to be released. */ #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED, \ void*,(lock)) /* is_w is ignored */ /* ------------------------------------------------------------- Annotations useful when implementing barriers. They are not normally needed by modules that merely use barriers. The "barrier" argument is a pointer to the barrier object. ---------------------------------------------------------------- */ /* Report that the "barrier" has been initialized with initial "count". If 'reinitialization_allowed' is true, initialization is allowed to happen multiple times w/o calling barrier_destroy() */ #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_INIT") /* Report that we are about to enter barrier_wait("barrier"). */ #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY") /* Report that we just exited barrier_wait("barrier"). */ #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY") /* Report that the "barrier" has been destroyed. */ #define ANNOTATE_BARRIER_DESTROY(barrier) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY") /* ---------------------------------------------------------------- Annotations useful for testing race detectors. ---------------------------------------------------------------- */ /* Report that we expect a race on the variable at ADDRESS. Use only in unit tests for a race detector. */ #define ANNOTATE_EXPECT_RACE(address, description) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_EXPECT_RACE") /* A no-op. Insert where you like to test the interceptors. */ #define ANNOTATE_NO_OP(arg) \ _HG_CLIENTREQ_UNIMP("ANNOTATE_NO_OP") /* Force the race detector to flush its state. The actual effect depends on * the implementation of the detector. */ #define ANNOTATE_FLUSH_STATE() \ _HG_CLIENTREQ_UNIMP("ANNOTATE_FLUSH_STATE") #endif /* __HELGRIND_H */
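A minimal usage sketch, not taken from the sources above: the producer/consumer code, the ready flag, and the build line are illustrative assumptions. It applies ANNOTATE_HAPPENS_BEFORE just before the signalling store, ANNOTATE_HAPPENS_AFTER just after the waiting load, and VALGRIND_HG_CLEAN_MEMORY when the buffer is recycled, following the comments above.

#include <pthread.h>
#include <stdio.h>
#include "helgrind.h"

static int buffer[64];
static volatile int ready;               /* hypothetical hand-rolled sync flag */

static void *producer(void *arg)
{
    (void)arg;
    buffer[0] = 42;                      /* write, then publish                */
    ANNOTATE_HAPPENS_BEFORE(&ready);     /* signaller side, just before store  */
    ready = 1;                           /* the real sync event                */
    return NULL;
}

static void *consumer(void *arg)
{
    (void)arg;
    while (!ready)                       /* wait for the flag                  */
        ;
    ANNOTATE_HAPPENS_AFTER(&ready);      /* waiter side, just after the load   */
    printf("got %d\n", buffer[0]);       /* not reported as a race             */
    VALGRIND_HG_CLEAN_MEMORY(buffer, sizeof buffer);   /* buffer is recycled   */
    return NULL;
}

int main(void)
{
    pthread_t p, c;
    pthread_create(&p, NULL, producer, NULL);
    pthread_create(&c, NULL, consumer, NULL);
    pthread_join(p, NULL);
    pthread_join(c, NULL);
    ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&ready);        /* release the SO-tag   */
    return 0;
}

Built normally (for example, cc example.c -lpthread with this header on the include path), the client requests cost only a few instructions when not running under Valgrind; under valgrind --tool=helgrind the flag-based handoff is treated as a real synchronisation edge, so the accesses to buffer are not reported as races.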
file_length: 39,544
avg_line_length: 45.965558
max_line_length: 80
extension_type: h
repo: null
file: NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/core/valgrind/valgrind.h
/* -*- c -*- ---------------------------------------------------------------- Notice that the following BSD-style license applies to this one file (valgrind.h) only. The rest of Valgrind is licensed under the terms of the GNU General Public License, version 2, unless otherwise indicated. See the COPYING file in the source distribution for details. ---------------------------------------------------------------- This file is part of Valgrind, a dynamic binary instrumentation framework. Copyright (C) 2000-2017 Julian Seward. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------- Notice that the above BSD-style license applies to this one file (valgrind.h) only. The entire rest of Valgrind is licensed under the terms of the GNU General Public License, version 2. See the COPYING file in the source distribution for details. ---------------------------------------------------------------- */ /* This file is for inclusion into client (your!) code. You can use these macros to manipulate and query Valgrind's execution inside your own programs. The resulting executables will still run without Valgrind, just a little bit more slowly than they otherwise would, but otherwise unchanged. When not running on valgrind, each client request consumes very few (eg. 7) instructions, so the resulting performance loss is negligible unless you plan to execute client requests millions of times per second. Nevertheless, if that is still a problem, you can compile with the NVALGRIND symbol defined (gcc -DNVALGRIND) so that client requests are not even compiled in. */ #ifndef __VALGRIND_H #define __VALGRIND_H /* ------------------------------------------------------------------ */ /* VERSION NUMBER OF VALGRIND */ /* ------------------------------------------------------------------ */ /* Specify Valgrind's version number, so that user code can conditionally compile based on our version number. Note that these were introduced at version 3.6 and so do not exist in version 3.5 or earlier. 
The recommended way to use them to check for "version X.Y or later" is (eg) #if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \ && (__VALGRIND_MAJOR__ > 3 \ || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6)) */ #define __VALGRIND_MAJOR__ 3 #define __VALGRIND_MINOR__ 14 #include <stdarg.h> /* Nb: this file might be included in a file compiled with -ansi. So we can't use C++ style "//" comments nor the "asm" keyword (instead use "__asm__"). */ /* Derive some tags indicating what the target platform is. Note that in this file we're using the compiler's CPP symbols for identifying architectures, which are different to the ones we use within the rest of Valgrind. Note, __powerpc__ is active for both 32 and 64-bit PPC, whereas __powerpc64__ is only active for the latter (on Linux, that is). Misc note: how to find out what's predefined in gcc by default: gcc -Wp,-dM somefile.c */ #undef PLAT_x86_darwin #undef PLAT_amd64_darwin #undef PLAT_x86_win32 #undef PLAT_amd64_win64 #undef PLAT_x86_linux #undef PLAT_amd64_linux #undef PLAT_ppc32_linux #undef PLAT_ppc64be_linux #undef PLAT_ppc64le_linux #undef PLAT_arm_linux #undef PLAT_arm64_linux #undef PLAT_s390x_linux #undef PLAT_mips32_linux #undef PLAT_mips64_linux #undef PLAT_x86_solaris #undef PLAT_amd64_solaris #if defined(__APPLE__) && defined(__i386__) # define PLAT_x86_darwin 1 #elif defined(__APPLE__) && defined(__x86_64__) # define PLAT_amd64_darwin 1 #elif (defined(__MINGW32__) && !defined(__MINGW64__)) \ || defined(__CYGWIN32__) \ || (defined(_WIN32) && defined(_M_IX86)) # define PLAT_x86_win32 1 #elif defined(__MINGW64__) \ || (defined(_WIN64) && defined(_M_X64)) # define PLAT_amd64_win64 1 #elif defined(__linux__) && defined(__i386__) # define PLAT_x86_linux 1 #elif defined(__linux__) && defined(__x86_64__) && !defined(__ILP32__) # define PLAT_amd64_linux 1 #elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__) # define PLAT_ppc32_linux 1 #elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF != 2 /* Big Endian uses ELF version 1 */ # define PLAT_ppc64be_linux 1 #elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF == 2 /* Little Endian uses ELF version 2 */ # define PLAT_ppc64le_linux 1 #elif defined(__linux__) && defined(__arm__) && !defined(__aarch64__) # define PLAT_arm_linux 1 #elif defined(__linux__) && defined(__aarch64__) && !defined(__arm__) # define PLAT_arm64_linux 1 #elif defined(__linux__) && defined(__s390__) && defined(__s390x__) # define PLAT_s390x_linux 1 #elif defined(__linux__) && defined(__mips__) && (__mips==64) # define PLAT_mips64_linux 1 #elif defined(__linux__) && defined(__mips__) && (__mips!=64) # define PLAT_mips32_linux 1 #elif defined(__sun) && defined(__i386__) # define PLAT_x86_solaris 1 #elif defined(__sun) && defined(__x86_64__) # define PLAT_amd64_solaris 1 #else /* If we're not compiling for our target platform, don't generate any inline asms. */ # if !defined(NVALGRIND) # define NVALGRIND 1 # endif #endif /* ------------------------------------------------------------------ */ /* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */ /* in here of use to end-users -- skip to the next section. */ /* ------------------------------------------------------------------ */ /* * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client * request. Accepts both pointers and integers as arguments. 
* * VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind * client request that does not return a value. * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind * client request and whose value equals the client request result. Accepts * both pointers and integers as arguments. Note that such calls are not * necessarily pure functions -- they may have side effects. */ #define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \ _zzq_request, _zzq_arg1, _zzq_arg2, \ _zzq_arg3, _zzq_arg4, _zzq_arg5) \ do { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \ (_zzq_request), (_zzq_arg1), (_zzq_arg2), \ (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0) #define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, \ _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ do { (void) VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ (_zzq_request), (_zzq_arg1), (_zzq_arg2), \ (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0) #if defined(NVALGRIND) /* Define NVALGRIND to completely remove the Valgrind magic sequence from the compiled code (analogous to NDEBUG's effects on assert()) */ #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ (_zzq_default) #else /* ! NVALGRIND */ /* The following defines the magic code sequences which the JITter spots and handles magically. Don't look too closely at them as they will rot your brain. The assembly code sequences for all architectures is in this one file. This is because this file must be stand-alone, and we don't want to have multiple files. For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default value gets put in the return slot, so that everything works when this is executed not under Valgrind. Args are passed in a memory block, and so there's no intrinsic limit to the number that could be passed, but it's currently five. The macro args are: _zzq_rlval result lvalue _zzq_default default value (result returned when running on real CPU) _zzq_request request code _zzq_arg1..5 request params The other two macros are used to support function wrapping, and are a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the guest's NRADDR pseudo-register and whatever other information is needed to safely run the call original from the wrapper: on ppc64-linux, the R2 value at the divert point is also needed. This information is abstracted into a user-visible type, OrigFn. VALGRIND_CALL_NOREDIR_* behaves the same as the following on the guest, but guarantees that the branch instruction will not be redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64: branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a complete inline asm, since it needs to be combined with more magic inline asm stuff to be useful. */ /* ----------------- x86-{linux,darwin,solaris} ---------------- */ #if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ || (defined(PLAT_x86_win32) && defined(__GNUC__)) \ || defined(PLAT_x86_solaris) typedef struct { unsigned int nraddr; /* where's the code? 
*/ } OrigFn; #define __SPECIAL_INSTRUCTION_PREAMBLE \ "roll $3, %%edi ; roll $13, %%edi\n\t" \ "roll $29, %%edi ; roll $19, %%edi\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ __extension__ \ ({volatile unsigned int _zzq_args[6]; \ volatile unsigned int _zzq_result; \ _zzq_args[0] = (unsigned int)(_zzq_request); \ _zzq_args[1] = (unsigned int)(_zzq_arg1); \ _zzq_args[2] = (unsigned int)(_zzq_arg2); \ _zzq_args[3] = (unsigned int)(_zzq_arg3); \ _zzq_args[4] = (unsigned int)(_zzq_arg4); \ _zzq_args[5] = (unsigned int)(_zzq_arg5); \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %EDX = client_request ( %EAX ) */ \ "xchgl %%ebx,%%ebx" \ : "=d" (_zzq_result) \ : "a" (&_zzq_args[0]), "0" (_zzq_default) \ : "cc", "memory" \ ); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ volatile unsigned int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %EAX = guest_NRADDR */ \ "xchgl %%ecx,%%ecx" \ : "=a" (__addr) \ : \ : "cc", "memory" \ ); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_CALL_NOREDIR_EAX \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* call-noredir *%EAX */ \ "xchgl %%edx,%%edx\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "xchgl %%edi,%%edi\n\t" \ : : : "cc", "memory" \ ); \ } while (0) #endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) || PLAT_x86_solaris */ /* ------------------------- x86-Win32 ------------------------- */ #if defined(PLAT_x86_win32) && !defined(__GNUC__) typedef struct { unsigned int nraddr; /* where's the code? */ } OrigFn; #if defined(_MSC_VER) #define __SPECIAL_INSTRUCTION_PREAMBLE \ __asm rol edi, 3 __asm rol edi, 13 \ __asm rol edi, 29 __asm rol edi, 19 #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \ (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \ (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \ (uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5)) static __inline uintptr_t valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request, uintptr_t _zzq_arg1, uintptr_t _zzq_arg2, uintptr_t _zzq_arg3, uintptr_t _zzq_arg4, uintptr_t _zzq_arg5) { volatile uintptr_t _zzq_args[6]; volatile unsigned int _zzq_result; _zzq_args[0] = (uintptr_t)(_zzq_request); _zzq_args[1] = (uintptr_t)(_zzq_arg1); _zzq_args[2] = (uintptr_t)(_zzq_arg2); _zzq_args[3] = (uintptr_t)(_zzq_arg3); _zzq_args[4] = (uintptr_t)(_zzq_arg4); _zzq_args[5] = (uintptr_t)(_zzq_arg5); __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default __SPECIAL_INSTRUCTION_PREAMBLE /* %EDX = client_request ( %EAX ) */ __asm xchg ebx,ebx __asm mov _zzq_result, edx } return _zzq_result; } #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ volatile unsigned int __addr; \ __asm { __SPECIAL_INSTRUCTION_PREAMBLE \ /* %EAX = guest_NRADDR */ \ __asm xchg ecx,ecx \ __asm mov __addr, eax \ } \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_CALL_NOREDIR_EAX ERROR #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm { __SPECIAL_INSTRUCTION_PREAMBLE \ __asm xchg edi,edi \ } \ } while (0) #else #error Unsupported compiler. 
#endif #endif /* PLAT_x86_win32 */ /* ----------------- amd64-{linux,darwin,solaris} --------------- */ #if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ || defined(PLAT_amd64_solaris) \ || (defined(PLAT_amd64_win64) && defined(__GNUC__)) typedef struct { unsigned long int nraddr; /* where's the code? */ } OrigFn; #define __SPECIAL_INSTRUCTION_PREAMBLE \ "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \ "rolq $61, %%rdi ; rolq $51, %%rdi\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ __extension__ \ ({ volatile unsigned long int _zzq_args[6]; \ volatile unsigned long int _zzq_result; \ _zzq_args[0] = (unsigned long int)(_zzq_request); \ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %RDX = client_request ( %RAX ) */ \ "xchgq %%rbx,%%rbx" \ : "=d" (_zzq_result) \ : "a" (&_zzq_args[0]), "0" (_zzq_default) \ : "cc", "memory" \ ); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ volatile unsigned long int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %RAX = guest_NRADDR */ \ "xchgq %%rcx,%%rcx" \ : "=a" (__addr) \ : \ : "cc", "memory" \ ); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_CALL_NOREDIR_RAX \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* call-noredir *%RAX */ \ "xchgq %%rdx,%%rdx\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "xchgq %%rdi,%%rdi\n\t" \ : : : "cc", "memory" \ ); \ } while (0) #endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */ /* ------------------------- amd64-Win64 ------------------------- */ #if defined(PLAT_amd64_win64) && !defined(__GNUC__) #error Unsupported compiler. #endif /* PLAT_amd64_win64 */ /* ------------------------ ppc32-linux ------------------------ */ #if defined(PLAT_ppc32_linux) typedef struct { unsigned int nraddr; /* where's the code? 
*/ } OrigFn; #define __SPECIAL_INSTRUCTION_PREAMBLE \ "rlwinm 0,0,3,0,31 ; rlwinm 0,0,13,0,31\n\t" \ "rlwinm 0,0,29,0,31 ; rlwinm 0,0,19,0,31\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ \ __extension__ \ ({ unsigned int _zzq_args[6]; \ unsigned int _zzq_result; \ unsigned int* _zzq_ptr; \ _zzq_args[0] = (unsigned int)(_zzq_request); \ _zzq_args[1] = (unsigned int)(_zzq_arg1); \ _zzq_args[2] = (unsigned int)(_zzq_arg2); \ _zzq_args[3] = (unsigned int)(_zzq_arg3); \ _zzq_args[4] = (unsigned int)(_zzq_arg4); \ _zzq_args[5] = (unsigned int)(_zzq_arg5); \ _zzq_ptr = _zzq_args; \ __asm__ volatile("mr 3,%1\n\t" /*default*/ \ "mr 4,%2\n\t" /*ptr*/ \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = client_request ( %R4 ) */ \ "or 1,1,1\n\t" \ "mr %0,3" /*result*/ \ : "=b" (_zzq_result) \ : "b" (_zzq_default), "b" (_zzq_ptr) \ : "cc", "memory", "r3", "r4"); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ unsigned int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = guest_NRADDR */ \ "or 2,2,2\n\t" \ "mr %0,3" \ : "=b" (__addr) \ : \ : "cc", "memory", "r3" \ ); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* branch-and-link-to-noredir *%R11 */ \ "or 3,3,3\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "or 5,5,5\n\t" \ ); \ } while (0) #endif /* PLAT_ppc32_linux */ /* ------------------------ ppc64-linux ------------------------ */ #if defined(PLAT_ppc64be_linux) typedef struct { unsigned long int nraddr; /* where's the code? */ unsigned long int r2; /* what tocptr do we need? 
*/ } OrigFn; #define __SPECIAL_INSTRUCTION_PREAMBLE \ "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ "rotldi 0,0,61 ; rotldi 0,0,51\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ \ __extension__ \ ({ unsigned long int _zzq_args[6]; \ unsigned long int _zzq_result; \ unsigned long int* _zzq_ptr; \ _zzq_args[0] = (unsigned long int)(_zzq_request); \ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ _zzq_ptr = _zzq_args; \ __asm__ volatile("mr 3,%1\n\t" /*default*/ \ "mr 4,%2\n\t" /*ptr*/ \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = client_request ( %R4 ) */ \ "or 1,1,1\n\t" \ "mr %0,3" /*result*/ \ : "=b" (_zzq_result) \ : "b" (_zzq_default), "b" (_zzq_ptr) \ : "cc", "memory", "r3", "r4"); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ unsigned long int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = guest_NRADDR */ \ "or 2,2,2\n\t" \ "mr %0,3" \ : "=b" (__addr) \ : \ : "cc", "memory", "r3" \ ); \ _zzq_orig->nraddr = __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = guest_NRADDR_GPR2 */ \ "or 4,4,4\n\t" \ "mr %0,3" \ : "=b" (__addr) \ : \ : "cc", "memory", "r3" \ ); \ _zzq_orig->r2 = __addr; \ } #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* branch-and-link-to-noredir *%R11 */ \ "or 3,3,3\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "or 5,5,5\n\t" \ ); \ } while (0) #endif /* PLAT_ppc64be_linux */ #if defined(PLAT_ppc64le_linux) typedef struct { unsigned long int nraddr; /* where's the code? */ unsigned long int r2; /* what tocptr do we need? 
*/ } OrigFn; #define __SPECIAL_INSTRUCTION_PREAMBLE \ "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ "rotldi 0,0,61 ; rotldi 0,0,51\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ \ __extension__ \ ({ unsigned long int _zzq_args[6]; \ unsigned long int _zzq_result; \ unsigned long int* _zzq_ptr; \ _zzq_args[0] = (unsigned long int)(_zzq_request); \ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ _zzq_ptr = _zzq_args; \ __asm__ volatile("mr 3,%1\n\t" /*default*/ \ "mr 4,%2\n\t" /*ptr*/ \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = client_request ( %R4 ) */ \ "or 1,1,1\n\t" \ "mr %0,3" /*result*/ \ : "=b" (_zzq_result) \ : "b" (_zzq_default), "b" (_zzq_ptr) \ : "cc", "memory", "r3", "r4"); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ unsigned long int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = guest_NRADDR */ \ "or 2,2,2\n\t" \ "mr %0,3" \ : "=b" (__addr) \ : \ : "cc", "memory", "r3" \ ); \ _zzq_orig->nraddr = __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = guest_NRADDR_GPR2 */ \ "or 4,4,4\n\t" \ "mr %0,3" \ : "=b" (__addr) \ : \ : "cc", "memory", "r3" \ ); \ _zzq_orig->r2 = __addr; \ } #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* branch-and-link-to-noredir *%R12 */ \ "or 3,3,3\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "or 5,5,5\n\t" \ ); \ } while (0) #endif /* PLAT_ppc64le_linux */ /* ------------------------- arm-linux ------------------------- */ #if defined(PLAT_arm_linux) typedef struct { unsigned int nraddr; /* where's the code? 
*/ } OrigFn; #define __SPECIAL_INSTRUCTION_PREAMBLE \ "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \ "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ \ __extension__ \ ({volatile unsigned int _zzq_args[6]; \ volatile unsigned int _zzq_result; \ _zzq_args[0] = (unsigned int)(_zzq_request); \ _zzq_args[1] = (unsigned int)(_zzq_arg1); \ _zzq_args[2] = (unsigned int)(_zzq_arg2); \ _zzq_args[3] = (unsigned int)(_zzq_arg3); \ _zzq_args[4] = (unsigned int)(_zzq_arg4); \ _zzq_args[5] = (unsigned int)(_zzq_arg5); \ __asm__ volatile("mov r3, %1\n\t" /*default*/ \ "mov r4, %2\n\t" /*ptr*/ \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* R3 = client_request ( R4 ) */ \ "orr r10, r10, r10\n\t" \ "mov %0, r3" /*result*/ \ : "=r" (_zzq_result) \ : "r" (_zzq_default), "r" (&_zzq_args[0]) \ : "cc","memory", "r3", "r4"); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ unsigned int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* R3 = guest_NRADDR */ \ "orr r11, r11, r11\n\t" \ "mov %0, r3" \ : "=r" (__addr) \ : \ : "cc", "memory", "r3" \ ); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* branch-and-link-to-noredir *%R4 */ \ "orr r12, r12, r12\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "orr r9, r9, r9\n\t" \ : : : "cc", "memory" \ ); \ } while (0) #endif /* PLAT_arm_linux */ /* ------------------------ arm64-linux ------------------------- */ #if defined(PLAT_arm64_linux) typedef struct { unsigned long int nraddr; /* where's the code? */ } OrigFn; #define __SPECIAL_INSTRUCTION_PREAMBLE \ "ror x12, x12, #3 ; ror x12, x12, #13 \n\t" \ "ror x12, x12, #51 ; ror x12, x12, #61 \n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ \ __extension__ \ ({volatile unsigned long int _zzq_args[6]; \ volatile unsigned long int _zzq_result; \ _zzq_args[0] = (unsigned long int)(_zzq_request); \ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ __asm__ volatile("mov x3, %1\n\t" /*default*/ \ "mov x4, %2\n\t" /*ptr*/ \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* X3 = client_request ( X4 ) */ \ "orr x10, x10, x10\n\t" \ "mov %0, x3" /*result*/ \ : "=r" (_zzq_result) \ : "r" ((unsigned long int)(_zzq_default)), \ "r" (&_zzq_args[0]) \ : "cc","memory", "x3", "x4"); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ unsigned long int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* X3 = guest_NRADDR */ \ "orr x11, x11, x11\n\t" \ "mov %0, x3" \ : "=r" (__addr) \ : \ : "cc", "memory", "x3" \ ); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* branch-and-link-to-noredir X8 */ \ "orr x12, x12, x12\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "orr x9, x9, x9\n\t" \ : : : "cc", "memory" \ ); \ } while (0) #endif /* PLAT_arm64_linux */ /* ------------------------ s390x-linux ------------------------ */ #if defined(PLAT_s390x_linux) typedef struct { 
unsigned long int nraddr; /* where's the code? */ } OrigFn; /* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific * code. This detection is implemented in platform specific toIR.c * (e.g. VEX/priv/guest_s390_decoder.c). */ #define __SPECIAL_INSTRUCTION_PREAMBLE \ "lr 15,15\n\t" \ "lr 1,1\n\t" \ "lr 2,2\n\t" \ "lr 3,3\n\t" #define __CLIENT_REQUEST_CODE "lr 2,2\n\t" #define __GET_NR_CONTEXT_CODE "lr 3,3\n\t" #define __CALL_NO_REDIR_CODE "lr 4,4\n\t" #define __VEX_INJECT_IR_CODE "lr 5,5\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ __extension__ \ ({volatile unsigned long int _zzq_args[6]; \ volatile unsigned long int _zzq_result; \ _zzq_args[0] = (unsigned long int)(_zzq_request); \ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ __asm__ volatile(/* r2 = args */ \ "lgr 2,%1\n\t" \ /* r3 = default */ \ "lgr 3,%2\n\t" \ __SPECIAL_INSTRUCTION_PREAMBLE \ __CLIENT_REQUEST_CODE \ /* results = r3 */ \ "lgr %0, 3\n\t" \ : "=d" (_zzq_result) \ : "a" (&_zzq_args[0]), "0" (_zzq_default) \ : "cc", "2", "3", "memory" \ ); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ volatile unsigned long int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ __GET_NR_CONTEXT_CODE \ "lgr %0, 3\n\t" \ : "=a" (__addr) \ : \ : "cc", "3", "memory" \ ); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_CALL_NOREDIR_R1 \ __SPECIAL_INSTRUCTION_PREAMBLE \ __CALL_NO_REDIR_CODE #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ __VEX_INJECT_IR_CODE); \ } while (0) #endif /* PLAT_s390x_linux */ /* ------------------------- mips32-linux ---------------- */ #if defined(PLAT_mips32_linux) typedef struct { unsigned int nraddr; /* where's the code? 
*/ } OrigFn; /* .word 0x342 * .word 0x742 * .word 0xC2 * .word 0x4C2*/ #define __SPECIAL_INSTRUCTION_PREAMBLE \ "srl $0, $0, 13\n\t" \ "srl $0, $0, 29\n\t" \ "srl $0, $0, 3\n\t" \ "srl $0, $0, 19\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ __extension__ \ ({ volatile unsigned int _zzq_args[6]; \ volatile unsigned int _zzq_result; \ _zzq_args[0] = (unsigned int)(_zzq_request); \ _zzq_args[1] = (unsigned int)(_zzq_arg1); \ _zzq_args[2] = (unsigned int)(_zzq_arg2); \ _zzq_args[3] = (unsigned int)(_zzq_arg3); \ _zzq_args[4] = (unsigned int)(_zzq_arg4); \ _zzq_args[5] = (unsigned int)(_zzq_arg5); \ __asm__ volatile("move $11, %1\n\t" /*default*/ \ "move $12, %2\n\t" /*ptr*/ \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* T3 = client_request ( T4 ) */ \ "or $13, $13, $13\n\t" \ "move %0, $11\n\t" /*result*/ \ : "=r" (_zzq_result) \ : "r" (_zzq_default), "r" (&_zzq_args[0]) \ : "$11", "$12", "memory"); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ volatile unsigned int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %t9 = guest_NRADDR */ \ "or $14, $14, $14\n\t" \ "move %0, $11" /*result*/ \ : "=r" (__addr) \ : \ : "$11" \ ); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_CALL_NOREDIR_T9 \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* call-noredir *%t9 */ \ "or $15, $15, $15\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "or $11, $11, $11\n\t" \ ); \ } while (0) #endif /* PLAT_mips32_linux */ /* ------------------------- mips64-linux ---------------- */ #if defined(PLAT_mips64_linux) typedef struct { unsigned long nraddr; /* where's the code? */ } OrigFn; /* dsll $0,$0, 3 * dsll $0,$0, 13 * dsll $0,$0, 29 * dsll $0,$0, 19*/ #define __SPECIAL_INSTRUCTION_PREAMBLE \ "dsll $0,$0, 3 ; dsll $0,$0,13\n\t" \ "dsll $0,$0,29 ; dsll $0,$0,19\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ __extension__ \ ({ volatile unsigned long int _zzq_args[6]; \ volatile unsigned long int _zzq_result; \ _zzq_args[0] = (unsigned long int)(_zzq_request); \ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ __asm__ volatile("move $11, %1\n\t" /*default*/ \ "move $12, %2\n\t" /*ptr*/ \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* $11 = client_request ( $12 ) */ \ "or $13, $13, $13\n\t" \ "move %0, $11\n\t" /*result*/ \ : "=r" (_zzq_result) \ : "r" (_zzq_default), "r" (&_zzq_args[0]) \ : "$11", "$12", "memory"); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ volatile unsigned long int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* $11 = guest_NRADDR */ \ "or $14, $14, $14\n\t" \ "move %0, $11" /*result*/ \ : "=r" (__addr) \ : \ : "$11"); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_CALL_NOREDIR_T9 \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* call-noredir $25 */ \ "or $15, $15, $15\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "or $11, $11, $11\n\t" \ ); \ } while (0) #endif /* PLAT_mips64_linux */ /* Insert assembly code for other platforms here... 
*/ #endif /* NVALGRIND */ /* ------------------------------------------------------------------ */ /* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */ /* ugly. It's the least-worst tradeoff I can think of. */ /* ------------------------------------------------------------------ */ /* This section defines magic (a.k.a. appalling-hack) macros for doing guaranteed-no-redirection calls, so as to get from function wrappers to the functions they are wrapping. The whole point is to construct standard call sequences, but to do the call itself with a special no-redirect call pseudo-instruction that the JIT understands and handles specially. This section is long and repetitious, and I can't see a way to make it shorter. The naming scheme is as follows: CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc} 'W' stands for "word" and 'v' for "void". Hence there are different macros for calling arity 0, 1, 2, 3, 4, etc, functions, and for each, the possibility of returning a word-typed result, or no result. */ /* Use these to write the name of your wrapper. NOTE: duplicates VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. NOTE also: inserts the default behaviour equivalence class tag "0000" into the name. See pub_tool_redir.h for details -- normally you don't need to think about this, though. */ /* Use an extra level of macroisation so as to ensure the soname/fnname args are fully macro-expanded before pasting them together. */ #define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd #define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \ VG_CONCAT4(_vgw00000ZU_,soname,_,fnname) #define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \ VG_CONCAT4(_vgw00000ZZ_,soname,_,fnname) /* Use this macro from within a wrapper function to collect the context (address and possibly other info) of the original function. Once you have that you can then use it in one of the CALL_FN_ macros. The type of the argument _lval is OrigFn. */ #define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval) /* Also provide end-user facilities for function replacement, rather than wrapping. A replacement function differs from a wrapper in that it has no way to get hold of the original function being called, and hence no way to call onwards to it. In a replacement function, VALGRIND_GET_ORIG_FN always returns zero. */ #define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname) \ VG_CONCAT4(_vgr00000ZU_,soname,_,fnname) #define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname) \ VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname) /* Derivatives of the main macros below, for calling functions returning void.
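   Before those, a purely illustrative sketch -- not something defined by
   this header -- of how the wrapping pieces above are typically combined.
   The soname encoding "libfooZdsoZa" (i.e. libfoo.so*) and the function
   name are made up for illustration; any function taking two word-sized
   arguments would do:

      int I_WRAP_SONAME_FNNAME_ZU(libfooZdsoZa, do_work) ( int a, int b )
      {
         int    result;
         OrigFn fn;
         VALGRIND_GET_ORIG_FN(fn);          collect the real do_work
         CALL_FN_W_WW(result, fn, a, b);    call onwards, no redirection
         return result;
      }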
*/ #define CALL_FN_v_v(fnptr) \ do { volatile unsigned long _junk; \ CALL_FN_W_v(_junk,fnptr); } while (0) #define CALL_FN_v_W(fnptr, arg1) \ do { volatile unsigned long _junk; \ CALL_FN_W_W(_junk,fnptr,arg1); } while (0) #define CALL_FN_v_WW(fnptr, arg1,arg2) \ do { volatile unsigned long _junk; \ CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0) #define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \ do { volatile unsigned long _junk; \ CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0) #define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \ do { volatile unsigned long _junk; \ CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0) #define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \ do { volatile unsigned long _junk; \ CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0) #define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \ do { volatile unsigned long _junk; \ CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0) #define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \ do { volatile unsigned long _junk; \ CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0) /* ----------------- x86-{linux,darwin,solaris} ---------------- */ #if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ || defined(PLAT_x86_solaris) /* These regs are trashed by the hidden call. No need to mention eax as gcc can already see that, plus causes gcc to bomb. */ #define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx" /* Macros to save and align the stack before making a function call and restore it afterwards as gcc may not keep the stack pointer aligned if it doesn't realise calls are being made to other functions. */ #define VALGRIND_ALIGN_STACK \ "movl %%esp,%%edi\n\t" \ "andl $0xfffffff0,%%esp\n\t" #define VALGRIND_RESTORE_STACK \ "movl %%edi,%%esp\n\t" /* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned long) == 4. 
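   A reading note on the stack arithmetic in the macros that follow (an
   observation about the code below, not a requirement it states):
   VALGRIND_ALIGN_STACK leaves %esp 16-byte aligned, and each macro then
   pairs its "pushl" sequence with a "subl" pad chosen so that the pad
   plus 4 bytes per push stays a multiple of 16 -- e.g. "subl $12" before
   one push, "subl $8" before two -- so %esp remains 16-byte aligned at
   the point of the call itself.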
*/ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $12, %%esp\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $8, %%esp\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $4, %%esp\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned 
long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $12, %%esp\n\t" \ "pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $8, %%esp\n\t" \ "pushl 24(%%eax)\n\t" \ "pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $4, %%esp\n\t" \ "pushl 28(%%eax)\n\t" \ "pushl 24(%%eax)\n\t" \ "pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "pushl 32(%%eax)\n\t" \ "pushl 28(%%eax)\n\t" \ "pushl 24(%%eax)\n\t" \ "pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); 
\ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $12, %%esp\n\t" \ "pushl 36(%%eax)\n\t" \ "pushl 32(%%eax)\n\t" \ "pushl 28(%%eax)\n\t" \ "pushl 24(%%eax)\n\t" \ "pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $8, %%esp\n\t" \ "pushl 40(%%eax)\n\t" \ "pushl 36(%%eax)\n\t" \ "pushl 32(%%eax)\n\t" \ "pushl 28(%%eax)\n\t" \ "pushl 24(%%eax)\n\t" \ "pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $4, %%esp\n\t" \ "pushl 44(%%eax)\n\t" \ "pushl 40(%%eax)\n\t" \ "pushl 36(%%eax)\n\t" \ "pushl 32(%%eax)\n\t" \ "pushl 28(%%eax)\n\t" \ "pushl 24(%%eax)\n\t" \ "pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ 
arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ _argvec[12] = (unsigned long)(arg12); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "pushl 48(%%eax)\n\t" \ "pushl 44(%%eax)\n\t" \ "pushl 40(%%eax)\n\t" \ "pushl 36(%%eax)\n\t" \ "pushl 32(%%eax)\n\t" \ "pushl 28(%%eax)\n\t" \ "pushl 24(%%eax)\n\t" \ "pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_x86_linux || PLAT_x86_darwin || PLAT_x86_solaris */ /* ---------------- amd64-{linux,darwin,solaris} --------------- */ #if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ || defined(PLAT_amd64_solaris) /* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */ /* These regs are trashed by the hidden call. */ #define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \ "rdi", "r8", "r9", "r10", "r11" /* This is all pretty complex. It's so as to make stack unwinding work reliably. See bug 243270. The basic problem is the sub and add of 128 of %rsp in all of the following macros. If gcc believes the CFA is in %rsp, then unwinding may fail, because what's at the CFA is not what gcc "expected" when it constructs the CFIs for the places where the macros are instantiated. But we can't just add a CFI annotation to increase the CFA offset by 128, to match the sub of 128 from %rsp, because we don't know whether gcc has chosen %rsp as the CFA at that point, or whether it has chosen some other register (eg, %rbp). In the latter case, adding a CFI annotation to change the CFA offset is simply wrong. So the solution is to get hold of the CFA using __builtin_dwarf_cfa(), put it in a known register, and add a CFI annotation to say what the register is. We choose %rbp for this (perhaps perversely), because: (1) %rbp is already subject to unwinding. If a new register was chosen then the unwinder would have to unwind it in all stack traces, which is expensive, and (2) %rbp is already subject to precise exception updates in the JIT. If a new register was chosen, we'd have to have precise exceptions for it too, which reduces performance of the generated code. However .. one extra complication. We can't just whack the result of __builtin_dwarf_cfa() into %rbp and then add %rbp to the list of trashed registers at the end of the inline assembly fragments; gcc won't allow %rbp to appear in that list. Hence instead we need to stash %rbp in %r15 for the duration of the asm, and say that %r15 is trashed instead. gcc seems happy to go with that. Oh .. and this all needs to be conditionalised so that it is unchanged from before this commit, when compiled with older gccs that don't support __builtin_dwarf_cfa. 
Furthermore, since this header file is freestanding, it has to be independent of config.h, and so the following conditionalisation cannot depend on configure time checks. Although it's not clear from 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)', this expression excludes Darwin. .cfi directives in Darwin assembly appear to be completely different and I haven't investigated how they work. For even more entertainment value, note we have to use the completely undocumented __builtin_dwarf_cfa(), which appears to really compute the CFA, whereas __builtin_frame_address(0) claims to but actually doesn't. See https://bugs.kde.org/show_bug.cgi?id=243270#c47 */ #if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM) # define __FRAME_POINTER \ ,"r"(__builtin_dwarf_cfa()) # define VALGRIND_CFI_PROLOGUE \ "movq %%rbp, %%r15\n\t" \ "movq %2, %%rbp\n\t" \ ".cfi_remember_state\n\t" \ ".cfi_def_cfa rbp, 0\n\t" # define VALGRIND_CFI_EPILOGUE \ "movq %%r15, %%rbp\n\t" \ ".cfi_restore_state\n\t" #else # define __FRAME_POINTER # define VALGRIND_CFI_PROLOGUE # define VALGRIND_CFI_EPILOGUE #endif /* Macros to save and align the stack before making a function call and restore it afterwards as gcc may not keep the stack pointer aligned if it doesn't realise calls are being made to other functions. */ #define VALGRIND_ALIGN_STACK \ "movq %%rsp,%%r14\n\t" \ "andq $0xfffffffffffffff0,%%rsp\n\t" #define VALGRIND_RESTORE_STACK \ "movq %%r14,%%rsp\n\t" /* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned long) == 8. */ /* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_ macros. In order not to trash the stack redzone, we need to drop %rsp by 128 before the hidden call, and restore afterwards. The nastiness is that it is only by luck that the stack still appears to be unwindable during the hidden call - since then the behaviour of any routine using this macro does not match what the CFI data says. Sigh. Why is this important? Imagine that a wrapper has a stack allocated local, and passes to the hidden call, a pointer to it. Because gcc does not know about the hidden call, it may allocate that local in the redzone. Unfortunately the hidden call may then trash it before it comes to use it. So we must step clear of the redzone, for the duration of the hidden call, to make it safe. Probably the same problem afflicts the other redzone-style ABIs too (ppc64-linux); but for those, the stack is self describing (none of this CFI nonsense) so at least messing with the stack pointer doesn't give a danger of non-unwindable stack. 
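   To make that hazard concrete, a made-up sketch (not part of this
   header) of the kind of wrapper the note above worries about; the
   soname and function name are invented:

      long I_WRAP_SONAME_FNNAME_ZU(libfooZdsoZa, read_into) ( void )
      {
         long   local;              gcc may place this in the redzone
         long   result;
         OrigFn fn;
         VALGRIND_GET_ORIG_FN(fn);
         CALL_FN_W_W(result, fn, (unsigned long)&local);
         return result + local;
      }

   Without the "subq $128" in the macros below, the hidden call's own
   frame could overlap and trash 'local' before the wrapped function
   gets to use the pointer it was given.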
*/ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "movq 40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "movq 48(%%rax), %%r9\n\t" \ "movq 40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $136,%%rsp\n\t" \ "pushq 56(%%rax)\n\t" \ "movq 48(%%rax), %%r9\n\t" \ "movq 40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned 
long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "pushq 64(%%rax)\n\t" \ "pushq 56(%%rax)\n\t" \ "movq 48(%%rax), %%r9\n\t" \ "movq 40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $136,%%rsp\n\t" \ "pushq 72(%%rax)\n\t" \ "pushq 64(%%rax)\n\t" \ "pushq 56(%%rax)\n\t" \ "movq 48(%%rax), %%r9\n\t" \ "movq 40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "pushq 80(%%rax)\n\t" \ "pushq 72(%%rax)\n\t" \ "pushq 64(%%rax)\n\t" \ "pushq 56(%%rax)\n\t" \ "movq 48(%%rax), %%r9\n\t" \ "movq 40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned 
long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $136,%%rsp\n\t" \ "pushq 88(%%rax)\n\t" \ "pushq 80(%%rax)\n\t" \ "pushq 72(%%rax)\n\t" \ "pushq 64(%%rax)\n\t" \ "pushq 56(%%rax)\n\t" \ "movq 48(%%rax), %%r9\n\t" \ "movq 40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ _argvec[12] = (unsigned long)(arg12); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "pushq 96(%%rax)\n\t" \ "pushq 88(%%rax)\n\t" \ "pushq 80(%%rax)\n\t" \ "pushq 72(%%rax)\n\t" \ "pushq 64(%%rax)\n\t" \ "pushq 56(%%rax)\n\t" \ "movq 48(%%rax), %%r9\n\t" \ "movq 40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */ /* ------------------------ ppc32-linux ------------------------ */ #if defined(PLAT_ppc32_linux) /* This is useful for finding out about the on-stack stuff: extern int f9 ( int,int,int,int,int,int,int,int,int ); extern int f10 ( int,int,int,int,int,int,int,int,int,int ); extern int f11 ( int,int,int,int,int,int,int,int,int,int,int ); extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int ); int g9 ( void ) { return f9(11,22,33,44,55,66,77,88,99); } int g10 ( void ) { return f10(11,22,33,44,55,66,77,88,99,110); } int g11 ( void ) { return f11(11,22,33,44,55,66,77,88,99,110,121); } int g12 ( void ) { return f12(11,22,33,44,55,66,77,88,99,110,121,132); } */ /* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ /* These regs are trashed by the hidden call. 
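   A note on the macros below, offered as a description of the code
   rather than anything this header adds: VALGRIND_ALIGN_STACK first
   copies the stack pointer into r28 ("mr 28,1") and then clears the low
   four bits of r1 ("rlwinm 1,1,0,0,27") to force 16-byte alignment,
   while VALGRIND_RESTORE_STACK copies r28 back.  That is why "r28"
   appears in the clobber list of every CALL_FN_ macro in this section
   even though it is not part of __CALLER_SAVED_REGS.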
*/ #define __CALLER_SAVED_REGS \ "lr", "ctr", "xer", \ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ "r11", "r12", "r13" /* Macros to save and align the stack before making a function call and restore it afterwards as gcc may not keep the stack pointer aligned if it doesn't realise calls are being made to other functions. */ #define VALGRIND_ALIGN_STACK \ "mr 28,1\n\t" \ "rlwinm 1,1,0,0,27\n\t" #define VALGRIND_RESTORE_STACK \ "mr 1,28\n\t" /* These CALL_FN_ macros assume that on ppc32-linux, sizeof(unsigned long) == 4. */ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 
3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 8,24(11)\n\t" \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 8,24(11)\n\t" \ "lwz 9,28(11)\n\t" \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ 
_argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 8,24(11)\n\t" \ "lwz 9,28(11)\n\t" \ "lwz 10,32(11)\n\t" /* arg8->r10 */ \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "addi 1,1,-16\n\t" \ /* arg9 */ \ "lwz 3,36(11)\n\t" \ "stw 3,8(1)\n\t" \ /* args1-8 */ \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 8,24(11)\n\t" \ "lwz 9,28(11)\n\t" \ "lwz 10,32(11)\n\t" /* arg8->r10 */ \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ _argvec[10] = (unsigned long)arg10; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "addi 1,1,-16\n\t" \ /* arg10 */ \ "lwz 3,40(11)\n\t" \ "stw 3,12(1)\n\t" \ /* arg9 */ \ "lwz 3,36(11)\n\t" \ "stw 3,8(1)\n\t" \ /* args1-8 */ \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 8,24(11)\n\t" \ "lwz 9,28(11)\n\t" \ "lwz 10,32(11)\n\t" /* arg8->r10 */ \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned 
long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ _argvec[10] = (unsigned long)arg10; \ _argvec[11] = (unsigned long)arg11; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "addi 1,1,-32\n\t" \ /* arg11 */ \ "lwz 3,44(11)\n\t" \ "stw 3,16(1)\n\t" \ /* arg10 */ \ "lwz 3,40(11)\n\t" \ "stw 3,12(1)\n\t" \ /* arg9 */ \ "lwz 3,36(11)\n\t" \ "stw 3,8(1)\n\t" \ /* args1-8 */ \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 8,24(11)\n\t" \ "lwz 9,28(11)\n\t" \ "lwz 10,32(11)\n\t" /* arg8->r10 */ \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ _argvec[10] = (unsigned long)arg10; \ _argvec[11] = (unsigned long)arg11; \ _argvec[12] = (unsigned long)arg12; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "addi 1,1,-32\n\t" \ /* arg12 */ \ "lwz 3,48(11)\n\t" \ "stw 3,20(1)\n\t" \ /* arg11 */ \ "lwz 3,44(11)\n\t" \ "stw 3,16(1)\n\t" \ /* arg10 */ \ "lwz 3,40(11)\n\t" \ "stw 3,12(1)\n\t" \ /* arg9 */ \ "lwz 3,36(11)\n\t" \ "stw 3,8(1)\n\t" \ /* args1-8 */ \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 8,24(11)\n\t" \ "lwz 9,28(11)\n\t" \ "lwz 10,32(11)\n\t" /* arg8->r10 */ \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_ppc32_linux */ /* ------------------------ ppc64-linux ------------------------ */ #if defined(PLAT_ppc64be_linux) /* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ /* These regs are trashed by the hidden call. */ #define __CALLER_SAVED_REGS \ "lr", "ctr", "xer", \ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ "r11", "r12", "r13" /* Macros to save and align the stack before making a function call and restore it afterwards as gcc may not keep the stack pointer aligned if it doesn't realise calls are being made to other functions. */ #define VALGRIND_ALIGN_STACK \ "mr 28,1\n\t" \ "rldicr 1,1,0,59\n\t" #define VALGRIND_RESTORE_STACK \ "mr 1,28\n\t" /* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned long) == 8. 
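   A reading note on the TOC handling below (a description of the code,
   not an addition to it): each macro points r11 at &_argvec[2], so
   "std 2,-16(11)" parks the caller's TOC pointer (r2) in _argvec[0],
   "ld 2,-8(11)" installs the wrapped function's TOC from _argvec[1]
   (which was filled in from OrigFn.r2), and after the call
   "ld 2,-16(11)" puts the caller's TOC back before the stack is
   restored.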
*/ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+0]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+1]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+2]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+3]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while 
(0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+4]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+5]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+6]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 8, 48(11)\n\t" /* arg6->r8 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, 
arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+7]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 8, 48(11)\n\t" /* arg6->r8 */ \ "ld 9, 56(11)\n\t" /* arg7->r9 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+8]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 8, 48(11)\n\t" /* arg6->r8 */ \ "ld 9, 56(11)\n\t" /* arg7->r9 */ \ "ld 10, 64(11)\n\t" /* arg8->r10 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+9]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-128\n\t" /* expand stack 
frame */ \ /* arg9 */ \ "ld 3,72(11)\n\t" \ "std 3,112(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 8, 48(11)\n\t" /* arg6->r8 */ \ "ld 9, 56(11)\n\t" /* arg7->r9 */ \ "ld 10, 64(11)\n\t" /* arg8->r10 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+10]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ _argvec[2+10] = (unsigned long)arg10; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-128\n\t" /* expand stack frame */ \ /* arg10 */ \ "ld 3,80(11)\n\t" \ "std 3,120(1)\n\t" \ /* arg9 */ \ "ld 3,72(11)\n\t" \ "std 3,112(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 8, 48(11)\n\t" /* arg6->r8 */ \ "ld 9, 56(11)\n\t" /* arg7->r9 */ \ "ld 10, 64(11)\n\t" /* arg8->r10 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+11]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ _argvec[2+10] = (unsigned long)arg10; \ _argvec[2+11] = (unsigned long)arg11; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-144\n\t" /* expand stack frame */ \ /* arg11 */ \ "ld 3,88(11)\n\t" \ "std 3,128(1)\n\t" \ /* arg10 */ \ "ld 3,80(11)\n\t" \ "std 3,120(1)\n\t" \ /* arg9 */ \ "ld 3,72(11)\n\t" \ "std 3,112(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ 
\ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 8, 48(11)\n\t" /* arg6->r8 */ \ "ld 9, 56(11)\n\t" /* arg7->r9 */ \ "ld 10, 64(11)\n\t" /* arg8->r10 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+12]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ _argvec[2+10] = (unsigned long)arg10; \ _argvec[2+11] = (unsigned long)arg11; \ _argvec[2+12] = (unsigned long)arg12; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-144\n\t" /* expand stack frame */ \ /* arg12 */ \ "ld 3,96(11)\n\t" \ "std 3,136(1)\n\t" \ /* arg11 */ \ "ld 3,88(11)\n\t" \ "std 3,128(1)\n\t" \ /* arg10 */ \ "ld 3,80(11)\n\t" \ "std 3,120(1)\n\t" \ /* arg9 */ \ "ld 3,72(11)\n\t" \ "std 3,112(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 8, 48(11)\n\t" /* arg6->r8 */ \ "ld 9, 56(11)\n\t" /* arg7->r9 */ \ "ld 10, 64(11)\n\t" /* arg8->r10 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_ppc64be_linux */ /* ------------------------- ppc64le-linux ----------------------- */ #if defined(PLAT_ppc64le_linux) /* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ /* These regs are trashed by the hidden call. */ #define __CALLER_SAVED_REGS \ "lr", "ctr", "xer", \ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ "r11", "r12", "r13" /* Macros to save and align the stack before making a function call and restore it afterwards as gcc may not keep the stack pointer aligned if it doesn't realise calls are being made to other functions. */ #define VALGRIND_ALIGN_STACK \ "mr 28,1\n\t" \ "rldicr 1,1,0,59\n\t" #define VALGRIND_RESTORE_STACK \ "mr 1,28\n\t" /* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned long) == 8. 
*/ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+0]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+1]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+2]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+3]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while 
(0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+4]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+5]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+6]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 8, 48(12)\n\t" /* arg6->r8 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, 
arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+7]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 8, 48(12)\n\t" /* arg6->r8 */ \ "ld 9, 56(12)\n\t" /* arg7->r9 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+8]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 8, 48(12)\n\t" /* arg6->r8 */ \ "ld 9, 56(12)\n\t" /* arg7->r9 */ \ "ld 10, 64(12)\n\t" /* arg8->r10 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+9]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-128\n\t" /* expand stack 
frame */ \ /* arg9 */ \ "ld 3,72(12)\n\t" \ "std 3,96(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 8, 48(12)\n\t" /* arg6->r8 */ \ "ld 9, 56(12)\n\t" /* arg7->r9 */ \ "ld 10, 64(12)\n\t" /* arg8->r10 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+10]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ _argvec[2+10] = (unsigned long)arg10; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-128\n\t" /* expand stack frame */ \ /* arg10 */ \ "ld 3,80(12)\n\t" \ "std 3,104(1)\n\t" \ /* arg9 */ \ "ld 3,72(12)\n\t" \ "std 3,96(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 8, 48(12)\n\t" /* arg6->r8 */ \ "ld 9, 56(12)\n\t" /* arg7->r9 */ \ "ld 10, 64(12)\n\t" /* arg8->r10 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+11]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ _argvec[2+10] = (unsigned long)arg10; \ _argvec[2+11] = (unsigned long)arg11; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-144\n\t" /* expand stack frame */ \ /* arg11 */ \ "ld 3,88(12)\n\t" \ "std 3,112(1)\n\t" \ /* arg10 */ \ "ld 3,80(12)\n\t" \ "std 3,104(1)\n\t" \ /* arg9 */ \ "ld 3,72(12)\n\t" \ "std 3,96(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ 
"ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 8, 48(12)\n\t" /* arg6->r8 */ \ "ld 9, 56(12)\n\t" /* arg7->r9 */ \ "ld 10, 64(12)\n\t" /* arg8->r10 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+12]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ _argvec[2+10] = (unsigned long)arg10; \ _argvec[2+11] = (unsigned long)arg11; \ _argvec[2+12] = (unsigned long)arg12; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-144\n\t" /* expand stack frame */ \ /* arg12 */ \ "ld 3,96(12)\n\t" \ "std 3,120(1)\n\t" \ /* arg11 */ \ "ld 3,88(12)\n\t" \ "std 3,112(1)\n\t" \ /* arg10 */ \ "ld 3,80(12)\n\t" \ "std 3,104(1)\n\t" \ /* arg9 */ \ "ld 3,72(12)\n\t" \ "std 3,96(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 8, 48(12)\n\t" /* arg6->r8 */ \ "ld 9, 56(12)\n\t" /* arg7->r9 */ \ "ld 10, 64(12)\n\t" /* arg8->r10 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_ppc64le_linux */ /* ------------------------- arm-linux ------------------------- */ #if defined(PLAT_arm_linux) /* These regs are trashed by the hidden call. */ #define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4", "r12", "r14" /* Macros to save and align the stack before making a function call and restore it afterwards as gcc may not keep the stack pointer aligned if it doesn't realise calls are being made to other functions. */ /* This is a bit tricky. We store the original stack pointer in r10 as it is callee-saves. gcc doesn't allow the use of r11 for some reason. Also, we can't directly "bic" the stack pointer in thumb mode since r13 isn't an allowed register number in that context. So use r4 as a temporary, since that is about to get trashed anyway, just after each use of this macro. Side effect is we need to be very careful about any future changes, since VALGRIND_ALIGN_STACK simply assumes r4 is usable. 
*/ #define VALGRIND_ALIGN_STACK \ "mov r10, sp\n\t" \ "mov r4, sp\n\t" \ "bic r4, r4, #7\n\t" \ "mov sp, r4\n\t" #define VALGRIND_RESTORE_STACK \ "mov sp, r10\n\t" /* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned long) == 4. */ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #4] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, 
arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #4 \n\t" \ "ldr r0, [%1, #20] \n\t" \ "push {r0} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #20] \n\t" \ "ldr r1, [%1, #24] \n\t" \ "push {r0, r1} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #4 \n\t" \ "ldr r0, [%1, #20] \n\t" \ "ldr r1, [%1, #24] \n\t" \ "ldr r2, [%1, #28] \n\t" \ "push {r0, r1, r2} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #20] \n\t" \ "ldr r1, [%1, #24] \n\t" \ "ldr r2, [%1, #28] \n\t" \ "ldr r3, [%1, 
#32] \n\t" \ "push {r0, r1, r2, r3} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #4 \n\t" \ "ldr r0, [%1, #20] \n\t" \ "ldr r1, [%1, #24] \n\t" \ "ldr r2, [%1, #28] \n\t" \ "ldr r3, [%1, #32] \n\t" \ "ldr r4, [%1, #36] \n\t" \ "push {r0, r1, r2, r3, r4} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #40] \n\t" \ "push {r0} \n\t" \ "ldr r0, [%1, #20] \n\t" \ "ldr r1, [%1, #24] \n\t" \ "ldr r2, [%1, #28] \n\t" \ "ldr r3, [%1, #32] \n\t" \ "ldr r4, [%1, #36] \n\t" \ "push {r0, r1, r2, r3, r4} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ 
_argvec[11] = (unsigned long)(arg11); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #4 \n\t" \ "ldr r0, [%1, #40] \n\t" \ "ldr r1, [%1, #44] \n\t" \ "push {r0, r1} \n\t" \ "ldr r0, [%1, #20] \n\t" \ "ldr r1, [%1, #24] \n\t" \ "ldr r2, [%1, #28] \n\t" \ "ldr r3, [%1, #32] \n\t" \ "ldr r4, [%1, #36] \n\t" \ "push {r0, r1, r2, r3, r4} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ _argvec[12] = (unsigned long)(arg12); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #40] \n\t" \ "ldr r1, [%1, #44] \n\t" \ "ldr r2, [%1, #48] \n\t" \ "push {r0, r1, r2} \n\t" \ "ldr r0, [%1, #20] \n\t" \ "ldr r1, [%1, #24] \n\t" \ "ldr r2, [%1, #28] \n\t" \ "ldr r3, [%1, #32] \n\t" \ "ldr r4, [%1, #36] \n\t" \ "push {r0, r1, r2, r3, r4} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_arm_linux */ /* ------------------------ arm64-linux ------------------------ */ #if defined(PLAT_arm64_linux) /* These regs are trashed by the hidden call. */ #define __CALLER_SAVED_REGS \ "x0", "x1", "x2", "x3","x4", "x5", "x6", "x7", "x8", "x9", \ "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", \ "x18", "x19", "x20", "x30", \ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", \ "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", \ "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", \ "v26", "v27", "v28", "v29", "v30", "v31" /* x21 is callee-saved, so we can use it to save and restore SP around the hidden call. */ #define VALGRIND_ALIGN_STACK \ "mov x21, sp\n\t" \ "bic sp, x21, #15\n\t" #define VALGRIND_RESTORE_STACK \ "mov sp, x21\n\t" /* These CALL_FN_ macros assume that on arm64-linux, sizeof(unsigned long) == 8. 
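
   Illustrative note (an addition, derived from the macro bodies below):
   arguments 1..8 travel in x0..x7 and the call target in x8; for nine or
   more arguments the macros first drop SP by 0x20 or 0x30 bytes and then
   spill the extras onto the new stack top, e.g. for CALL_FN_W_10W

      sub sp, sp, #0x20        // room for the stack-passed arguments
      // arg9  is stored to [sp, #0], arg10 to [sp, #8], via x8 as scratch
      ldr x8, [%1]             // finally load the target address into x8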
*/ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned 
long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x5, [%1, #48] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x5, [%1, #48] \n\t" \ "ldr x6, [%1, #56] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x5, [%1, #48] \n\t" \ "ldr x6, [%1, #56] \n\t" \ "ldr x7, [%1, #64] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ 
lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #0x20 \n\t" \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x5, [%1, #48] \n\t" \ "ldr x6, [%1, #56] \n\t" \ "ldr x7, [%1, #64] \n\t" \ "ldr x8, [%1, #72] \n\t" \ "str x8, [sp, #0] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #0x20 \n\t" \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x5, [%1, #48] \n\t" \ "ldr x6, [%1, #56] \n\t" \ "ldr x7, [%1, #64] \n\t" \ "ldr x8, [%1, #72] \n\t" \ "str x8, [sp, #0] \n\t" \ "ldr x8, [%1, #80] \n\t" \ "str x8, [sp, #8] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #0x30 \n\t" \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x5, [%1, #48] \n\t" \ "ldr x6, [%1, #56] \n\t" \ "ldr x7, [%1, #64] \n\t" \ "ldr x8, [%1, #72] \n\t" \ "str x8, [sp, #0] \n\t" \ 
"ldr x8, [%1, #80] \n\t" \ "str x8, [sp, #8] \n\t" \ "ldr x8, [%1, #88] \n\t" \ "str x8, [sp, #16] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11, \ arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ _argvec[12] = (unsigned long)(arg12); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #0x30 \n\t" \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x5, [%1, #48] \n\t" \ "ldr x6, [%1, #56] \n\t" \ "ldr x7, [%1, #64] \n\t" \ "ldr x8, [%1, #72] \n\t" \ "str x8, [sp, #0] \n\t" \ "ldr x8, [%1, #80] \n\t" \ "str x8, [sp, #8] \n\t" \ "ldr x8, [%1, #88] \n\t" \ "str x8, [sp, #16] \n\t" \ "ldr x8, [%1, #96] \n\t" \ "str x8, [sp, #24] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_arm64_linux */ /* ------------------------- s390x-linux ------------------------- */ #if defined(PLAT_s390x_linux) /* Similar workaround as amd64 (see above), but we use r11 as frame pointer and save the old r11 in r7. r11 might be used for argvec, therefore we copy argvec in r1 since r1 is clobbered after the call anyway. */ #if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM) # define __FRAME_POINTER \ ,"d"(__builtin_dwarf_cfa()) # define VALGRIND_CFI_PROLOGUE \ ".cfi_remember_state\n\t" \ "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \ "lgr 7,11\n\t" \ "lgr 11,%2\n\t" \ ".cfi_def_cfa r11, 0\n\t" # define VALGRIND_CFI_EPILOGUE \ "lgr 11, 7\n\t" \ ".cfi_restore_state\n\t" #else # define __FRAME_POINTER # define VALGRIND_CFI_PROLOGUE \ "lgr 1,%1\n\t" # define VALGRIND_CFI_EPILOGUE #endif /* Nb: On s390 the stack pointer is properly aligned *at all times* according to the s390 GCC maintainer. (The ABI specification is not precise in this regard.) Therefore, VALGRIND_ALIGN_STACK and VALGRIND_RESTORE_STACK are not defined here. */ /* These regs are trashed by the hidden call. Note that we overwrite r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the function a proper return address. All others are ABI defined call clobbers. 
*/ #define __CALLER_SAVED_REGS "0","1","2","3","4","5","14", \ "f0","f1","f2","f3","f4","f5","f6","f7" /* Nb: Although r11 is modified in the asm snippets below (inside VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for two reasons: (1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is not modified (2) GCC will complain that r11 cannot appear inside a clobber section, when compiled with -O -fno-omit-frame-pointer */ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-160\n\t" \ "lg 1, 0(1)\n\t" /* target->r1 */ \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) /* The call abi has the arguments in r2-r6 and stack */ #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-160\n\t" \ "lg 2, 8(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1, arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-160\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-160\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-160\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 1, 0(1)\n\t" \ 
VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-160\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-168\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "mvc 160(8,15), 48(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,168\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ arg6, arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-176\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "mvc 160(8,15), 48(1)\n\t" \ "mvc 168(8,15), 56(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,176\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ arg6, arg7 ,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ __asm__ 
volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-184\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "mvc 160(8,15), 48(1)\n\t" \ "mvc 168(8,15), 56(1)\n\t" \ "mvc 176(8,15), 64(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,184\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ arg6, arg7 ,arg8, arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-192\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "mvc 160(8,15), 48(1)\n\t" \ "mvc 168(8,15), 56(1)\n\t" \ "mvc 176(8,15), 64(1)\n\t" \ "mvc 184(8,15), 72(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,192\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ arg6, arg7 ,arg8, arg9, arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ _argvec[10] = (unsigned long)arg10; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-200\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "mvc 160(8,15), 48(1)\n\t" \ "mvc 168(8,15), 56(1)\n\t" \ "mvc 176(8,15), 64(1)\n\t" \ "mvc 184(8,15), 72(1)\n\t" \ "mvc 192(8,15), 80(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,200\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ arg6, arg7 ,arg8, arg9, arg10, arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ _argvec[10] = (unsigned long)arg10; \ _argvec[11] = (unsigned long)arg11; \ __asm__ volatile( \ 
VALGRIND_CFI_PROLOGUE \ "aghi 15,-208\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "mvc 160(8,15), 48(1)\n\t" \ "mvc 168(8,15), 56(1)\n\t" \ "mvc 176(8,15), 64(1)\n\t" \ "mvc 184(8,15), 72(1)\n\t" \ "mvc 192(8,15), 80(1)\n\t" \ "mvc 200(8,15), 88(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,208\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ arg6, arg7 ,arg8, arg9, arg10, arg11, arg12)\ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ _argvec[10] = (unsigned long)arg10; \ _argvec[11] = (unsigned long)arg11; \ _argvec[12] = (unsigned long)arg12; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-216\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "mvc 160(8,15), 48(1)\n\t" \ "mvc 168(8,15), 56(1)\n\t" \ "mvc 176(8,15), 64(1)\n\t" \ "mvc 184(8,15), 72(1)\n\t" \ "mvc 192(8,15), 80(1)\n\t" \ "mvc 200(8,15), 88(1)\n\t" \ "mvc 208(8,15), 96(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,216\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_s390x_linux */ /* ------------------------- mips32-linux ----------------------- */ #if defined(PLAT_mips32_linux) /* These regs are trashed by the hidden call. */ #define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \ "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ "$25", "$31" /* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned long) == 4. 
*/ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "subu $29, $29, 16 \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 16\n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "subu $29, $29, 16 \n\t" \ "lw $4, 4(%1) \n\t" /* arg1*/ \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 16 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "subu $29, $29, 16 \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 16 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "subu $29, $29, 16 \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 16 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "subu $29, $29, 16 \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw 
$25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 16 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 24\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 24 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 32\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 24(%1) \n\t" \ "nop\n\t" \ "sw $4, 20($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 32 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 32\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 24(%1) \n\t" \ "sw $4, 20($29) \n\t" \ "lw $4, 28(%1) \n\t" \ "sw $4, 24($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 32 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ 
lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 40\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 24(%1) \n\t" \ "sw $4, 20($29) \n\t" \ "lw $4, 28(%1) \n\t" \ "sw $4, 24($29) \n\t" \ "lw $4, 32(%1) \n\t" \ "sw $4, 28($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 40 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 40\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 24(%1) \n\t" \ "sw $4, 20($29) \n\t" \ "lw $4, 28(%1) \n\t" \ "sw $4, 24($29) \n\t" \ "lw $4, 32(%1) \n\t" \ "sw $4, 28($29) \n\t" \ "lw $4, 36(%1) \n\t" \ "sw $4, 32($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 40 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 48\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 24(%1) \n\t" \ "sw $4, 20($29) \n\t" \ "lw $4, 28(%1) \n\t" \ "sw $4, 24($29) 
\n\t" \ "lw $4, 32(%1) \n\t" \ "sw $4, 28($29) \n\t" \ "lw $4, 36(%1) \n\t" \ "sw $4, 32($29) \n\t" \ "lw $4, 40(%1) \n\t" \ "sw $4, 36($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 48 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 48\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 24(%1) \n\t" \ "sw $4, 20($29) \n\t" \ "lw $4, 28(%1) \n\t" \ "sw $4, 24($29) \n\t" \ "lw $4, 32(%1) \n\t" \ "sw $4, 28($29) \n\t" \ "lw $4, 36(%1) \n\t" \ "sw $4, 32($29) \n\t" \ "lw $4, 40(%1) \n\t" \ "sw $4, 36($29) \n\t" \ "lw $4, 44(%1) \n\t" \ "sw $4, 40($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 48 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ _argvec[12] = (unsigned long)(arg12); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 56\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 24(%1) \n\t" \ "sw $4, 20($29) \n\t" \ "lw $4, 28(%1) \n\t" \ "sw $4, 24($29) \n\t" \ "lw $4, 32(%1) \n\t" \ "sw $4, 28($29) \n\t" \ "lw $4, 36(%1) \n\t" \ "sw $4, 32($29) \n\t" \ "lw $4, 40(%1) \n\t" \ "sw $4, 36($29) \n\t" \ "lw $4, 44(%1) \n\t" \ "sw $4, 40($29) \n\t" \ "lw $4, 48(%1) \n\t" \ "sw $4, 44($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 56 \n\t" \ "lw $28, 0($29) \n\t" \ "lw 
$31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_mips32_linux */ /* ------------------------- mips64-linux ------------------------- */ #if defined(PLAT_mips64_linux) /* These regs are trashed by the hidden call. */ #define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \ "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ "$25", "$31" /* These CALL_FN_ macros assume that on mips64-linux, sizeof(long long) == 8. */ #define MIPS64_LONG2REG_CAST(x) ((long long)(long)x) #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long long _argvec[1]; \ volatile unsigned long long _res; \ _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ __asm__ volatile( \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) (long)_res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long long _argvec[2]; \ volatile unsigned long long _res; \ _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" /* arg1*/ \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) (long)_res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long long _argvec[3]; \ volatile unsigned long long _res; \ _argvec[0] = _orig.nraddr; \ _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) (long)_res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long long _argvec[4]; \ volatile unsigned long long _res; \ _argvec[0] = _orig.nraddr; \ _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) (long)_res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long long _argvec[5]; \ volatile unsigned long long _res; \ _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : 
/*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) (long)_res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long long _argvec[6]; \ volatile unsigned long long _res; \ _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) (long)_res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long long _argvec[7]; \ volatile unsigned long long _res; \ _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $9, 48(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) (long)_res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long long _argvec[8]; \ volatile unsigned long long _res; \ _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $9, 48(%1)\n\t" \ "ld $10, 56(%1)\n\t" \ "ld $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) (long)_res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long long _argvec[9]; \ volatile unsigned long long _res; \ _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $9, 48(%1)\n\t" \ "ld $10, 56(%1)\n\t" \ "ld $11, 64(%1)\n\t" \ "ld $25, 
0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) (long)_res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long long _argvec[10]; \ volatile unsigned long long _res; \ _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ __asm__ volatile( \ "dsubu $29, $29, 8\n\t" \ "ld $4, 72(%1)\n\t" \ "sd $4, 0($29)\n\t" \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $9, 48(%1)\n\t" \ "ld $10, 56(%1)\n\t" \ "ld $11, 64(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "daddu $29, $29, 8\n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) (long)_res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long long _argvec[11]; \ volatile unsigned long long _res; \ _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \ __asm__ volatile( \ "dsubu $29, $29, 16\n\t" \ "ld $4, 72(%1)\n\t" \ "sd $4, 0($29)\n\t" \ "ld $4, 80(%1)\n\t" \ "sd $4, 8($29)\n\t" \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $9, 48(%1)\n\t" \ "ld $10, 56(%1)\n\t" \ "ld $11, 64(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "daddu $29, $29, 16\n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) (long)_res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long long _argvec[12]; \ volatile unsigned long long _res; \ _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \ _argvec[11] = MIPS64_LONG2REG_CAST(arg11); \ __asm__ volatile( \ "dsubu $29, $29, 24\n\t" \ "ld $4, 72(%1)\n\t" \ "sd $4, 0($29)\n\t" \ "ld $4, 80(%1)\n\t" \ "sd $4, 8($29)\n\t" 
\ "ld $4, 88(%1)\n\t" \ "sd $4, 16($29)\n\t" \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $9, 48(%1)\n\t" \ "ld $10, 56(%1)\n\t" \ "ld $11, 64(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "daddu $29, $29, 24\n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) (long)_res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long long _argvec[13]; \ volatile unsigned long long _res; \ _argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \ _argvec[1] = MIPS64_LONG2REG_CAST(arg1); \ _argvec[2] = MIPS64_LONG2REG_CAST(arg2); \ _argvec[3] = MIPS64_LONG2REG_CAST(arg3); \ _argvec[4] = MIPS64_LONG2REG_CAST(arg4); \ _argvec[5] = MIPS64_LONG2REG_CAST(arg5); \ _argvec[6] = MIPS64_LONG2REG_CAST(arg6); \ _argvec[7] = MIPS64_LONG2REG_CAST(arg7); \ _argvec[8] = MIPS64_LONG2REG_CAST(arg8); \ _argvec[9] = MIPS64_LONG2REG_CAST(arg9); \ _argvec[10] = MIPS64_LONG2REG_CAST(arg10); \ _argvec[11] = MIPS64_LONG2REG_CAST(arg11); \ _argvec[12] = MIPS64_LONG2REG_CAST(arg12); \ __asm__ volatile( \ "dsubu $29, $29, 32\n\t" \ "ld $4, 72(%1)\n\t" \ "sd $4, 0($29)\n\t" \ "ld $4, 80(%1)\n\t" \ "sd $4, 8($29)\n\t" \ "ld $4, 88(%1)\n\t" \ "sd $4, 16($29)\n\t" \ "ld $4, 96(%1)\n\t" \ "sd $4, 24($29)\n\t" \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $9, 48(%1)\n\t" \ "ld $10, 56(%1)\n\t" \ "ld $11, 64(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "daddu $29, $29, 32\n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) (long)_res; \ } while (0) #endif /* PLAT_mips64_linux */ /* ------------------------------------------------------------------ */ /* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */ /* */ /* ------------------------------------------------------------------ */ /* Some request codes. There are many more of these, but most are not exposed to end-user view. These are the public ones, all of the form 0x1000 + small_number. Core ones are in the range 0x00000000--0x0000ffff. The non-public ones start at 0x2000. */ /* These macros are used by tools -- they must be public, but don't embed them into other programs. */ #define VG_USERREQ_TOOL_BASE(a,b) \ ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16)) #define VG_IS_TOOL_USERREQ(a, b, v) \ (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000)) /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! This enum comprises an ABI exported by Valgrind to programs which use client requests. DO NOT CHANGE THE NUMERIC VALUES OF THESE ENTRIES, NOR DELETE ANY -- add new ones at the end of the most relevant group. */ typedef enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001, VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002, /* These allow any function to be called from the simulated CPU but run on the real CPU. Nb: the first arg passed to the function is always the ThreadId of the running thread! So CLIENT_CALL0 actually requires a 1 arg function, etc. */ VG_USERREQ__CLIENT_CALL0 = 0x1101, VG_USERREQ__CLIENT_CALL1 = 0x1102, VG_USERREQ__CLIENT_CALL2 = 0x1103, VG_USERREQ__CLIENT_CALL3 = 0x1104, /* Can be useful in regression testing suites -- eg. 
can send Valgrind's output to /dev/null and still count errors. */ VG_USERREQ__COUNT_ERRORS = 0x1201, /* Allows the client program and/or gdbserver to execute a monitor command. */ VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202, /* These are useful and can be interpreted by any tool that tracks malloc() et al, by using vg_replace_malloc.c. */ VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301, VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b, VG_USERREQ__FREELIKE_BLOCK = 0x1302, /* Memory pool support. */ VG_USERREQ__CREATE_MEMPOOL = 0x1303, VG_USERREQ__DESTROY_MEMPOOL = 0x1304, VG_USERREQ__MEMPOOL_ALLOC = 0x1305, VG_USERREQ__MEMPOOL_FREE = 0x1306, VG_USERREQ__MEMPOOL_TRIM = 0x1307, VG_USERREQ__MOVE_MEMPOOL = 0x1308, VG_USERREQ__MEMPOOL_CHANGE = 0x1309, VG_USERREQ__MEMPOOL_EXISTS = 0x130a, /* Allow printfs to valgrind log. */ /* The first two pass the va_list argument by value, which assumes it is the same size as or smaller than a UWord, which generally isn't the case. Hence are deprecated. The second two pass the vargs by reference and so are immune to this problem. */ /* both :: char* fmt, va_list vargs (DEPRECATED) */ VG_USERREQ__PRINTF = 0x1401, VG_USERREQ__PRINTF_BACKTRACE = 0x1402, /* both :: char* fmt, va_list* vargs */ VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403, VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404, /* Stack support. */ VG_USERREQ__STACK_REGISTER = 0x1501, VG_USERREQ__STACK_DEREGISTER = 0x1502, VG_USERREQ__STACK_CHANGE = 0x1503, /* Wine support */ VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601, /* Querying of debug info. */ VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701, /* Disable/enable error reporting level. Takes a single Word arg which is the delta to this thread's error disablement indicator. Hence 1 disables or further disables errors, and -1 moves back towards enablement. Other values are not allowed. */ VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801, /* Some requests used for Valgrind internal, such as self-test or self-hosting. */ /* Initialise IR injection */ VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901, /* Used by Inner Valgrind to inform Outer Valgrind where to find the list of inner guest threads */ VG_USERREQ__INNER_THREADS = 0x1902 } Vg_ClientRequest; #if !defined(__GNUC__) # define __extension__ /* */ #endif /* Returns the number of Valgrinds this code is running under. That is, 0 if running natively, 1 if running under Valgrind, 2 if running under Valgrind which is running under another Valgrind, etc. */ #define RUNNING_ON_VALGRIND \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \ VG_USERREQ__RUNNING_ON_VALGRIND, \ 0, 0, 0, 0, 0) \ /* Discard translation of code in the range [_qzz_addr .. _qzz_addr + _qzz_len - 1]. Useful if you are debugging a JITter or some such, since it provides a way to make sure valgrind will retranslate the invalidated area. Returns no value. */ #define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS, \ _qzz_addr, _qzz_len, 0, 0, 0) #define VALGRIND_INNER_THREADS(_qzz_addr) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__INNER_THREADS, \ _qzz_addr, 0, 0, 0, 0) /* These requests are for getting Valgrind itself to print something. Possibly with a backtrace. This is a really ugly hack. The return value is the number of characters printed, excluding the "**<pid>** " part at the start and the backtrace (if present). */ #if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER) /* Modern GCC will optimize the static routine out if unused, and unused attribute will shut down warnings about it. 
*/ static int VALGRIND_PRINTF(const char *format, ...) __attribute__((format(__printf__, 1, 2), __unused__)); #endif static int #if defined(_MSC_VER) __inline #endif VALGRIND_PRINTF(const char *format, ...) { #if defined(NVALGRIND) (void)format; return 0; #else /* NVALGRIND */ #if defined(_MSC_VER) || defined(__MINGW64__) uintptr_t _qzz_res; #else unsigned long _qzz_res; #endif va_list vargs; va_start(vargs, format); #if defined(_MSC_VER) || defined(__MINGW64__) _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__PRINTF_VALIST_BY_REF, (uintptr_t)format, (uintptr_t)&vargs, 0, 0, 0); #else _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__PRINTF_VALIST_BY_REF, (unsigned long)format, (unsigned long)&vargs, 0, 0, 0); #endif va_end(vargs); return (int)_qzz_res; #endif /* NVALGRIND */ } #if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER) static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...) __attribute__((format(__printf__, 1, 2), __unused__)); #endif static int #if defined(_MSC_VER) __inline #endif VALGRIND_PRINTF_BACKTRACE(const char *format, ...) { #if defined(NVALGRIND) (void)format; return 0; #else /* NVALGRIND */ #if defined(_MSC_VER) || defined(__MINGW64__) uintptr_t _qzz_res; #else unsigned long _qzz_res; #endif va_list vargs; va_start(vargs, format); #if defined(_MSC_VER) || defined(__MINGW64__) _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, (uintptr_t)format, (uintptr_t)&vargs, 0, 0, 0); #else _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, (unsigned long)format, (unsigned long)&vargs, 0, 0, 0); #endif va_end(vargs); return (int)_qzz_res; #endif /* NVALGRIND */ } /* These requests allow control to move from the simulated CPU to the real CPU, calling an arbitrary function. Note that the current ThreadId is inserted as the first argument. So this call: VALGRIND_NON_SIMD_CALL2(f, arg1, arg2) requires f to have this signature: Word f(Word tid, Word arg1, Word arg2) where "Word" is a word-sized type. Note that these client requests are not entirely reliable. For example, if you call a function with them that subsequently calls printf(), there's a high chance Valgrind will crash. Generally, your prospects of these working are made higher if the called function does not refer to any global variables, and does not refer to any libc or other functions (printf et al). Any kind of entanglement with libc or dynamic linking is likely to have a bad outcome, for tricky reasons which we've grappled with a lot in the past. */ #define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__CLIENT_CALL0, \ _qyy_fn, \ 0, 0, 0, 0) #define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__CLIENT_CALL1, \ _qyy_fn, \ _qyy_arg1, 0, 0, 0) #define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__CLIENT_CALL2, \ _qyy_fn, \ _qyy_arg1, _qyy_arg2, 0, 0) #define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__CLIENT_CALL3, \ _qyy_fn, \ _qyy_arg1, _qyy_arg2, \ _qyy_arg3, 0) /* Counts the number of errors that have been recorded by a tool. Nb: the tool must record the errors with VG_(maybe_record_error)() or VG_(unique_error)() for them to be counted. 
*/ #define VALGRIND_COUNT_ERRORS \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( \ 0 /* default return */, \ VG_USERREQ__COUNT_ERRORS, \ 0, 0, 0, 0, 0) /* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing when heap blocks are allocated in order to give accurate results. This happens automatically for the standard allocator functions such as malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete, delete[], etc. But if your program uses a custom allocator, this doesn't automatically happen, and Valgrind will not do as well. For example, if you allocate superblocks with mmap() and then allocates chunks of the superblocks, all Valgrind's observations will be at the mmap() level and it won't know that the chunks should be considered separate entities. In Memcheck's case, that means you probably won't get heap block overrun detection (because there won't be redzones marked as unaddressable) and you definitely won't get any leak detection. The following client requests allow a custom allocator to be annotated so that it can be handled accurately by Valgrind. VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated by a malloc()-like function. For Memcheck (an illustrative case), this does two things: - It records that the block has been allocated. This means any addresses within the block mentioned in error messages will be identified as belonging to the block. It also means that if the block isn't freed it will be detected by the leak checker. - It marks the block as being addressable and undefined (if 'is_zeroed' is not set), or addressable and defined (if 'is_zeroed' is set). This controls how accesses to the block by the program are handled. 'addr' is the start of the usable block (ie. after any redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator can apply redzones -- these are blocks of padding at the start and end of each block. Adding redzones is recommended as it makes it much more likely Valgrind will spot block overruns. `is_zeroed' indicates if the memory is zeroed (or filled with another predictable value), as is the case for calloc(). VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a heap block -- that will be used by the client program -- is allocated. It's best to put it at the outermost level of the allocator if possible; for example, if you have a function my_alloc() which calls internal_alloc(), and the client request is put inside internal_alloc(), stack traces relating to the heap block will contain entries for both my_alloc() and internal_alloc(), which is probably not what you want. For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out custom blocks from within a heap block, B, that has been allocated with malloc/calloc/new/etc, then block B will be *ignored* during leak-checking -- the custom blocks will take precedence. VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For Memcheck, it does two things: - It records that the block has been deallocated. This assumes that the block was annotated as having been allocated via VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued. - It marks the block as being unaddressable. VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a heap block is deallocated. VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For Memcheck, it does four things: - It records that the size of a block has been changed. 
This assumes that the block was annotated as having been allocated via VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued. - If the block shrunk, it marks the freed memory as being unaddressable. - If the block grew, it marks the new area as undefined and defines a red zone past the end of the new block. - The V-bits of the overlap between the old and the new block are preserved. VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block and before deallocation of the old block. In many cases, these three client requests will not be enough to get your allocator working well with Memcheck. More specifically, if your allocator writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call will be necessary to mark the memory as addressable just before the zeroing occurs, otherwise you'll get a lot of invalid write errors. For example, you'll need to do this if your allocator recycles freed blocks, but it zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK). Alternatively, if your allocator reuses freed blocks for allocator-internal data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary. Really, what's happening is a blurring of the lines between the client program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the memory should be considered unaddressable to the client program, but the allocator knows more than the rest of the client program and so may be able to safely access it. Extra client requests are necessary for Valgrind to understand the distinction between the allocator and the rest of the program. Ignored if addr == 0. */ #define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MALLOCLIKE_BLOCK, \ addr, sizeB, rzB, is_zeroed, 0) /* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details. Ignored if addr == 0. */ #define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__RESIZEINPLACE_BLOCK, \ addr, oldSizeB, newSizeB, rzB, 0) /* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details. Ignored if addr == 0. */ #define VALGRIND_FREELIKE_BLOCK(addr, rzB) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__FREELIKE_BLOCK, \ addr, rzB, 0, 0, 0) /* Create a memory pool. */ #define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \ pool, rzB, is_zeroed, 0, 0) /* Create a memory pool with some flags specifying extended behaviour. When flags is zero, the behaviour is identical to VALGRIND_CREATE_MEMPOOL. The flag VALGRIND_MEMPOOL_METAPOOL specifies that the pieces of memory associated with the pool using VALGRIND_MEMPOOL_ALLOC will be used by the application as superblocks to dole out MALLOC_LIKE blocks using VALGRIND_MALLOCLIKE_BLOCK. In other words, a meta pool is a "2 levels" pool : first level is the blocks described by VALGRIND_MEMPOOL_ALLOC. The second level blocks are described using VALGRIND_MALLOCLIKE_BLOCK. Note that the association between the pool and the second level blocks is implicit : second level blocks will be located inside first level blocks. It is necessary to use the VALGRIND_MEMPOOL_METAPOOL flag for such 2 levels pools, as otherwise valgrind will detect overlapping memory blocks, and will abort execution (e.g. during leak search). Such a meta pool can also be marked as an 'auto free' pool using the flag VALGRIND_MEMPOOL_AUTO_FREE, which must be OR-ed together with the VALGRIND_MEMPOOL_METAPOOL. 
For an 'auto free' pool, VALGRIND_MEMPOOL_FREE will automatically free the second level blocks that are contained inside the first level block freed with VALGRIND_MEMPOOL_FREE. In other words, calling VALGRIND_MEMPOOL_FREE will cause implicit calls to VALGRIND_FREELIKE_BLOCK for all the second level blocks included in the first level block. Note: it is an error to use the VALGRIND_MEMPOOL_AUTO_FREE flag without the VALGRIND_MEMPOOL_METAPOOL flag. */ #define VALGRIND_MEMPOOL_AUTO_FREE 1 #define VALGRIND_MEMPOOL_METAPOOL 2 #define VALGRIND_CREATE_MEMPOOL_EXT(pool, rzB, is_zeroed, flags) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \ pool, rzB, is_zeroed, flags, 0) /* Destroy a memory pool. */ #define VALGRIND_DESTROY_MEMPOOL(pool) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DESTROY_MEMPOOL, \ pool, 0, 0, 0, 0) /* Associate a piece of memory with a memory pool. */ #define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_ALLOC, \ pool, addr, size, 0, 0) /* Disassociate a piece of memory from a memory pool. */ #define VALGRIND_MEMPOOL_FREE(pool, addr) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_FREE, \ pool, addr, 0, 0, 0) /* Disassociate any pieces outside a particular range. */ #define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_TRIM, \ pool, addr, size, 0, 0) /* Resize and/or move a piece associated with a memory pool. */ #define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MOVE_MEMPOOL, \ poolA, poolB, 0, 0, 0) /* Resize and/or move a piece associated with a memory pool. */ #define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_CHANGE, \ pool, addrA, addrB, size, 0) /* Return 1 if a mempool exists, else 0. */ #define VALGRIND_MEMPOOL_EXISTS(pool) \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__MEMPOOL_EXISTS, \ pool, 0, 0, 0, 0) /* Mark a piece of memory as being a stack. Returns a stack id. start is the lowest addressable stack byte, end is the highest addressable stack byte. */ #define VALGRIND_STACK_REGISTER(start, end) \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__STACK_REGISTER, \ start, end, 0, 0, 0) /* Unmark the piece of memory associated with a stack id as being a stack. */ #define VALGRIND_STACK_DEREGISTER(id) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_DEREGISTER, \ id, 0, 0, 0, 0) /* Change the start and end address of the stack id. start is the new lowest addressable stack byte, end is the new highest addressable stack byte. */ #define VALGRIND_STACK_CHANGE(id, start, end) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_CHANGE, \ id, start, end, 0, 0) /* Load PDB debug info for Wine PE image_map. */ #define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LOAD_PDB_DEBUGINFO, \ fd, ptr, total_size, delta, 0) /* Map a code address to a source file name and line number. buf64 must point to a 64-byte buffer in the caller's address space. The result will be dumped in there and is guaranteed to be zero terminated. If no info is found, the first byte is set to zero. */ #define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__MAP_IP_TO_SRCLOC, \ addr, buf64, 0, 0, 0) /* Disable error reporting for this thread. 
Behaves in a stack like way, so you can safely call this multiple times provided that VALGRIND_ENABLE_ERROR_REPORTING is called the same number of times to re-enable reporting. The first call of this macro disables reporting. Subsequent calls have no effect except to increase the number of VALGRIND_ENABLE_ERROR_REPORTING calls needed to re-enable reporting. Child threads do not inherit this setting from their parents -- they are always created with reporting enabled. */ #define VALGRIND_DISABLE_ERROR_REPORTING \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \ 1, 0, 0, 0, 0) /* Re-enable error reporting, as per comments on VALGRIND_DISABLE_ERROR_REPORTING. */ #define VALGRIND_ENABLE_ERROR_REPORTING \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \ -1, 0, 0, 0, 0) /* Execute a monitor command from the client program. If a connection is opened with GDB, the output will be sent according to the output mode set for vgdb. If no connection is opened, output will go to the log output. Returns 1 if command not recognised, 0 otherwise. */ #define VALGRIND_MONITOR_COMMAND(command) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__GDB_MONITOR_COMMAND, \ command, 0, 0, 0, 0) #undef PLAT_x86_darwin #undef PLAT_amd64_darwin #undef PLAT_x86_win32 #undef PLAT_amd64_win64 #undef PLAT_x86_linux #undef PLAT_amd64_linux #undef PLAT_ppc32_linux #undef PLAT_ppc64be_linux #undef PLAT_ppc64le_linux #undef PLAT_arm_linux #undef PLAT_s390x_linux #undef PLAT_mips32_linux #undef PLAT_mips64_linux #undef PLAT_x86_solaris #undef PLAT_amd64_solaris #endif /* __VALGRIND_H */
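/*
 * Hedged usage sketch (an addition, not part of the original header): how a
 * custom allocator might use the MALLOCLIKE/FREELIKE client requests
 * documented above so that Memcheck tracks its blocks.  The arena, my_alloc()
 * and my_free() names are hypothetical; only the VALGRIND_* macros come from
 * this header.
 */
#include <stddef.h>
#include "valgrind.h"

#define MY_RZ 16                        /* red-zone size reported to Memcheck */

static char my_arena[1 << 20];          /* hypothetical backing store */
static size_t my_arena_off;

static void *
my_alloc(size_t n)
{
        void *p = my_arena + my_arena_off;   /* naive bump allocation */
        my_arena_off += n + MY_RZ;
        /* report a malloc-like block of n bytes at p; memory not zeroed -> 0 */
        VALGRIND_MALLOCLIKE_BLOCK(p, n, MY_RZ, 0);
        return p;
}

static void
my_free(void *p)
{
        /* report the block as freed; Memcheck marks it unaddressable */
        VALGRIND_FREELIKE_BLOCK(p, MY_RZ);
}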
391,825
57.938929
92
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/core/valgrind/drd.h
/* ---------------------------------------------------------------- Notice that the following BSD-style license applies to this one file (drd.h) only. The rest of Valgrind is licensed under the terms of the GNU General Public License, version 2, unless otherwise indicated. See the COPYING file in the source distribution for details. ---------------------------------------------------------------- This file is part of DRD, a Valgrind tool for verification of multithreaded programs. Copyright (C) 2006-2017 Bart Van Assche <[email protected]>. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------- Notice that the above BSD-style license applies to this one file (drd.h) only. The entire rest of Valgrind is licensed under the terms of the GNU General Public License, version 2. See the COPYING file in the source distribution for details. ---------------------------------------------------------------- */ #ifndef __VALGRIND_DRD_H #define __VALGRIND_DRD_H #include "valgrind.h" /** Obtain the thread ID assigned by Valgrind's core. */ #define DRD_GET_VALGRIND_THREADID \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID, \ 0, 0, 0, 0, 0) /** Obtain the thread ID assigned by DRD. */ #define DRD_GET_DRD_THREADID \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__DRD_GET_DRD_THREAD_ID, \ 0, 0, 0, 0, 0) /** Tell DRD not to complain about data races for the specified variable. */ #define DRD_IGNORE_VAR(x) ANNOTATE_BENIGN_RACE_SIZED(&(x), sizeof(x), "") /** Tell DRD to no longer ignore data races for the specified variable. */ #define DRD_STOP_IGNORING_VAR(x) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_FINISH_SUPPRESSION, \ &(x), sizeof(x), 0, 0, 0) /** * Tell DRD to trace all memory accesses for the specified variable * until the memory that was allocated for the variable is freed. 
*/ #define DRD_TRACE_VAR(x) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_START_TRACE_ADDR, \ &(x), sizeof(x), 0, 0, 0) /** * Tell DRD to stop tracing memory accesses for the specified variable. */ #define DRD_STOP_TRACING_VAR(x) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_STOP_TRACE_ADDR, \ &(x), sizeof(x), 0, 0, 0) /** * @defgroup RaceDetectionAnnotations Data race detection annotations. * * @see See also the source file <a href="http://code.google.com/p/data-race-test/source/browse/trunk/dynamic_annotations/dynamic_annotations.h</a> * in the ThreadSanitizer project. */ /*@{*/ #ifndef __HELGRIND_H /** * Tell DRD to insert a happens-before mark. addr is the address of an object * that is not a pthread synchronization object. */ #define ANNOTATE_HAPPENS_BEFORE(addr) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_HAPPENS_BEFORE, \ addr, 0, 0, 0, 0) /** * Tell DRD that the memory accesses executed after this annotation will * happen after all memory accesses performed before all preceding * ANNOTATE_HAPPENS_BEFORE(addr). addr is the address of an object that is not * a pthread synchronization object. Inserting a happens-after annotation * before any other thread has passed by a happens-before annotation for the * same address is an error. */ #define ANNOTATE_HAPPENS_AFTER(addr) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_HAPPENS_AFTER, \ addr, 0, 0, 0, 0) #else /* __HELGRIND_H */ #undef ANNOTATE_CONDVAR_LOCK_WAIT #undef ANNOTATE_CONDVAR_WAIT #undef ANNOTATE_CONDVAR_SIGNAL #undef ANNOTATE_CONDVAR_SIGNAL_ALL #undef ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX #undef ANNOTATE_PUBLISH_MEMORY_RANGE #undef ANNOTATE_BARRIER_INIT #undef ANNOTATE_BARRIER_WAIT_BEFORE #undef ANNOTATE_BARRIER_WAIT_AFTER #undef ANNOTATE_BARRIER_DESTROY #undef ANNOTATE_PCQ_CREATE #undef ANNOTATE_PCQ_DESTROY #undef ANNOTATE_PCQ_PUT #undef ANNOTATE_PCQ_GET #undef ANNOTATE_BENIGN_RACE #undef ANNOTATE_BENIGN_RACE_SIZED #undef ANNOTATE_IGNORE_READS_BEGIN #undef ANNOTATE_IGNORE_READS_END #undef ANNOTATE_IGNORE_WRITES_BEGIN #undef ANNOTATE_IGNORE_WRITES_END #undef ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN #undef ANNOTATE_IGNORE_READS_AND_WRITES_END #undef ANNOTATE_NEW_MEMORY #undef ANNOTATE_TRACE_MEMORY #undef ANNOTATE_THREAD_NAME #endif /* __HELGRIND_H */ /** * Tell DRD that waiting on the condition variable at address cv has succeeded * and a lock on the mutex at address mtx is now held. Since DRD always inserts * a happens before relation between the pthread_cond_signal() or * pthread_cond_broadcast() call that wakes up a pthread_cond_wait() or * pthread_cond_timedwait() call and the woken up thread, this macro has been * defined such that it has no effect. */ #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, mtx) do { } while(0) /** * Tell DRD that the condition variable at address cv is about to be signaled. */ #define ANNOTATE_CONDVAR_SIGNAL(cv) do { } while(0) /** * Tell DRD that the condition variable at address cv is about to be signaled. */ #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) do { } while(0) /** * Tell DRD that waiting on condition variable at address cv succeeded and that * the memory operations performed after this annotation should be considered * to happen after the matching ANNOTATE_CONDVAR_SIGNAL(cv). Since this is the * default behavior of DRD, this macro and the macro above have been defined * such that they have no effect. 
*/ #define ANNOTATE_CONDVAR_WAIT(cv) do { } while(0) /** * Tell DRD to consider the memory operations that happened before a mutex * unlock event and after the subsequent mutex lock event on the same mutex as * ordered. This is how DRD always behaves, so this macro has been defined * such that it has no effect. */ #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mtx) do { } while(0) /** Deprecated -- don't use this annotation. */ #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mtx) do { } while(0) /** * Tell DRD to handle the specified memory range like a pure happens-before * detector would do. Since this is how DRD always behaves, this annotation * has been defined such that it has no effect. */ #define ANNOTATE_PUBLISH_MEMORY_RANGE(addr, size) do { } while(0) /** Deprecated -- don't use this annotation. */ #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(addr, size) do { } while(0) /** Deprecated -- don't use this annotation. */ #define ANNOTATE_SWAP_MEMORY_RANGE(addr, size) do { } while(0) #ifndef __HELGRIND_H /** Tell DRD that a reader-writer lock object has been initialized. */ #define ANNOTATE_RWLOCK_CREATE(rwlock) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_RWLOCK_CREATE, \ rwlock, 0, 0, 0, 0); /** Tell DRD that a reader-writer lock object has been destroyed. */ #define ANNOTATE_RWLOCK_DESTROY(rwlock) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_RWLOCK_DESTROY, \ rwlock, 0, 0, 0, 0); /** * Tell DRD that a reader-writer lock has been acquired. is_w == 1 means that * a write lock has been obtained, is_w == 0 means that a read lock has been * obtained. */ #define ANNOTATE_RWLOCK_ACQUIRED(rwlock, is_w) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_RWLOCK_ACQUIRED, \ rwlock, is_w, 0, 0, 0) #endif /* __HELGRIND_H */ /** * Tell DRD that a reader lock has been acquired on a reader-writer * synchronization object. */ #define ANNOTATE_READERLOCK_ACQUIRED(rwlock) ANNOTATE_RWLOCK_ACQUIRED(rwlock, 0) /** * Tell DRD that a writer lock has been acquired on a reader-writer * synchronization object. */ #define ANNOTATE_WRITERLOCK_ACQUIRED(rwlock) ANNOTATE_RWLOCK_ACQUIRED(rwlock, 1) #ifndef __HELGRIND_H /** * Tell DRD that a reader-writer lock is about to be released. is_w == 1 means * that a write lock is about to be released, is_w == 0 means that a read lock * is about to be released. */ #define ANNOTATE_RWLOCK_RELEASED(rwlock, is_w) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_RWLOCK_RELEASED, \ rwlock, is_w, 0, 0, 0); #endif /* __HELGRIND_H */ /** * Tell DRD that a reader lock is about to be released. */ #define ANNOTATE_READERLOCK_RELEASED(rwlock) ANNOTATE_RWLOCK_RELEASED(rwlock, 0) /** * Tell DRD that a writer lock is about to be released. */ #define ANNOTATE_WRITERLOCK_RELEASED(rwlock) ANNOTATE_RWLOCK_RELEASED(rwlock, 1) /** Tell DRD that a semaphore object is going to be initialized. */ #define ANNOTATE_SEM_INIT_PRE(sem, value) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_SEM_INIT_PRE, \ sem, value, 0, 0, 0); /** Tell DRD that a semaphore object has been destroyed. */ #define ANNOTATE_SEM_DESTROY_POST(sem) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_SEM_DESTROY_POST, \ sem, 0, 0, 0, 0); /** Tell DRD that a semaphore is going to be acquired. */ #define ANNOTATE_SEM_WAIT_PRE(sem) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_PRE, \ sem, 0, 0, 0, 0) /** Tell DRD that a semaphore has been acquired. 
*/ #define ANNOTATE_SEM_WAIT_POST(sem) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_POST, \ sem, 0, 0, 0, 0) /** Tell DRD that a semaphore is going to be released. */ #define ANNOTATE_SEM_POST_PRE(sem) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATE_SEM_POST_PRE, \ sem, 0, 0, 0, 0) /* * Report that a barrier has been initialized with a given barrier count. The * third argument specifies whether or not reinitialization is allowed, that * is, whether or not it is allowed to call barrier_init() several times * without calling barrier_destroy(). */ #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATION_UNIMP, \ "ANNOTATE_BARRIER_INIT", barrier, \ count, reinitialization_allowed, 0) /* Report that a barrier has been destroyed. */ #define ANNOTATE_BARRIER_DESTROY(barrier) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATION_UNIMP, \ "ANNOTATE_BARRIER_DESTROY", \ barrier, 0, 0, 0) /* Report that the calling thread is about to start waiting for a barrier. */ #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATION_UNIMP, \ "ANNOTATE_BARRIER_WAIT_BEFORE", \ barrier, 0, 0, 0) /* Report that the calling thread has just finished waiting for a barrier. */ #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_ANNOTATION_UNIMP, \ "ANNOTATE_BARRIER_WAIT_AFTER", \ barrier, 0, 0, 0) /** * Tell DRD that a FIFO queue has been created. The abbreviation PCQ stands for * <em>producer-consumer</em>. */ #define ANNOTATE_PCQ_CREATE(pcq) do { } while(0) /** Tell DRD that a FIFO queue has been destroyed. */ #define ANNOTATE_PCQ_DESTROY(pcq) do { } while(0) /** * Tell DRD that an element has been added to the FIFO queue at address pcq. */ #define ANNOTATE_PCQ_PUT(pcq) do { } while(0) /** * Tell DRD that an element has been removed from the FIFO queue at address pcq, * and that DRD should insert a happens-before relationship between the memory * accesses that occurred before the corresponding ANNOTATE_PCQ_PUT(pcq) * annotation and the memory accesses after this annotation. Correspondence * between PUT and GET annotations happens in FIFO order. Since locking * of the queue is needed anyway to add elements to or to remove elements from * the queue, for DRD all four FIFO annotations are defined as no-ops. */ #define ANNOTATE_PCQ_GET(pcq) do { } while(0) /** * Tell DRD that data races at the specified address are expected and must not * be reported. */ #define ANNOTATE_BENIGN_RACE(addr, descr) \ ANNOTATE_BENIGN_RACE_SIZED(addr, sizeof(*addr), descr) /* Same as ANNOTATE_BENIGN_RACE(addr, descr), but applies to the memory range [addr, addr + size). */ #define ANNOTATE_BENIGN_RACE_SIZED(addr, size, descr) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_START_SUPPRESSION, \ addr, size, 0, 0, 0) /** Tell DRD to ignore all reads performed by the current thread. */ #define ANNOTATE_IGNORE_READS_BEGIN() \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_RECORD_LOADS, \ 0, 0, 0, 0, 0); /** Tell DRD to no longer ignore the reads performed by the current thread. */ #define ANNOTATE_IGNORE_READS_END() \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_RECORD_LOADS, \ 1, 0, 0, 0, 0); /** Tell DRD to ignore all writes performed by the current thread. 
*/ #define ANNOTATE_IGNORE_WRITES_BEGIN() \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_RECORD_STORES, \ 0, 0, 0, 0, 0) /** Tell DRD to no longer ignore the writes performed by the current thread. */ #define ANNOTATE_IGNORE_WRITES_END() \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_RECORD_STORES, \ 1, 0, 0, 0, 0) /** Tell DRD to ignore all memory accesses performed by the current thread. */ #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ do { ANNOTATE_IGNORE_READS_BEGIN(); ANNOTATE_IGNORE_WRITES_BEGIN(); } while(0) /** * Tell DRD to no longer ignore the memory accesses performed by the current * thread. */ #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ do { ANNOTATE_IGNORE_READS_END(); ANNOTATE_IGNORE_WRITES_END(); } while(0) /** * Tell DRD that size bytes starting at addr has been allocated by a custom * memory allocator. */ #define ANNOTATE_NEW_MEMORY(addr, size) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_CLEAN_MEMORY, \ addr, size, 0, 0, 0) /** Ask DRD to report every access to the specified address. */ #define ANNOTATE_TRACE_MEMORY(addr) DRD_TRACE_VAR(*(char*)(addr)) /** * Tell DRD to assign the specified name to the current thread. This name will * be used in error messages printed by DRD. */ #define ANNOTATE_THREAD_NAME(name) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DRD_SET_THREAD_NAME, \ name, 0, 0, 0, 0) /*@}*/ /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! This enum comprises an ABI exported by Valgrind to programs which use client requests. DO NOT CHANGE THE ORDER OF THESE ENTRIES, NOR DELETE ANY -- add new ones at the end. */ enum { /* Ask the DRD tool to discard all information about memory accesses */ /* and client objects for the specified range. This client request is */ /* binary compatible with the similarly named Helgrind client request. */ VG_USERREQ__DRD_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'), /* args: Addr, SizeT. */ /* Ask the DRD tool the thread ID assigned by Valgrind. */ VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID = VG_USERREQ_TOOL_BASE('D','R'), /* args: none. */ /* Ask the DRD tool the thread ID assigned by DRD. */ VG_USERREQ__DRD_GET_DRD_THREAD_ID, /* args: none. */ /* To tell the DRD tool to suppress data race detection on the */ /* specified address range. */ VG_USERREQ__DRD_START_SUPPRESSION, /* args: start address, size in bytes */ /* To tell the DRD tool no longer to suppress data race detection on */ /* the specified address range. */ VG_USERREQ__DRD_FINISH_SUPPRESSION, /* args: start address, size in bytes */ /* To ask the DRD tool to trace all accesses to the specified range. */ VG_USERREQ__DRD_START_TRACE_ADDR, /* args: Addr, SizeT. */ /* To ask the DRD tool to stop tracing accesses to the specified range. */ VG_USERREQ__DRD_STOP_TRACE_ADDR, /* args: Addr, SizeT. */ /* Tell DRD whether or not to record memory loads in the calling thread. */ VG_USERREQ__DRD_RECORD_LOADS, /* args: Bool. */ /* Tell DRD whether or not to record memory stores in the calling thread. */ VG_USERREQ__DRD_RECORD_STORES, /* args: Bool. */ /* Set the name of the thread that performs this client request. */ VG_USERREQ__DRD_SET_THREAD_NAME, /* args: null-terminated character string. */ /* Tell DRD that a DRD annotation has not yet been implemented. */ VG_USERREQ__DRD_ANNOTATION_UNIMP, /* args: char*. */ /* Tell DRD that a user-defined semaphore synchronization object * is about to be created. */ VG_USERREQ__DRD_ANNOTATE_SEM_INIT_PRE, /* args: Addr, UInt value. 
*/ /* Tell DRD that a user-defined semaphore synchronization object * has been destroyed. */ VG_USERREQ__DRD_ANNOTATE_SEM_DESTROY_POST, /* args: Addr. */ /* Tell DRD that a user-defined semaphore synchronization * object is going to be acquired (semaphore wait). */ VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_PRE, /* args: Addr. */ /* Tell DRD that a user-defined semaphore synchronization * object has been acquired (semaphore wait). */ VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_POST, /* args: Addr. */ /* Tell DRD that a user-defined semaphore synchronization * object is about to be released (semaphore post). */ VG_USERREQ__DRD_ANNOTATE_SEM_POST_PRE, /* args: Addr. */ /* Tell DRD to ignore the inter-thread ordering introduced by a mutex. */ VG_USERREQ__DRD_IGNORE_MUTEX_ORDERING, /* args: Addr. */ /* Tell DRD that a user-defined reader-writer synchronization object * has been created. */ VG_USERREQ__DRD_ANNOTATE_RWLOCK_CREATE = VG_USERREQ_TOOL_BASE('H','G') + 256 + 14, /* args: Addr. */ /* Tell DRD that a user-defined reader-writer synchronization object * is about to be destroyed. */ VG_USERREQ__DRD_ANNOTATE_RWLOCK_DESTROY = VG_USERREQ_TOOL_BASE('H','G') + 256 + 15, /* args: Addr. */ /* Tell DRD that a lock on a user-defined reader-writer synchronization * object has been acquired. */ VG_USERREQ__DRD_ANNOTATE_RWLOCK_ACQUIRED = VG_USERREQ_TOOL_BASE('H','G') + 256 + 17, /* args: Addr, Int is_rw. */ /* Tell DRD that a lock on a user-defined reader-writer synchronization * object is about to be released. */ VG_USERREQ__DRD_ANNOTATE_RWLOCK_RELEASED = VG_USERREQ_TOOL_BASE('H','G') + 256 + 18, /* args: Addr, Int is_rw. */ /* Tell DRD that a Helgrind annotation has not yet been implemented. */ VG_USERREQ__HELGRIND_ANNOTATION_UNIMP = VG_USERREQ_TOOL_BASE('H','G') + 256 + 32, /* args: char*. */ /* Tell DRD to insert a happens-before annotation. */ VG_USERREQ__DRD_ANNOTATE_HAPPENS_BEFORE = VG_USERREQ_TOOL_BASE('H','G') + 256 + 33, /* args: Addr. */ /* Tell DRD to insert a happens-after annotation. */ VG_USERREQ__DRD_ANNOTATE_HAPPENS_AFTER = VG_USERREQ_TOOL_BASE('H','G') + 256 + 34, /* args: Addr. */ }; /** * @addtogroup RaceDetectionAnnotations */ /*@{*/ #ifdef __cplusplus /* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racy reads. Instead of doing ANNOTATE_IGNORE_READS_BEGIN(); ... = x; ANNOTATE_IGNORE_READS_END(); one can use ... = ANNOTATE_UNPROTECTED_READ(x); */ template <typename T> inline T ANNOTATE_UNPROTECTED_READ(const volatile T& x) { ANNOTATE_IGNORE_READS_BEGIN(); const T result = x; ANNOTATE_IGNORE_READS_END(); return result; } /* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */ #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ namespace { \ static class static_var##_annotator \ { \ public: \ static_var##_annotator() \ { \ ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \ #static_var ": " description); \ } \ } the_##static_var##_annotator; \ } #endif /*@}*/ #endif /* __VALGRIND_DRD_H */
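/*
 * Hedged usage sketch (an addition, not part of the original header): how
 * client code might use the happens-before/after annotations declared above
 * to describe a hand-rolled release/acquire hand-off to DRD.  The "ready"
 * flag, payload and the two thread functions are hypothetical; only the
 * ANNOTATE_* macros come from this header.
 */
#include <stdatomic.h>
#include "drd.h"

static atomic_int ready;        /* hypothetical hand-rolled "latch" */
static int payload;             /* data published through the latch */

static void
producer(void)
{
        payload = 42;                            /* prepare the data */
        ANNOTATE_HAPPENS_BEFORE(&ready);         /* everything above ... */
        atomic_store_explicit(&ready, 1, memory_order_release);
}

static void
consumer(void)
{
        while (!atomic_load_explicit(&ready, memory_order_acquire))
                ;                                /* spin until published */
        ANNOTATE_HAPPENS_AFTER(&ready);          /* ... happens before this */
        (void)payload;          /* DRD now treats this read as ordered */
}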
22,982
39.18007
147
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/core/valgrind/pmemcheck.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2015, Intel Corporation */ #ifndef __PMEMCHECK_H #define __PMEMCHECK_H /* This file is for inclusion into client (your!) code. You can use these macros to manipulate and query memory permissions inside your own programs. See comment near the top of valgrind.h on how to use them. */ #include "valgrind.h" /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! This enum comprises an ABI exported by Valgrind to programs which use client requests. DO NOT CHANGE THE ORDER OF THESE ENTRIES, NOR DELETE ANY -- add new ones at the end. */ typedef enum { VG_USERREQ__PMC_REGISTER_PMEM_MAPPING = VG_USERREQ_TOOL_BASE('P','C'), VG_USERREQ__PMC_REGISTER_PMEM_FILE, VG_USERREQ__PMC_REMOVE_PMEM_MAPPING, VG_USERREQ__PMC_CHECK_IS_PMEM_MAPPING, VG_USERREQ__PMC_PRINT_PMEM_MAPPINGS, VG_USERREQ__PMC_DO_FLUSH, VG_USERREQ__PMC_DO_FENCE, VG_USERREQ__PMC_RESERVED1, /* Do not use. */ VG_USERREQ__PMC_WRITE_STATS, VG_USERREQ__PMC_RESERVED2, /* Do not use. */ VG_USERREQ__PMC_RESERVED3, /* Do not use. */ VG_USERREQ__PMC_RESERVED4, /* Do not use. */ VG_USERREQ__PMC_RESERVED5, /* Do not use. */ VG_USERREQ__PMC_RESERVED7, /* Do not use. */ VG_USERREQ__PMC_RESERVED8, /* Do not use. */ VG_USERREQ__PMC_RESERVED9, /* Do not use. */ VG_USERREQ__PMC_RESERVED10, /* Do not use. */ VG_USERREQ__PMC_SET_CLEAN, /* transaction support */ VG_USERREQ__PMC_START_TX, VG_USERREQ__PMC_START_TX_N, VG_USERREQ__PMC_END_TX, VG_USERREQ__PMC_END_TX_N, VG_USERREQ__PMC_ADD_TO_TX, VG_USERREQ__PMC_ADD_TO_TX_N, VG_USERREQ__PMC_REMOVE_FROM_TX, VG_USERREQ__PMC_REMOVE_FROM_TX_N, VG_USERREQ__PMC_ADD_THREAD_TO_TX_N, VG_USERREQ__PMC_REMOVE_THREAD_FROM_TX_N, VG_USERREQ__PMC_ADD_TO_GLOBAL_TX_IGNORE, VG_USERREQ__PMC_RESERVED6, /* Do not use. */ VG_USERREQ__PMC_EMIT_LOG, } Vg_PMemCheckClientRequest; /* Client-code macros to manipulate pmem mappings */ /** Register a persistent memory mapping region */ #define VALGRIND_PMC_REGISTER_PMEM_MAPPING(_qzz_addr, _qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_REGISTER_PMEM_MAPPING, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /** Register a persistent memory file */ #define VALGRIND_PMC_REGISTER_PMEM_FILE(_qzz_desc, _qzz_addr_base, \ _qzz_size, _qzz_offset) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_REGISTER_PMEM_FILE, \ (_qzz_desc), (_qzz_addr_base), (_qzz_size), \ (_qzz_offset), 0) /** Remove a persistent memory mapping region */ #define VALGRIND_PMC_REMOVE_PMEM_MAPPING(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_REMOVE_PMEM_MAPPING, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /** Check if the given range is a registered persistent memory mapping */ #define VALGRIND_PMC_CHECK_IS_PMEM_MAPPING(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_CHECK_IS_PMEM_MAPPING, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /** Register an SFENCE */ #define VALGRIND_PMC_PRINT_PMEM_MAPPINGS \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_PRINT_PMEM_MAPPINGS, \ 0, 0, 0, 0, 0) /** Register a CLFLUSH-like operation */ #define VALGRIND_PMC_DO_FLUSH(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_DO_FLUSH, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /** Register an SFENCE */ #define VALGRIND_PMC_DO_FENCE \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_DO_FENCE, \ 0, 0, 0, 0, 0) /** Write tool stats */ #define VALGRIND_PMC_WRITE_STATS \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_WRITE_STATS, 
\ 0, 0, 0, 0, 0) /** Emit user log */ #define VALGRIND_PMC_EMIT_LOG(_qzz_emit_log) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_EMIT_LOG, \ (_qzz_emit_log), 0, 0, 0, 0) /** Set a region of persistent memory as clean */ #define VALGRIND_PMC_SET_CLEAN(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_SET_CLEAN, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /** Support for transactions */ /** Start an implicit persistent memory transaction */ #define VALGRIND_PMC_START_TX \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_START_TX, \ 0, 0, 0, 0, 0) /** Start an explicit persistent memory transaction */ #define VALGRIND_PMC_START_TX_N(_qzz_txn) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_START_TX_N, \ (_qzz_txn), 0, 0, 0, 0) /** End an implicit persistent memory transaction */ #define VALGRIND_PMC_END_TX \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_END_TX, \ 0, 0, 0, 0, 0) /** End an explicit persistent memory transaction */ #define VALGRIND_PMC_END_TX_N(_qzz_txn) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_END_TX_N, \ (_qzz_txn), 0, 0, 0, 0) /** Add a persistent memory region to the implicit transaction */ #define VALGRIND_PMC_ADD_TO_TX(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_ADD_TO_TX, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /** Add a persistent memory region to an explicit transaction */ #define VALGRIND_PMC_ADD_TO_TX_N(_qzz_txn,_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_ADD_TO_TX_N, \ (_qzz_txn), (_qzz_addr), (_qzz_len), 0, 0) /** Remove a persistent memory region from the implicit transaction */ #define VALGRIND_PMC_REMOVE_FROM_TX(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_REMOVE_FROM_TX, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /** Remove a persistent memory region from an explicit transaction */ #define VALGRIND_PMC_REMOVE_FROM_TX_N(_qzz_txn,_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_REMOVE_FROM_TX_N, \ (_qzz_txn), (_qzz_addr), (_qzz_len), 0, 0) /** End an explicit persistent memory transaction */ #define VALGRIND_PMC_ADD_THREAD_TX_N(_qzz_txn) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_ADD_THREAD_TO_TX_N, \ (_qzz_txn), 0, 0, 0, 0) /** End an explicit persistent memory transaction */ #define VALGRIND_PMC_REMOVE_THREAD_FROM_TX_N(_qzz_txn) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_REMOVE_THREAD_FROM_TX_N, \ (_qzz_txn), 0, 0, 0, 0) /** Remove a persistent memory region from the implicit transaction */ #define VALGRIND_PMC_ADD_TO_GLOBAL_TX_IGNORE(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_ADD_TO_GLOBAL_TX_IGNORE,\ (_qzz_addr), (_qzz_len), 0, 0, 0) #endif
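/*
 * Hedged usage sketch (an addition, not part of the original header): how a
 * program might annotate a single store to persistent memory using the
 * transaction and flush/fence client requests defined above.  The update()
 * function and pmem_counter argument are hypothetical; only the
 * VALGRIND_PMC_* macros come from this header.
 */
#include <stdint.h>
#include "pmemcheck.h"

static void
update(uint64_t *pmem_counter)
{
        /* tell pmemcheck this range lives in a persistent memory mapping */
        VALGRIND_PMC_REGISTER_PMEM_MAPPING(pmem_counter, sizeof(*pmem_counter));

        VALGRIND_PMC_START_TX;          /* open the implicit transaction */
        VALGRIND_PMC_ADD_TO_TX(pmem_counter, sizeof(*pmem_counter));

        *pmem_counter += 1;             /* the store being made persistent */

        /* report a flush + fence so the store counts as persisted */
        VALGRIND_PMC_DO_FLUSH(pmem_counter, sizeof(*pmem_counter));
        VALGRIND_PMC_DO_FENCE;

        VALGRIND_PMC_END_TX;            /* close the implicit transaction */
}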
9,085
47.588235
77
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/clo_vec.hpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * clo_vec.hpp -- command line options vector declarations */ #include "queue.h" #include <cstdlib> struct clo_vec_args { PMDK_TAILQ_ENTRY(clo_vec_args) next; void *args; }; struct clo_vec_alloc { PMDK_TAILQ_ENTRY(clo_vec_alloc) next; void *ptr; }; struct clo_vec_value { PMDK_TAILQ_ENTRY(clo_vec_value) next; void *ptr; }; struct clo_vec_vlist { PMDK_TAILQ_HEAD(valueshead, clo_vec_value) head; size_t nvalues; }; struct clo_vec { size_t size; PMDK_TAILQ_HEAD(argshead, clo_vec_args) args; size_t nargs; PMDK_TAILQ_HEAD(allochead, clo_vec_alloc) allocs; size_t nallocs; }; struct clo_vec *clo_vec_alloc(size_t size); void clo_vec_free(struct clo_vec *clovec); void *clo_vec_get_args(struct clo_vec *clovec, size_t i); int clo_vec_add_alloc(struct clo_vec *clovec, void *ptr); int clo_vec_memcpy(struct clo_vec *clovec, size_t off, size_t size, void *ptr); int clo_vec_memcpy_list(struct clo_vec *clovec, size_t off, size_t size, struct clo_vec_vlist *list); struct clo_vec_vlist *clo_vec_vlist_alloc(void); void clo_vec_vlist_free(struct clo_vec_vlist *list); void clo_vec_vlist_add(struct clo_vec_vlist *list, void *ptr, size_t size);
1,249
25.595745
79
hpp
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/pmem_flush.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * pmem_flush.cpp -- benchmark implementation for pmem_persist and pmem_msync */ #include <cassert> #include <cerrno> #include <climits> #include <cstdio> #include <cstdlib> #include <cstring> #include <fcntl.h> #include <libpmem.h> #include <sys/mman.h> #include <unistd.h> #include "benchmark.hpp" #include "file.h" #define PAGE_4K ((uintptr_t)1 << 12) #define PAGE_2M ((uintptr_t)1 << 21) /* * align_addr -- round addr down to given boundary */ static void * align_addr(void *addr, uintptr_t align) { return (char *)((uintptr_t)addr & ~(align - 1)); } /* * align_len -- increase len by the amount we gain when we round addr down */ static size_t align_len(size_t len, void *addr, uintptr_t align) { return len + ((uintptr_t)addr & (align - 1)); } /* * roundup_len -- increase len by the amount we gain when we round addr down, * then round up to the nearest multiple of 4K */ static size_t roundup_len(size_t len, void *addr, uintptr_t align) { return (align_len(len, addr, align) + align - 1) & ~(align - 1); } /* * pmem_args -- benchmark specific arguments */ struct pmem_args { char *operation; /* msync, dummy_msync, persist, ... */ char *mode; /* stat, seq, rand */ bool no_warmup; /* don't do warmup */ }; /* * pmem_bench -- benchmark context */ struct pmem_bench { uint64_t *offsets; /* write offsets */ size_t n_offsets; /* number of elements in offsets array */ size_t fsize; /* The size of the allocated PMEM */ struct pmem_args *pargs; /* prog_args structure */ void *pmem_addr; /* PMEM base address */ size_t pmem_len; /* length of PMEM mapping */ void *invalid_addr; /* invalid pages */ void *nondirty_addr; /* non-dirty pages */ void *pmem_addr_aligned; /* PMEM pages - 2M aligned */ void *invalid_addr_aligned; /* invalid pages - 2M aligned */ void *nondirty_addr_aligned; /* non-dirty pages - 2M aligned */ /* the actual benchmark operation */ int (*func_op)(struct pmem_bench *pmb, void *addr, size_t len); }; /* * mode_seq -- if copy mode is sequential, returns index of a chunk. */ static uint64_t mode_seq(struct pmem_bench *pmb, uint64_t index) { return index; } /* * mode_stat -- if mode is static, the offset is always 0 */ static uint64_t mode_stat(struct pmem_bench *pmb, uint64_t index) { return 0; } /* * mode_rand -- if mode is random, returns index of a random chunk */ static uint64_t mode_rand(struct pmem_bench *pmb, uint64_t index) { return rand() % pmb->n_offsets; } /* * operation_mode -- the mode of the copy process * * * static - write always the same chunk, * * sequential - write chunk by chunk, * * random - write to chunks selected randomly. */ struct op_mode { const char *mode; uint64_t (*func_mode)(struct pmem_bench *pmb, uint64_t index); }; static struct op_mode modes[] = { {"stat", mode_stat}, {"seq", mode_seq}, {"rand", mode_rand}, }; #define MODES (sizeof(modes) / sizeof(modes[0])) /* * parse_op_mode -- parses command line "--mode" * and returns proper operation mode index. 
*/ static int parse_op_mode(const char *arg) { for (unsigned i = 0; i < MODES; i++) { if (strcmp(arg, modes[i].mode) == 0) return i; } return -1; } /* * flush_noop -- dummy flush, does nothing */ static int flush_noop(struct pmem_bench *pmb, void *addr, size_t len) { return 0; } /* * flush_persist -- flush data to persistence using pmem_persist() */ static int flush_persist(struct pmem_bench *pmb, void *addr, size_t len) { pmem_persist(addr, len); return 0; } /* * flush_persist_4K -- always flush entire 4K page(s) using pmem_persist() */ static int flush_persist_4K(struct pmem_bench *pmb, void *addr, size_t len) { void *ptr = align_addr(addr, PAGE_4K); len = roundup_len(len, addr, PAGE_4K); pmem_persist(ptr, len); return 0; } /* * flush_persist_2M -- always flush entire 2M page(s) using pmem_persist() */ static int flush_persist_2M(struct pmem_bench *pmb, void *addr, size_t len) { void *ptr = align_addr(addr, PAGE_2M); len = roundup_len(len, addr, PAGE_2M); pmem_persist(ptr, len); return 0; } /* * flush_msync -- flush data to persistence using pmem_msync() */ static int flush_msync(struct pmem_bench *pmb, void *addr, size_t len) { pmem_msync(addr, len); return 0; } /* * flush_msync_async -- emulate dummy msync() using MS_ASYNC flag */ static int flush_msync_async(struct pmem_bench *pmb, void *addr, size_t len) { void *ptr = align_addr(addr, PAGE_4K); len = align_len(len, addr, PAGE_4K); msync(ptr, len, MS_ASYNC); return 0; } /* * flush_msync_0 -- emulate dummy msync() using zero length */ static int flush_msync_0(struct pmem_bench *pmb, void *addr, size_t len) { void *ptr = align_addr(addr, PAGE_4K); (void)len; msync(ptr, 0, MS_SYNC); return 0; } /* * flush_persist_4K_msync_0 -- emulate msync() that only flushes CPU cache * * Do flushing in user space (4K pages) + dummy syscall. */ static int flush_persist_4K_msync_0(struct pmem_bench *pmb, void *addr, size_t len) { void *ptr = align_addr(addr, PAGE_4K); len = roundup_len(len, addr, PAGE_4K); pmem_persist(ptr, len); msync(ptr, 0, MS_SYNC); return 0; } /* * flush_persist_2M_msync_0 -- emulate msync() that only flushes CPU cache * * Do flushing in user space (2M pages) + dummy syscall. 
*/ static int flush_persist_2M_msync_0(struct pmem_bench *pmb, void *addr, size_t len) { void *ptr = align_addr(addr, PAGE_2M); len = roundup_len(len, addr, PAGE_2M); pmem_persist(ptr, len); msync(ptr, 0, MS_SYNC); return 0; } /* * flush_msync_err -- emulate dummy msync() using invalid flags */ static int flush_msync_err(struct pmem_bench *pmb, void *addr, size_t len) { void *ptr = align_addr(addr, PAGE_4K); len = align_len(len, addr, PAGE_4K); msync(ptr, len, MS_SYNC | MS_ASYNC); return 0; } /* * flush_msync_nodirty -- call msync() on non-dirty pages */ static int flush_msync_nodirty(struct pmem_bench *pmb, void *addr, size_t len) { uintptr_t uptr = (uintptr_t)addr - (uintptr_t)pmb->pmem_addr_aligned; uptr += (uintptr_t)pmb->nondirty_addr_aligned; void *ptr = align_addr((void *)uptr, PAGE_4K); len = align_len(len, (void *)uptr, PAGE_4K); pmem_msync(ptr, len); return 0; } /* * flush_msync_invalid -- emulate dummy msync() using invalid address */ static int flush_msync_invalid(struct pmem_bench *pmb, void *addr, size_t len) { uintptr_t uptr = (uintptr_t)addr - (uintptr_t)pmb->pmem_addr_aligned; uptr += (uintptr_t)pmb->invalid_addr_aligned; void *ptr = align_addr((void *)uptr, PAGE_4K); len = align_len(len, (void *)uptr, PAGE_4K); pmem_msync(ptr, len); return 0; } struct op { const char *opname; int (*func_op)(struct pmem_bench *pmb, void *addr, size_t len); }; static struct op ops[] = { {"noop", flush_noop}, {"persist", flush_persist}, {"persist_4K", flush_persist_4K}, {"persist_2M", flush_persist_2M}, {"msync", flush_msync}, {"msync_0", flush_msync_0}, {"msync_err", flush_msync_err}, {"persist_4K_msync_0", flush_persist_4K_msync_0}, {"persist_2M_msync_0", flush_persist_2M_msync_0}, {"msync_async", flush_msync_async}, {"msync_nodirty", flush_msync_nodirty}, {"msync_invalid", flush_msync_invalid}, }; #define NOPS (sizeof(ops) / sizeof(ops[0])) /* * parse_op_type -- parses command line "--operation" argument * and returns proper operation type. */ static int parse_op_type(const char *arg) { for (unsigned i = 0; i < NOPS; i++) { if (strcmp(arg, ops[i].opname) == 0) return i; } return -1; } /* * pmem_flush_init -- benchmark initialization * * Parses command line arguments, allocates persistent memory, and maps it. 
*/ static int pmem_flush_init(struct benchmark *bench, struct benchmark_args *args) { assert(bench != nullptr); assert(args != nullptr); size_t file_size = 0; int flags = 0; enum file_type type = util_file_get_type(args->fname); if (type == OTHER_ERROR) { fprintf(stderr, "could not check type of file %s\n", args->fname); return -1; } uint64_t (*func_mode)(struct pmem_bench * pmb, uint64_t index); auto *pmb = (struct pmem_bench *)malloc(sizeof(struct pmem_bench)); assert(pmb != nullptr); pmb->pargs = (struct pmem_args *)args->opts; assert(pmb->pargs != nullptr); int i = parse_op_type(pmb->pargs->operation); if (i == -1) { fprintf(stderr, "wrong operation: %s\n", pmb->pargs->operation); goto err_free_pmb; } pmb->func_op = ops[i].func_op; pmb->n_offsets = args->n_ops_per_thread * args->n_threads; pmb->fsize = pmb->n_offsets * args->dsize + (2 * PAGE_2M); /* round up to 2M boundary */ pmb->fsize = (pmb->fsize + PAGE_2M - 1) & ~(PAGE_2M - 1); i = parse_op_mode(pmb->pargs->mode); if (i == -1) { fprintf(stderr, "wrong mode: %s\n", pmb->pargs->mode); goto err_free_pmb; } func_mode = modes[i].func_mode; /* populate offsets array */ assert(pmb->n_offsets != 0); pmb->offsets = (size_t *)malloc(pmb->n_offsets * sizeof(*pmb->offsets)); assert(pmb->offsets != nullptr); for (size_t i = 0; i < pmb->n_offsets; ++i) pmb->offsets[i] = func_mode(pmb, i); if (type != TYPE_DEVDAX) { file_size = pmb->fsize; flags = PMEM_FILE_CREATE | PMEM_FILE_EXCL; } /* create a pmem file and memory map it */ pmb->pmem_addr = pmem_map_file(args->fname, file_size, flags, args->fmode, &pmb->pmem_len, nullptr); if (pmb->pmem_addr == nullptr) { perror("pmem_map_file"); goto err_free_pmb; } pmb->nondirty_addr = mmap(nullptr, pmb->fsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); if (pmb->nondirty_addr == MAP_FAILED) { perror("mmap(1)"); goto err_unmap1; } pmb->invalid_addr = mmap(nullptr, pmb->fsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); if (pmb->invalid_addr == MAP_FAILED) { perror("mmap(2)"); goto err_unmap2; } munmap(pmb->invalid_addr, pmb->fsize); pmb->pmem_addr_aligned = (void *)(((uintptr_t)pmb->pmem_addr + PAGE_2M - 1) & ~(PAGE_2M - 1)); pmb->nondirty_addr_aligned = (void *)(((uintptr_t)pmb->nondirty_addr + PAGE_2M - 1) & ~(PAGE_2M - 1)); pmb->invalid_addr_aligned = (void *)(((uintptr_t)pmb->invalid_addr + PAGE_2M - 1) & ~(PAGE_2M - 1)); pmembench_set_priv(bench, pmb); if (!pmb->pargs->no_warmup) { size_t off; for (off = 0; off < pmb->fsize - PAGE_2M; off += PAGE_4K) { *(int *)((char *)pmb->pmem_addr_aligned + off) = 0; *(int *)((char *)pmb->nondirty_addr_aligned + off) = 0; } } return 0; err_unmap2: munmap(pmb->nondirty_addr, pmb->fsize); err_unmap1: pmem_unmap(pmb->pmem_addr, pmb->pmem_len); err_free_pmb: free(pmb); return -1; } /* * pmem_flush_exit -- benchmark cleanup */ static int pmem_flush_exit(struct benchmark *bench, struct benchmark_args *args) { auto *pmb = (struct pmem_bench *)pmembench_get_priv(bench); pmem_unmap(pmb->pmem_addr, pmb->fsize); munmap(pmb->nondirty_addr, pmb->fsize); free(pmb); return 0; } /* * pmem_flush_operation -- actual benchmark operation */ static int pmem_flush_operation(struct benchmark *bench, struct operation_info *info) { auto *pmb = (struct pmem_bench *)pmembench_get_priv(bench); size_t op_idx = info->index; assert(op_idx < pmb->n_offsets); uint64_t chunk_idx = pmb->offsets[op_idx]; void *addr = (char *)pmb->pmem_addr_aligned + chunk_idx * info->args->dsize; /* store + flush */ *(int *)addr = *(int *)addr + 1; pmb->func_op(pmb, addr, info->args->dsize); 
return 0; } /* structure to define command line arguments */ static struct benchmark_clo pmem_flush_clo[3]; /* Stores information about benchmark. */ static struct benchmark_info pmem_flush_bench; CONSTRUCTOR(pmem_flush_constructor) void pmem_flush_constructor(void) { pmem_flush_clo[0].opt_short = 'o'; pmem_flush_clo[0].opt_long = "operation"; pmem_flush_clo[0].descr = "Operation type - persist," " msync, ..."; pmem_flush_clo[0].type = CLO_TYPE_STR; pmem_flush_clo[0].off = clo_field_offset(struct pmem_args, operation); pmem_flush_clo[0].def = "noop"; pmem_flush_clo[1].opt_short = 0; pmem_flush_clo[1].opt_long = "mode"; pmem_flush_clo[1].descr = "mode - stat, seq or rand"; pmem_flush_clo[1].type = CLO_TYPE_STR; pmem_flush_clo[1].off = clo_field_offset(struct pmem_args, mode); pmem_flush_clo[1].def = "stat"; pmem_flush_clo[2].opt_short = 'w'; pmem_flush_clo[2].opt_long = "no-warmup"; pmem_flush_clo[2].descr = "Don't do warmup"; pmem_flush_clo[2].type = CLO_TYPE_FLAG; pmem_flush_clo[2].off = clo_field_offset(struct pmem_args, no_warmup); pmem_flush_bench.name = "pmem_flush"; pmem_flush_bench.brief = "Benchmark for pmem_msync() " "and pmem_persist()"; pmem_flush_bench.init = pmem_flush_init; pmem_flush_bench.exit = pmem_flush_exit; pmem_flush_bench.multithread = true; pmem_flush_bench.multiops = true; pmem_flush_bench.operation = pmem_flush_operation; pmem_flush_bench.measure_time = true; pmem_flush_bench.clos = pmem_flush_clo; pmem_flush_bench.nclos = ARRAY_SIZE(pmem_flush_clo); pmem_flush_bench.opts_size = sizeof(struct pmem_args); pmem_flush_bench.rm_file = true; pmem_flush_bench.allow_poolset = false; REGISTER_BENCHMARK(pmem_flush_bench); }
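/*
 * Hedged worked example (an addition, not part of the original file): the
 * page-rounding arithmetic performed by align_addr()/roundup_len() above,
 * spelled out with concrete numbers.  This standalone main() is hypothetical
 * and simply repeats the same bit masking for illustration.
 */
#include <cassert>
#include <cstddef>
#include <cstdint>

int
main()
{
	const uintptr_t align = (uintptr_t)1 << 12;	/* 4K page */
	uintptr_t addr = 0x12345;			/* unaligned address */
	size_t len = 100;

	/* align_addr(): round the address down to the page boundary */
	uintptr_t aligned = addr & ~(align - 1);
	assert(aligned == (uintptr_t)0x12000);

	/* roundup_len(): grow len by what rounding down gained, then round up */
	size_t grown = len + (addr & (align - 1));	/* 100 + 0x345 bytes */
	size_t rounded = (grown + align - 1) & ~(align - 1);
	assert(rounded == (size_t)0x1000);		/* exactly one 4K page */

	return 0;
}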
13,147
23.303142
77
cpp
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/pmembench.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * pmembench.cpp -- main source file for benchmark framework */ #include <cassert> #include <cerrno> #include <cfloat> #include <cinttypes> #include <cmath> #include <cstdio> #include <cstring> #include <dirent.h> #include <err.h> #include <getopt.h> #include <linux/limits.h> #include <sched.h> #include <sys/wait.h> #include <unistd.h> #include "benchmark.hpp" #include "benchmark_worker.hpp" #include "clo.hpp" #include "clo_vec.hpp" #include "config_reader.hpp" #include "file.h" #include "libpmempool.h" #include "mmap.h" #include "os.h" #include "os_thread.h" #include "queue.h" #include "scenario.hpp" #include "set.h" #include "util.h" #ifndef _WIN32 #include "rpmem_common.h" #include "rpmem_ssh.h" #include "rpmem_util.h" #endif /* average time required to get a current time from the system */ unsigned long long Get_time_avg; #define MIN_EXE_TIME_E 0.5 /* * struct pmembench -- main context */ struct pmembench { int argc; char **argv; struct scenario *scenario; struct clo_vec *clovec; bool override_clos; }; /* * struct benchmark -- benchmark's context */ struct benchmark { PMDK_LIST_ENTRY(benchmark) next; struct benchmark_info *info; void *priv; struct benchmark_clo *clos; size_t nclos; size_t args_size; }; /* * struct bench_list -- list of available benchmarks */ struct bench_list { PMDK_LIST_HEAD(benchmarks_head, benchmark) head; bool initialized; }; /* * struct benchmark_opts -- arguments for pmembench */ struct benchmark_opts { bool help; bool version; const char *file_name; }; static struct version_s { unsigned major; unsigned minor; } version = {1, 0}; /* benchmarks list initialization */ static struct bench_list benchmarks; /* common arguments for benchmarks */ static struct benchmark_clo pmembench_clos[13]; /* list of arguments for pmembench */ static struct benchmark_clo pmembench_opts[2]; CONSTRUCTOR(pmembench_constructor) void pmembench_constructor(void) { pmembench_opts[0].opt_short = 'h'; pmembench_opts[0].opt_long = "help"; pmembench_opts[0].descr = "Print help"; pmembench_opts[0].type = CLO_TYPE_FLAG; pmembench_opts[0].off = clo_field_offset(struct benchmark_opts, help); pmembench_opts[0].ignore_in_res = true; pmembench_opts[1].opt_short = 'v'; pmembench_opts[1].opt_long = "version"; pmembench_opts[1].descr = "Print version"; pmembench_opts[1].type = CLO_TYPE_FLAG; pmembench_opts[1].off = clo_field_offset(struct benchmark_opts, version); pmembench_opts[1].ignore_in_res = true; pmembench_clos[0].opt_short = 'h'; pmembench_clos[0].opt_long = "help"; pmembench_clos[0].descr = "Print help for single benchmark"; pmembench_clos[0].type = CLO_TYPE_FLAG; pmembench_clos[0].off = clo_field_offset(struct benchmark_args, help); pmembench_clos[0].ignore_in_res = true; pmembench_clos[1].opt_short = 't'; pmembench_clos[1].opt_long = "threads"; pmembench_clos[1].type = CLO_TYPE_UINT; pmembench_clos[1].descr = "Number of working threads"; pmembench_clos[1].off = clo_field_offset(struct benchmark_args, n_threads); pmembench_clos[1].def = "1"; pmembench_clos[1].type_uint.size = clo_field_size(struct benchmark_args, n_threads); pmembench_clos[1].type_uint.base = CLO_INT_BASE_DEC; pmembench_clos[1].type_uint.min = 1; pmembench_clos[1].type_uint.max = UINT_MAX; pmembench_clos[2].opt_short = 'n'; pmembench_clos[2].opt_long = "ops-per-thread"; pmembench_clos[2].type = CLO_TYPE_UINT; pmembench_clos[2].descr = "Number of operations per thread"; pmembench_clos[2].off = clo_field_offset(struct benchmark_args, 
n_ops_per_thread); pmembench_clos[2].def = "1"; pmembench_clos[2].type_uint.size = clo_field_size(struct benchmark_args, n_ops_per_thread); pmembench_clos[2].type_uint.base = CLO_INT_BASE_DEC; pmembench_clos[2].type_uint.min = 1; pmembench_clos[2].type_uint.max = ULLONG_MAX; pmembench_clos[3].opt_short = 'd'; pmembench_clos[3].opt_long = "data-size"; pmembench_clos[3].type = CLO_TYPE_UINT; pmembench_clos[3].descr = "IO data size"; pmembench_clos[3].off = clo_field_offset(struct benchmark_args, dsize); pmembench_clos[3].def = "1"; pmembench_clos[3].type_uint.size = clo_field_size(struct benchmark_args, dsize); pmembench_clos[3].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX; pmembench_clos[3].type_uint.min = 1; pmembench_clos[3].type_uint.max = ULONG_MAX; pmembench_clos[4].opt_short = 'f'; pmembench_clos[4].opt_long = "file"; pmembench_clos[4].type = CLO_TYPE_STR; pmembench_clos[4].descr = "File name"; pmembench_clos[4].off = clo_field_offset(struct benchmark_args, fname); pmembench_clos[4].def = "/mnt/pmem/testfile"; pmembench_clos[4].ignore_in_res = true; pmembench_clos[5].opt_short = 'm'; pmembench_clos[5].opt_long = "fmode"; pmembench_clos[5].type = CLO_TYPE_UINT; pmembench_clos[5].descr = "File mode"; pmembench_clos[5].off = clo_field_offset(struct benchmark_args, fmode); pmembench_clos[5].def = "0666"; pmembench_clos[5].ignore_in_res = true; pmembench_clos[5].type_uint.size = clo_field_size(struct benchmark_args, fmode); pmembench_clos[5].type_uint.base = CLO_INT_BASE_OCT; pmembench_clos[5].type_uint.min = 0; pmembench_clos[5].type_uint.max = ULONG_MAX; pmembench_clos[6].opt_short = 's'; pmembench_clos[6].opt_long = "seed"; pmembench_clos[6].type = CLO_TYPE_UINT; pmembench_clos[6].descr = "PRNG seed"; pmembench_clos[6].off = clo_field_offset(struct benchmark_args, seed); pmembench_clos[6].def = "0"; pmembench_clos[6].type_uint.size = clo_field_size(struct benchmark_args, seed); pmembench_clos[6].type_uint.base = CLO_INT_BASE_DEC; pmembench_clos[6].type_uint.min = 0; pmembench_clos[6].type_uint.max = ~0; pmembench_clos[7].opt_short = 'r'; pmembench_clos[7].opt_long = "repeats"; pmembench_clos[7].type = CLO_TYPE_UINT; pmembench_clos[7].descr = "Number of repeats of scenario"; pmembench_clos[7].off = clo_field_offset(struct benchmark_args, repeats); pmembench_clos[7].def = "1"; pmembench_clos[7].type_uint.size = clo_field_size(struct benchmark_args, repeats); pmembench_clos[7].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX; pmembench_clos[7].type_uint.min = 1; pmembench_clos[7].type_uint.max = ULONG_MAX; pmembench_clos[8].opt_short = 'F'; pmembench_clos[8].opt_long = "thread-affinity"; pmembench_clos[8].descr = "Set worker threads CPU affinity mask"; pmembench_clos[8].type = CLO_TYPE_FLAG; pmembench_clos[8].off = clo_field_offset(struct benchmark_args, thread_affinity); pmembench_clos[8].def = "false"; /* * XXX: add link to blog post about optimal affinity * when it will be done */ pmembench_clos[9].opt_short = 'I'; pmembench_clos[9].opt_long = "affinity-list"; pmembench_clos[9].descr = "Set affinity mask as a list of CPUs separated by semicolon"; pmembench_clos[9].type = CLO_TYPE_STR; pmembench_clos[9].off = clo_field_offset(struct benchmark_args, affinity_list); pmembench_clos[9].def = ""; pmembench_clos[9].ignore_in_res = true; pmembench_clos[10].opt_long = "main-affinity"; pmembench_clos[10].descr = "Set affinity for main thread"; pmembench_clos[10].type = CLO_TYPE_INT; pmembench_clos[10].off = clo_field_offset(struct benchmark_args, main_affinity); pmembench_clos[10].def 
= "-1"; pmembench_clos[10].ignore_in_res = false; pmembench_clos[10].type_int.size = clo_field_size(struct benchmark_args, main_affinity); pmembench_clos[10].type_int.base = CLO_INT_BASE_DEC; pmembench_clos[10].type_int.min = (-1); pmembench_clos[10].type_int.max = LONG_MAX; pmembench_clos[11].opt_short = 'e'; pmembench_clos[11].opt_long = "min-exe-time"; pmembench_clos[11].type = CLO_TYPE_UINT; pmembench_clos[11].descr = "Minimal execution time in seconds"; pmembench_clos[11].off = clo_field_offset(struct benchmark_args, min_exe_time); pmembench_clos[11].def = "0"; pmembench_clos[11].type_uint.size = clo_field_size(struct benchmark_args, min_exe_time); pmembench_clos[11].type_uint.base = CLO_INT_BASE_DEC; pmembench_clos[11].type_uint.min = 0; pmembench_clos[11].type_uint.max = ULONG_MAX; pmembench_clos[12].opt_short = 'p'; pmembench_clos[12].opt_long = "dynamic-poolset"; pmembench_clos[12].type = CLO_TYPE_FLAG; pmembench_clos[12].descr = "Allow benchmark to create poolset and reuse files"; pmembench_clos[12].off = clo_field_offset(struct benchmark_args, is_dynamic_poolset); pmembench_clos[12].ignore_in_res = true; } /* * pmembench_get_priv -- return private structure of benchmark */ void * pmembench_get_priv(struct benchmark *bench) { return bench->priv; } /* * pmembench_set_priv -- set private structure of benchmark */ void pmembench_set_priv(struct benchmark *bench, void *priv) { bench->priv = priv; } /* * pmembench_register -- register benchmark */ int pmembench_register(struct benchmark_info *bench_info) { assert(bench_info->name && bench_info->brief); struct benchmark *bench = (struct benchmark *)calloc(1, sizeof(*bench)); assert(bench != nullptr); bench->info = bench_info; if (!benchmarks.initialized) { PMDK_LIST_INIT(&benchmarks.head); benchmarks.initialized = true; } PMDK_LIST_INSERT_HEAD(&benchmarks.head, bench, next); return 0; } /* * pmembench_get_info -- return structure with information about benchmark */ struct benchmark_info * pmembench_get_info(struct benchmark *bench) { return bench->info; } /* * pmembench_release_clos -- release CLO structure */ static void pmembench_release_clos(struct benchmark *bench) { free(bench->clos); } /* * pmembench_merge_clos -- merge benchmark's CLOs with common CLOs */ static void pmembench_merge_clos(struct benchmark *bench) { size_t size = sizeof(struct benchmark_args); size_t pb_nclos = ARRAY_SIZE(pmembench_clos); size_t nclos = pb_nclos; size_t i; if (bench->info->clos) { size += bench->info->opts_size; nclos += bench->info->nclos; } auto *clos = (struct benchmark_clo *)calloc( nclos, sizeof(struct benchmark_clo)); assert(clos != nullptr); memcpy(clos, pmembench_clos, pb_nclos * sizeof(struct benchmark_clo)); if (bench->info->clos) { memcpy(&clos[pb_nclos], bench->info->clos, bench->info->nclos * sizeof(struct benchmark_clo)); for (i = 0; i < bench->info->nclos; i++) { clos[pb_nclos + i].off += sizeof(struct benchmark_args); } } bench->clos = clos; bench->nclos = nclos; bench->args_size = size; } /* * pmembench_run_worker -- run worker with benchmark operation */ static int pmembench_run_worker(struct benchmark *bench, struct worker_info *winfo) { benchmark_time_get(&winfo->beg); for (size_t i = 0; i < winfo->nops; i++) { if (bench->info->operation(bench, &winfo->opinfo[i])) return -1; benchmark_time_get(&winfo->opinfo[i].end); } benchmark_time_get(&winfo->end); return 0; } /* * pmembench_print_header -- print header of benchmark's results */ static void pmembench_print_header(struct pmembench *pb, struct benchmark *bench, struct clo_vec 
*clovec) { if (pb->scenario) { printf("%s: %s [%" PRIu64 "]%s%s%s\n", pb->scenario->name, bench->info->name, clovec->nargs, pb->scenario->group ? " [group: " : "", pb->scenario->group ? pb->scenario->group : "", pb->scenario->group ? "]" : ""); } else { printf("%s [%" PRIu64 "]\n", bench->info->name, clovec->nargs); } printf("total-avg[sec];" "ops-per-second[1/sec];" "total-max[sec];" "total-min[sec];" "total-median[sec];" "total-std-dev[sec];" "latency-avg[nsec];" "latency-min[nsec];" "latency-max[nsec];" "latency-std-dev[nsec];" "latency-pctl-50.0%%[nsec];" "latency-pctl-99.0%%[nsec];" "latency-pctl-99.9%%[nsec]"); size_t i; for (i = 0; i < bench->nclos; i++) { if (!bench->clos[i].ignore_in_res) { printf(";%s", bench->clos[i].opt_long); } } if (bench->info->print_bandwidth) printf(";bandwidth[MiB/s]"); if (bench->info->print_extra_headers) bench->info->print_extra_headers(); printf("\n"); } /* * pmembench_print_results -- print benchmark's results */ static void pmembench_print_results(struct benchmark *bench, struct benchmark_args *args, struct total_results *res) { printf("%f;%f;%f;%f;%f;%f;%" PRIu64 ";%" PRIu64 ";%" PRIu64 ";%f;%" PRIu64 ";%" PRIu64 ";%" PRIu64, res->total.avg, res->nopsps, res->total.max, res->total.min, res->total.med, res->total.std_dev, res->latency.avg, res->latency.min, res->latency.max, res->latency.std_dev, res->latency.pctl50_0p, res->latency.pctl99_0p, res->latency.pctl99_9p); size_t i; for (i = 0; i < bench->nclos; i++) { if (!bench->clos[i].ignore_in_res) printf(";%s", benchmark_clo_str(&bench->clos[i], args, bench->args_size)); } if (bench->info->print_bandwidth) printf(";%f", res->nopsps * args->dsize / 1024 / 1024); if (bench->info->print_extra_values) bench->info->print_extra_values(bench, args, res); printf("\n"); } /* * pmembench_parse_clos -- parse command line arguments for benchmark */ static int pmembench_parse_clo(struct pmembench *pb, struct benchmark *bench, struct clo_vec *clovec) { if (!pb->scenario) { return benchmark_clo_parse(pb->argc, pb->argv, bench->clos, bench->nclos, clovec); } if (pb->override_clos) { /* * Use only ARRAY_SIZE(pmembench_clos) clos - these are the * general clos and are placed at the beginning of the * clos array. 
*/ int ret = benchmark_override_clos_in_scenario( pb->scenario, pb->argc, pb->argv, bench->clos, ARRAY_SIZE(pmembench_clos)); /* reset for the next benchmark in the config file */ optind = 1; if (ret) return ret; } return benchmark_clo_parse_scenario(pb->scenario, bench->clos, bench->nclos, clovec); } /* * pmembench_parse_affinity -- parse affinity list */ static int pmembench_parse_affinity(const char *list, char **saveptr) { char *str = nullptr; char *end; int cpu = 0; if (*saveptr) { str = strtok(nullptr, ";"); if (str == nullptr) { /* end of list - we have to start over */ free(*saveptr); *saveptr = nullptr; } } if (!*saveptr) { *saveptr = strdup(list); if (*saveptr == nullptr) { perror("strdup"); return -1; } str = strtok(*saveptr, ";"); if (str == nullptr) goto err; } if ((str == nullptr) || (*str == '\0')) goto err; cpu = strtol(str, &end, 10); if (*end != '\0') goto err; return cpu; err: errno = EINVAL; perror("pmembench_parse_affinity"); free(*saveptr); *saveptr = nullptr; return -1; } /* * pmembench_init_workers -- init benchmark's workers */ static int pmembench_init_workers(struct benchmark_worker **workers, struct benchmark *bench, struct benchmark_args *args) { unsigned i; int ncpus = 0; char *saveptr = nullptr; int ret = 0; if (args->thread_affinity) { ncpus = sysconf(_SC_NPROCESSORS_ONLN); if (ncpus <= 0) return -1; } for (i = 0; i < args->n_threads; i++) { workers[i] = benchmark_worker_alloc(); if (args->thread_affinity) { int cpu; os_cpu_set_t cpuset; if (args->affinity_list && *args->affinity_list != '\0') { cpu = pmembench_parse_affinity( args->affinity_list, &saveptr); if (cpu == -1) { ret = -1; goto end; } } else { cpu = (int)i; } assert(ncpus > 0); cpu %= ncpus; os_cpu_zero(&cpuset); os_cpu_set(cpu, &cpuset); errno = os_thread_setaffinity_np(&workers[i]->thread, sizeof(os_cpu_set_t), &cpuset); if (errno) { perror("os_thread_setaffinity_np"); ret = -1; goto end; } } workers[i]->info.index = i; workers[i]->info.nops = args->n_ops_per_thread; workers[i]->info.opinfo = (struct operation_info *)calloc( args->n_ops_per_thread, sizeof(struct operation_info)); size_t j; for (j = 0; j < args->n_ops_per_thread; j++) { workers[i]->info.opinfo[j].worker = &workers[i]->info; workers[i]->info.opinfo[j].args = args; workers[i]->info.opinfo[j].index = j; } workers[i]->bench = bench; workers[i]->args = args; workers[i]->func = pmembench_run_worker; workers[i]->init = bench->info->init_worker; workers[i]->exit = bench->info->free_worker; if (benchmark_worker_init(workers[i])) { fprintf(stderr, "thread number %u initialization failed\n", i); ret = -1; goto end; } } end: free(saveptr); return ret; } /* * results_store -- store results of a single repeat */ static void results_store(struct bench_results *res, struct benchmark_worker **workers, unsigned nthreads, size_t nops) { for (unsigned i = 0; i < nthreads; i++) { res->thres[i]->beg = workers[i]->info.beg; res->thres[i]->end = workers[i]->info.end; for (size_t j = 0; j < nops; j++) { res->thres[i]->end_op[j] = workers[i]->info.opinfo[j].end; } } } /* * compare_time -- compare time values */ static int compare_time(const void *p1, const void *p2) { const auto *t1 = (const benchmark_time_t *)p1; const auto *t2 = (const benchmark_time_t *)p2; return benchmark_time_compare(t1, t2); } /* * compare_doubles -- comparing function used for sorting */ static int compare_doubles(const void *a1, const void *b1) { const auto *a = (const double *)a1; const auto *b = (const double *)b1; return (*a > *b) - (*a < *b); } /* * compare_uint64t -- 
comparing function used for sorting */ static int compare_uint64t(const void *a1, const void *b1) { const auto *a = (const uint64_t *)a1; const auto *b = (const uint64_t *)b1; return (*a > *b) - (*a < *b); } /* * results_alloc -- prepare structure to store all benchmark results */ static struct total_results * results_alloc(struct benchmark_args *args) { struct total_results *total = (struct total_results *)malloc(sizeof(*total)); assert(total != nullptr); total->nrepeats = args->repeats; total->nthreads = args->n_threads; total->nops = args->n_ops_per_thread; total->res = (struct bench_results *)malloc(args->repeats * sizeof(*total->res)); assert(total->res != nullptr); for (size_t i = 0; i < args->repeats; i++) { struct bench_results *res = &total->res[i]; assert(args->n_threads != 0); res->thres = (struct thread_results **)malloc( args->n_threads * sizeof(*res->thres)); assert(res->thres != nullptr); for (size_t j = 0; j < args->n_threads; j++) { res->thres[j] = (struct thread_results *)malloc( sizeof(*res->thres[j]) + args->n_ops_per_thread * sizeof(benchmark_time_t)); assert(res->thres[j] != nullptr); } } return total; } /* * results_free -- release results structure */ static void results_free(struct total_results *total) { for (size_t i = 0; i < total->nrepeats; i++) { for (size_t j = 0; j < total->nthreads; j++) free(total->res[i].thres[j]); free(total->res[i].thres); } free(total->res); free(total); } /* * get_total_results -- return results of all repeats of scenario */ static void get_total_results(struct total_results *tres) { assert(tres->nrepeats != 0); assert(tres->nthreads != 0); assert(tres->nops != 0); /* reset results */ memset(&tres->total, 0, sizeof(tres->total)); memset(&tres->latency, 0, sizeof(tres->latency)); tres->total.min = DBL_MAX; tres->total.max = DBL_MIN; tres->latency.min = UINT64_MAX; tres->latency.max = 0; /* allocate helper arrays */ benchmark_time_t *tbeg = (benchmark_time_t *)malloc(tres->nthreads * sizeof(*tbeg)); assert(tbeg != nullptr); benchmark_time_t *tend = (benchmark_time_t *)malloc(tres->nthreads * sizeof(*tend)); assert(tend != nullptr); auto *totals = (double *)malloc(tres->nrepeats * sizeof(double)); assert(totals != nullptr); /* estimate total penalty of getting time from the system */ benchmark_time_t Tget; unsigned long long nsecs = tres->nops * Get_time_avg; benchmark_time_set(&Tget, nsecs); for (size_t i = 0; i < tres->nrepeats; i++) { struct bench_results *res = &tres->res[i]; /* get start and end timestamps of each worker */ for (size_t j = 0; j < tres->nthreads; j++) { tbeg[j] = res->thres[j]->beg; tend[j] = res->thres[j]->end; } /* sort start and end timestamps */ qsort(tbeg, tres->nthreads, sizeof(benchmark_time_t), compare_time); qsort(tend, tres->nthreads, sizeof(benchmark_time_t), compare_time); /* calculating time interval between start and end time */ benchmark_time_t Tbeg = tbeg[0]; benchmark_time_t Tend = tend[tres->nthreads - 1]; benchmark_time_t Ttot_ove; benchmark_time_diff(&Ttot_ove, &Tbeg, &Tend); /* * subtract time used for getting the current time from the * system */ benchmark_time_t Ttot; benchmark_time_diff(&Ttot, &Tget, &Ttot_ove); double Stot = benchmark_time_get_secs(&Ttot); if (Stot > tres->total.max) tres->total.max = Stot; if (Stot < tres->total.min) tres->total.min = Stot; tres->total.avg += Stot; totals[i] = Stot; } /* median */ qsort(totals, tres->nrepeats, sizeof(double), compare_doubles); if (tres->nrepeats % 2) { tres->total.med = totals[tres->nrepeats / 2]; } else { double m1 = totals[tres->nrepeats / 
2]; double m2 = totals[tres->nrepeats / 2 - 1]; tres->total.med = (m1 + m2) / 2.0; } /* total average time */ tres->total.avg /= (double)tres->nrepeats; /* number of operations per second */ tres->nopsps = (double)tres->nops * (double)tres->nthreads / tres->total.avg; /* std deviation of total time */ for (size_t i = 0; i < tres->nrepeats; i++) { double dev = (totals[i] - tres->total.avg); dev *= dev; tres->total.std_dev += dev; } tres->total.std_dev = sqrt(tres->total.std_dev / tres->nrepeats); /* latency */ for (size_t i = 0; i < tres->nrepeats; i++) { struct bench_results *res = &tres->res[i]; for (size_t j = 0; j < tres->nthreads; j++) { struct thread_results *thres = res->thres[j]; benchmark_time_t *beg = &thres->beg; for (size_t o = 0; o < tres->nops; o++) { benchmark_time_t lat; benchmark_time_diff(&lat, beg, &thres->end_op[o]); uint64_t nsecs = benchmark_time_get_nsecs(&lat); /* min, max latency */ if (nsecs > tres->latency.max) tres->latency.max = nsecs; if (nsecs < tres->latency.min) tres->latency.min = nsecs; tres->latency.avg += nsecs; beg = &thres->end_op[o]; } } } /* average latency */ size_t count = tres->nrepeats * tres->nthreads * tres->nops; assert(count > 0); tres->latency.avg /= count; auto *ntotals = (uint64_t *)calloc(count, sizeof(uint64_t)); assert(ntotals != nullptr); count = 0; /* std deviation of latency and percentiles */ for (size_t i = 0; i < tres->nrepeats; i++) { struct bench_results *res = &tres->res[i]; for (size_t j = 0; j < tres->nthreads; j++) { struct thread_results *thres = res->thres[j]; benchmark_time_t *beg = &thres->beg; for (size_t o = 0; o < tres->nops; o++) { benchmark_time_t lat; benchmark_time_diff(&lat, beg, &thres->end_op[o]); uint64_t nsecs = benchmark_time_get_nsecs(&lat); uint64_t dev = (nsecs - tres->latency.avg); dev *= dev; tres->latency.std_dev += dev; beg = &thres->end_op[o]; ntotals[count] = nsecs; ++count; } } } tres->latency.std_dev = sqrt(tres->latency.std_dev / count); /* find 50%, 99.0% and 99.9% percentiles */ qsort(ntotals, count, sizeof(uint64_t), compare_uint64t); uint64_t p50_0 = count * 50 / 100; uint64_t p99_0 = count * 99 / 100; uint64_t p99_9 = count * 999 / 1000; tres->latency.pctl50_0p = ntotals[p50_0]; tres->latency.pctl99_0p = ntotals[p99_0]; tres->latency.pctl99_9p = ntotals[p99_9]; free(ntotals); free(totals); free(tend); free(tbeg); } /* * pmembench_print_args -- print arguments for one benchmark */ static void pmembench_print_args(struct benchmark_clo *clos, size_t nclos) { struct benchmark_clo clo; for (size_t i = 0; i < nclos; i++) { clo = clos[i]; if (clo.opt_short != 0) printf("\t-%c,", clo.opt_short); else printf("\t"); printf("\t--%-15s\t\t%s", clo.opt_long, clo.descr); if (clo.type != CLO_TYPE_FLAG) printf(" [default: %s]", clo.def); if (clo.type == CLO_TYPE_INT) { if (clo.type_int.min != LONG_MIN) printf(" [min: %" PRId64 "]", clo.type_int.min); if (clo.type_int.max != LONG_MAX) printf(" [max: %" PRId64 "]", clo.type_int.max); } else if (clo.type == CLO_TYPE_UINT) { if (clo.type_uint.min != 0) printf(" [min: %" PRIu64 "]", clo.type_uint.min); if (clo.type_uint.max != ULONG_MAX) printf(" [max: %" PRIu64 "]", clo.type_uint.max); } printf("\n"); } } /* * pmembench_print_help_single -- prints help for single benchmark */ static void pmembench_print_help_single(struct benchmark *bench) { struct benchmark_info *info = bench->info; printf("%s\n%s\n", info->name, info->brief); printf("\nArguments:\n"); size_t nclos = sizeof(pmembench_clos) / sizeof(struct benchmark_clo); pmembench_print_args(pmembench_clos, 
nclos); if (info->clos == nullptr) return; pmembench_print_args(info->clos, info->nclos); } /* * pmembench_print_usage -- print usage of framework */ static void pmembench_print_usage() { printf("Usage: $ pmembench [-h|--help] [-v|--version]" "\t[<benchmark>[<args>]]\n"); printf("\t\t\t\t\t\t[<config>[<scenario>]]\n"); printf("\t\t\t\t\t\t[<config>[<scenario>[<common_args>]]]\n"); } /* * pmembench_print_version -- print version of framework */ static void pmembench_print_version() { printf("Benchmark framework - version %u.%u\n", version.major, version.minor); } /* * pmembench_print_examples() -- print examples of using framework */ static void pmembench_print_examples() { printf("\nExamples:\n"); printf("$ pmembench <benchmark_name> <args>\n"); printf(" # runs benchmark of name <benchmark> with arguments <args>\n"); printf("or\n"); printf("$ pmembench <config_file>\n"); printf(" # runs all scenarios from config file\n"); printf("or\n"); printf("$ pmembench [<benchmark_name>] [-h|--help [-v|--version]\n"); printf(" # prints help\n"); printf("or\n"); printf("$ pmembench <config_file> <name_of_scenario>\n"); printf(" # runs the specified scenario from config file\n"); printf("$ pmembench <config_file> <name_of_scenario_1> " "<name_of_scenario_2> <common_args>\n"); printf(" # runs the specified scenarios from config file and overwrites" " the given common_args from the config file\n"); } /* * pmembench_print_help -- print help for framework */ static void pmembench_print_help() { pmembench_print_version(); pmembench_print_usage(); printf("\nCommon arguments:\n"); size_t nclos = sizeof(pmembench_opts) / sizeof(struct benchmark_clo); pmembench_print_args(pmembench_opts, nclos); printf("\nAvaliable benchmarks:\n"); struct benchmark *bench = nullptr; PMDK_LIST_FOREACH(bench, &benchmarks.head, next) printf("\t%-20s\t\t%s\n", bench->info->name, bench->info->brief); printf("\n$ pmembench <benchmark> --help to print detailed information" " about benchmark arguments\n"); pmembench_print_examples(); } /* * pmembench_get_bench -- searching benchmarks by name */ static struct benchmark * pmembench_get_bench(const char *name) { struct benchmark *bench; PMDK_LIST_FOREACH(bench, &benchmarks.head, next) { if (strcmp(name, bench->info->name) == 0) return bench; } return nullptr; } /* * pmembench_parse_opts -- parse arguments for framework */ static int pmembench_parse_opts(struct pmembench *pb) { int ret = 0; int argc = ++pb->argc; char **argv = --pb->argv; struct benchmark_opts *opts = nullptr; struct clo_vec *clovec; size_t size, n_clos; size = sizeof(struct benchmark_opts); n_clos = ARRAY_SIZE(pmembench_opts); clovec = clo_vec_alloc(size); assert(clovec != nullptr); if (benchmark_clo_parse(argc, argv, pmembench_opts, n_clos, clovec)) { ret = -1; goto out; } opts = (struct benchmark_opts *)clo_vec_get_args(clovec, 0); if (opts == nullptr) { ret = -1; goto out; } if (opts->help) pmembench_print_help(); if (opts->version) pmembench_print_version(); out: clo_vec_free(clovec); return ret; } /* * pmembench_remove_file -- remove file or directory if exists */ static int pmembench_remove_file(const char *path) { int ret = 0; os_stat_t status; char *tmp; int exists = util_file_exists(path); if (exists < 0) return -1; if (!exists) return 0; if (os_stat(path, &status) != 0) return 0; if (!(status.st_mode & S_IFDIR)) return pmempool_rm(path, 0); struct dir_handle it; struct file_info info; if (util_file_dir_open(&it, path)) { return -1; } while (util_file_dir_next(&it, &info) == 0) { if (strcmp(info.filename, ".") == 
0 || strcmp(info.filename, "..") == 0) continue; tmp = (char *)malloc(strlen(path) + strlen(info.filename) + 2); if (tmp == nullptr) return -1; sprintf(tmp, "%s" OS_DIR_SEP_STR "%s", path, info.filename); ret = info.is_dir ? pmembench_remove_file(tmp) : util_unlink(tmp); free(tmp); if (ret != 0) { util_file_dir_close(&it); return ret; } } util_file_dir_close(&it); return util_file_dir_remove(path); } /* * pmembench_single_repeat -- runs benchmark ones */ static int pmembench_single_repeat(struct benchmark *bench, struct benchmark_args *args, struct bench_results *res) { int ret = 0; if (args->main_affinity != -1) { os_cpu_set_t cpuset; os_cpu_zero(&cpuset); os_thread_t self; os_thread_self(&self); os_cpu_set(args->main_affinity, &cpuset); errno = os_thread_setaffinity_np(&self, sizeof(os_cpu_set_t), &cpuset); if (errno) { perror("os_thread_setaffinity_np"); return -1; } sched_yield(); } if (bench->info->rm_file && !args->is_dynamic_poolset) { ret = pmembench_remove_file(args->fname); if (ret != 0 && errno != ENOENT) { perror("removing file failed"); return ret; } } if (bench->info->init) { if (bench->info->init(bench, args)) { warn("%s: initialization failed", bench->info->name); return -1; } } assert(bench->info->operation != nullptr); assert(args->n_threads != 0); struct benchmark_worker **workers; workers = (struct benchmark_worker **)malloc( args->n_threads * sizeof(struct benchmark_worker *)); assert(workers != nullptr); if ((ret = pmembench_init_workers(workers, bench, args)) != 0) { goto out; } unsigned j; for (j = 0; j < args->n_threads; j++) { benchmark_worker_run(workers[j]); } for (j = 0; j < args->n_threads; j++) { benchmark_worker_join(workers[j]); if (workers[j]->ret != 0) { ret = workers[j]->ret; fprintf(stderr, "thread number %u failed\n", j); } } results_store(res, workers, args->n_threads, args->n_ops_per_thread); for (j = 0; j < args->n_threads; j++) { benchmark_worker_exit(workers[j]); free(workers[j]->info.opinfo); benchmark_worker_free(workers[j]); } out: free(workers); if (bench->info->exit) bench->info->exit(bench, args); return ret; } /* * scale_up_min_exe_time -- scale up the number of operations to obtain an * execution time not smaller than the assumed minimal execution time */ int scale_up_min_exe_time(struct benchmark *bench, struct benchmark_args *args, struct total_results **total_results) { const double min_exe_time = args->min_exe_time; struct total_results *total_res = *total_results; total_res->nrepeats = 1; do { /* * run single benchmark repeat to probe execution time */ int ret = pmembench_single_repeat(bench, args, &total_res->res[0]); if (ret != 0) return 1; get_total_results(total_res); if (min_exe_time < total_res->total.min + MIN_EXE_TIME_E) break; /* * scale up number of operations to get assumed minimal * execution time */ args->n_ops_per_thread = (size_t)( (double)args->n_ops_per_thread * (min_exe_time + MIN_EXE_TIME_E) / total_res->total.min); results_free(total_res); *total_results = results_alloc(args); assert(*total_results != nullptr); total_res = *total_results; total_res->nrepeats = 1; } while (1); total_res->nrepeats = args->repeats; return 0; } /* * is_absolute_path_to_directory -- checks if passed argument is absolute * path to directory */ static bool is_absolute_path_to_directory(const char *path) { os_stat_t sb; return util_is_absolute_path(path) && os_stat(path, &sb) == 0 && S_ISDIR(sb.st_mode); } /* * pmembench_run -- runs one benchmark. Parses arguments and performs * specific functions. 
*/ static int pmembench_run(struct pmembench *pb, struct benchmark *bench) { enum file_type type; char old_wd[PATH_MAX]; int ret = 0; struct benchmark_args *args = nullptr; struct total_results *total_res = nullptr; struct latency *stats = nullptr; double *workers_times = nullptr; struct clo_vec *clovec = nullptr; assert(bench->info != nullptr); pmembench_merge_clos(bench); /* * Check if PMEMBENCH_DIR env var is set and change * the working directory accordingly. */ char *wd = os_getenv("PMEMBENCH_DIR"); if (wd != nullptr) { /* get current dir name */ if (getcwd(old_wd, PATH_MAX) == nullptr) { perror("getcwd"); ret = -1; goto out_release_clos; } os_stat_t stat_buf; if (os_stat(wd, &stat_buf) != 0) { perror("os_stat"); ret = -1; goto out_release_clos; } if (!S_ISDIR(stat_buf.st_mode)) { warn("PMEMBENCH_DIR is not a directory: %s", wd); ret = -1; goto out_release_clos; } if (chdir(wd)) { perror("chdir(wd)"); ret = -1; goto out_release_clos; } } if (bench->info->pre_init) { if (bench->info->pre_init(bench)) { warn("%s: pre-init failed", bench->info->name); ret = -1; goto out_old_wd; } } clovec = clo_vec_alloc(bench->args_size); assert(clovec != nullptr); if (pmembench_parse_clo(pb, bench, clovec)) { warn("%s: parsing command line arguments failed", bench->info->name); ret = -1; goto out_release_args; } args = (struct benchmark_args *)clo_vec_get_args(clovec, 0); if (args == nullptr) { warn("%s: parsing command line arguments failed", bench->info->name); ret = -1; goto out_release_args; } if (args->help) { pmembench_print_help_single(bench); goto out; } if (strlen(args->fname) > PATH_MAX) { warn("Filename too long"); ret = -1; goto out; } type = util_file_get_type(args->fname); if (type == OTHER_ERROR) { fprintf(stderr, "could not check type of file %s\n", args->fname); return -1; } pmembench_print_header(pb, bench, clovec); size_t args_i; for (args_i = 0; args_i < clovec->nargs; args_i++) { args = (struct benchmark_args *)clo_vec_get_args(clovec, args_i); if (args == nullptr) { warn("%s: parsing command line arguments failed", bench->info->name); ret = -1; goto out; } args->opts = (void *)((uintptr_t)args + sizeof(struct benchmark_args)); if (args->is_dynamic_poolset) { if (!bench->info->allow_poolset) { fprintf(stderr, "dynamic poolset not supported\n"); goto out; } if (!is_absolute_path_to_directory(args->fname)) { fprintf(stderr, "path must be absolute and point to a directory\n"); goto out; } } else { args->is_poolset = util_is_poolset_file(args->fname) == 1; if (args->is_poolset) { if (!bench->info->allow_poolset) { fprintf(stderr, "poolset files not supported\n"); goto out; } args->fsize = util_poolset_size(args->fname); if (!args->fsize) { fprintf(stderr, "invalid size of poolset\n"); goto out; } } else if (type == TYPE_DEVDAX) { args->fsize = util_file_get_size(args->fname); if (!args->fsize) { fprintf(stderr, "invalid size of device dax\n"); goto out; } } } unsigned n_threads_copy = args->n_threads; args->n_threads = !bench->info->multithread ? 1 : args->n_threads; size_t n_ops_per_thread_copy = args->n_ops_per_thread; args->n_ops_per_thread = !bench->info->multiops ? 
1 : args->n_ops_per_thread; stats = (struct latency *)calloc(args->repeats, sizeof(struct latency)); assert(stats != nullptr); workers_times = (double *)calloc( args->n_threads * args->repeats, sizeof(double)); assert(workers_times != nullptr); total_res = results_alloc(args); assert(total_res != nullptr); unsigned i = 0; if (args->min_exe_time != 0 && bench->info->multiops) { ret = scale_up_min_exe_time(bench, args, &total_res); if (ret != 0) goto out; i = 1; } for (; i < args->repeats; i++) { ret = pmembench_single_repeat(bench, args, &total_res->res[i]); if (ret != 0) goto out; } get_total_results(total_res); pmembench_print_results(bench, args, total_res); args->n_ops_per_thread = n_ops_per_thread_copy; args->n_threads = n_threads_copy; results_free(total_res); free(stats); free(workers_times); total_res = nullptr; stats = nullptr; workers_times = nullptr; } out: if (total_res) results_free(total_res); if (stats) free(stats); if (workers_times) free(workers_times); out_release_args: clo_vec_free(clovec); out_old_wd: /* restore the original working directory */ if (wd != nullptr) { /* Only if PMEMBENCH_DIR env var was defined */ if (chdir(old_wd)) { perror("chdir(old_wd)"); ret = -1; } } out_release_clos: pmembench_release_clos(bench); return ret; } /* * pmembench_free_benchmarks -- release all benchmarks */ static void __attribute__((destructor)) pmembench_free_benchmarks(void) { while (!PMDK_LIST_EMPTY(&benchmarks.head)) { struct benchmark *bench = PMDK_LIST_FIRST(&benchmarks.head); PMDK_LIST_REMOVE(bench, next); free(bench); } } /* * pmembench_run_scenario -- run single benchmark's scenario */ static int pmembench_run_scenario(struct pmembench *pb, struct scenario *scenario) { struct benchmark *bench = pmembench_get_bench(scenario->benchmark); if (nullptr == bench) { fprintf(stderr, "unknown benchmark: %s\n", scenario->benchmark); return -1; } pb->scenario = scenario; return pmembench_run(pb, bench); } /* * pmembench_run_scenarios -- run all scenarios */ static int pmembench_run_scenarios(struct pmembench *pb, struct scenarios *ss) { struct scenario *scenario; FOREACH_SCENARIO(scenario, ss) { if (pmembench_run_scenario(pb, scenario) != 0) return -1; } return 0; } /* * pmembench_run_config -- run one or all scenarios from config file */ static int pmembench_run_config(struct pmembench *pb, const char *config) { struct scenarios *ss = nullptr; struct config_reader *cr = config_reader_alloc(); assert(cr != nullptr); int ret = 0; if ((ret = config_reader_read(cr, config))) goto out; if ((ret = config_reader_get_scenarios(cr, &ss))) goto out; assert(ss != nullptr); if (pb->argc == 1) { if ((ret = pmembench_run_scenarios(pb, ss)) != 0) goto out_scenarios; } else { /* Skip the config file name in cmd line params */ int tmp_argc = pb->argc - 1; char **tmp_argv = pb->argv + 1; if (!contains_scenarios(tmp_argc, tmp_argv, ss)) { /* no scenarios in cmd line arguments - parse params */ pb->override_clos = true; if ((ret = pmembench_run_scenarios(pb, ss)) != 0) goto out_scenarios; } else { /* scenarios in cmd line */ struct scenarios *cmd_ss = scenarios_alloc(); assert(cmd_ss != nullptr); int parsed_scenarios = clo_get_scenarios( tmp_argc, tmp_argv, ss, cmd_ss); if (parsed_scenarios < 0) goto out_cmd; /* * If there are any cmd line args left, treat * them as config file params override. */ if (tmp_argc - parsed_scenarios) pb->override_clos = true; /* * Skip the scenarios in the cmd line, * pmembench_run_scenarios does not expect them and will * fail otherwise. 
*/ pb->argc -= parsed_scenarios; pb->argv += parsed_scenarios; ret = pmembench_run_scenarios(pb, cmd_ss); out_cmd: scenarios_free(cmd_ss); } } out_scenarios: scenarios_free(ss); out: config_reader_free(cr); return ret; } int main(int argc, char *argv[]) { util_init(); util_mmap_init(); /* * Parse common command line arguments and * benchmark's specific ones. */ if (argc < 2) { pmembench_print_usage(); exit(EXIT_FAILURE); } int ret = 0; int fexists; struct benchmark *bench; struct pmembench *pb = (struct pmembench *)calloc(1, sizeof(*pb)); assert(pb != nullptr); Get_time_avg = benchmark_get_avg_get_time(); pb->argc = --argc; pb->argv = ++argv; char *bench_name = pb->argv[0]; if (nullptr == bench_name) { ret = -1; goto out; } fexists = os_access(bench_name, R_OK) == 0; bench = pmembench_get_bench(bench_name); if (nullptr != bench) ret = pmembench_run(pb, bench); else if (fexists) ret = pmembench_run_config(pb, bench_name); else if ((ret = pmembench_parse_opts(pb)) != 0) { pmembench_print_usage(); goto out; } out: free(pb); util_mmap_fini(); return ret; } #ifdef _MSC_VER extern "C" { /* * Since libpmemobj is linked statically, * we need to invoke its ctor/dtor. */ MSVC_CONSTR(libpmemobj_init) MSVC_DESTR(libpmemobj_fini) } #endif
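For reference, the latency percentiles and standard deviation that get_total_results() reports reduce to a sort plus index arithmetic over per-operation latencies. Below is a minimal standalone sketch of that arithmetic, with invented sample values and double-precision accumulation (the framework accumulates the average in integer nanoseconds):

// Standalone sketch of the percentile/std-dev arithmetic used by
// get_total_results() above; sample latencies are invented.
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
	/* invented sample latencies in nanoseconds */
	std::vector<uint64_t> lat = {120, 95, 300, 110, 105, 98, 2500, 130, 101, 99};
	std::sort(lat.begin(), lat.end());
	size_t count = lat.size();

	double avg = 0;
	for (uint64_t v : lat)
		avg += (double)v;
	avg /= (double)count;

	double var = 0;
	for (uint64_t v : lat) {
		double dev = (double)v - avg;
		var += dev * dev;
	}
	double std_dev = std::sqrt(var / (double)count);

	/* same index selection as the framework: count * P / scale */
	uint64_t p50 = lat[count * 50 / 100];
	uint64_t p99 = lat[count * 99 / 100];
	uint64_t p99_9 = lat[count * 999 / 1000];

	printf("avg=%f std-dev=%f p50=%" PRIu64 " p99=%" PRIu64 " p99.9=%" PRIu64 "\n",
	       avg, std_dev, p50, p99, p99_9);
	return 0;
}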
41,103
24.078707
77
cpp
null
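The throughput figures printed by pmembench_print_results() come from two formulas: operations per second is the total operation count divided by the average total time, and bandwidth (when a benchmark sets print_bandwidth) multiplies that by the data size per operation. A small sketch with invented inputs:

// Sketch of the throughput numbers pmembench reports; the inputs below
// are invented, the framework takes them from benchmark_args and the
// measured total time.
#include <cstdio>

int main() {
	double nops_per_thread = 1000000.0; /* n_ops_per_thread */
	double nthreads = 4.0;              /* n_threads */
	double total_avg_sec = 2.5;         /* total.avg over repeats */
	double dsize = 256.0;               /* bytes processed per operation */

	double nopsps = nops_per_thread * nthreads / total_avg_sec;
	double bandwidth_mibps = nopsps * dsize / 1024 / 1024;

	printf("ops/s=%f bandwidth=%f MiB/s\n", nopsps, bandwidth_mibps);
	return 0;
}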
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/pmem_memcpy.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * pmem_memcpy.cpp -- benchmark implementation for pmem_memcpy */ #include <cassert> #include <cerrno> #include <climits> #include <cstdio> #include <cstdlib> #include <cstring> #include <fcntl.h> #include <libpmem.h> #include <sys/mman.h> #include <unistd.h> #include "benchmark.hpp" #include "file.h" #define FLUSH_ALIGN 64 #define MAX_OFFSET (FLUSH_ALIGN - 1) struct pmem_bench; typedef size_t (*offset_fn)(struct pmem_bench *pmb, struct operation_info *info); /* * pmem_args -- benchmark specific arguments */ struct pmem_args { /* * Defines the copy operation direction. Whether it is * writing from RAM to PMEM (for argument value "write") * or PMEM to RAM (for argument value "read"). */ char *operation; /* * The source address offset used to test pmem_memcpy() * performance when source address is not aligned. */ size_t src_off; /* * The destination address offset used to test * pmem_memcpy() performance when destination address * is not aligned. */ size_t dest_off; /* The size of data chunk. */ size_t chunk_size; /* * Specifies the order in which data chunks are selected * to be copied. There are three modes supported: * stat, seq, rand. */ char *src_mode; /* * Specifies the order in which data chunks are written * to the destination address. There are three modes * supported: stat, seq, rand. */ char *dest_mode; /* * When this flag is set to true, PMEM is not used. * This option is useful, when comparing performance * of pmem_memcpy() function to regular memcpy(). */ bool memcpy; /* * When this flag is set to true, pmem_persist() * function is used, otherwise pmem_flush() is performed. */ bool persist; /* do not do warmup */ bool no_warmup; }; /* * pmem_bench -- benchmark context */ struct pmem_bench { /* random offsets */ unsigned *rand_offsets; /* number of elements in randoms array */ size_t n_rand_offsets; /* The size of the allocated PMEM */ size_t fsize; /* The size of the allocated buffer */ size_t bsize; /* Pointer to the allocated volatile memory */ unsigned char *buf; /* Pointer to the allocated PMEM */ unsigned char *pmem_addr; /* * This field gets 'buf' or 'pmem_addr' fields assigned, * depending on the prog_args operation direction. */ unsigned char *src_addr; /* * This field gets 'buf' or 'pmem_addr' fields assigned, * depending on the prog_args operation direction. */ unsigned char *dest_addr; /* Stores prog_args structure */ struct pmem_args *pargs; /* * Function which returns src offset. Matches src_mode. */ offset_fn func_src; /* * Function which returns dst offset. Matches dst_mode. */ offset_fn func_dest; /* * The actual operation performed based on benchmark specific * arguments. */ int (*func_op)(void *dest, void *source, size_t len); }; /* * operation_type -- type of operation relative to persistent memory */ enum operation_type { OP_TYPE_UNKNOWN, OP_TYPE_READ, OP_TYPE_WRITE }; /* * operation_mode -- the mode of the copy process * * * static - read/write always the same chunk, * * sequential - read/write chunk by chunk, * * random - read/write to chunks selected randomly. * * It is used to determine source mode as well as the destination mode. */ enum operation_mode { OP_MODE_UNKNOWN, OP_MODE_STAT, OP_MODE_SEQ, OP_MODE_RAND }; /* * parse_op_type -- parses command line "--operation" argument * and returns proper operation type. 
*/ static enum operation_type parse_op_type(const char *arg) { if (strcmp(arg, "read") == 0) return OP_TYPE_READ; else if (strcmp(arg, "write") == 0) return OP_TYPE_WRITE; else return OP_TYPE_UNKNOWN; } /* * parse_op_mode -- parses command line "--src-mode" or "--dest-mode" * and returns proper operation mode. */ static enum operation_mode parse_op_mode(const char *arg) { if (strcmp(arg, "stat") == 0) return OP_MODE_STAT; else if (strcmp(arg, "seq") == 0) return OP_MODE_SEQ; else if (strcmp(arg, "rand") == 0) return OP_MODE_RAND; else return OP_MODE_UNKNOWN; } /* * mode_seq -- if copy mode is sequential mode_seq() returns * index of a chunk. */ static uint64_t mode_seq(struct pmem_bench *pmb, struct operation_info *info) { return info->args->n_ops_per_thread * info->worker->index + info->index; } /* * mode_stat -- if mode is static, the offset is always 0, * as only one block is used. */ static uint64_t mode_stat(struct pmem_bench *pmb, struct operation_info *info) { return 0; } /* * mode_rand -- if mode is random returns index of a random chunk */ static uint64_t mode_rand(struct pmem_bench *pmb, struct operation_info *info) { assert(info->index < pmb->n_rand_offsets); return info->args->n_ops_per_thread * info->worker->index + pmb->rand_offsets[info->index]; } /* * assign_mode_func -- parses "--src-mode" and "--dest-mode" command line * arguments and returns one of the above mode functions. */ static offset_fn assign_mode_func(char *option) { enum operation_mode op_mode = parse_op_mode(option); switch (op_mode) { case OP_MODE_STAT: return mode_stat; case OP_MODE_SEQ: return mode_seq; case OP_MODE_RAND: return mode_rand; default: return nullptr; } } /* * libc_memcpy -- copy using libc memcpy() function * followed by pmem_flush(). */ static int libc_memcpy(void *dest, void *source, size_t len) { memcpy(dest, source, len); pmem_flush(dest, len); return 0; } /* * libc_memcpy_persist -- copy using libc memcpy() function * followed by pmem_persist(). */ static int libc_memcpy_persist(void *dest, void *source, size_t len) { memcpy(dest, source, len); pmem_persist(dest, len); return 0; } /* * lipmem_memcpy_nodrain -- copy using libpmem pmem_memcpy_no_drain() * function without pmem_persist(). */ static int libpmem_memcpy_nodrain(void *dest, void *source, size_t len) { pmem_memcpy_nodrain(dest, source, len); return 0; } /* * libpmem_memcpy_persist -- copy using libpmem pmem_memcpy_persist() function. */ static int libpmem_memcpy_persist(void *dest, void *source, size_t len) { pmem_memcpy_persist(dest, source, len); return 0; } /* * assign_size -- assigns file and buffer size * depending on the operation mode and type. */ static int assign_size(struct pmem_bench *pmb, struct benchmark_args *args, enum operation_type *op_type) { *op_type = parse_op_type(pmb->pargs->operation); if (*op_type == OP_TYPE_UNKNOWN) { fprintf(stderr, "Invalid operation argument '%s'", pmb->pargs->operation); return -1; } enum operation_mode op_mode_src = parse_op_mode(pmb->pargs->src_mode); if (op_mode_src == OP_MODE_UNKNOWN) { fprintf(stderr, "Invalid source mode argument '%s'", pmb->pargs->src_mode); return -1; } enum operation_mode op_mode_dest = parse_op_mode(pmb->pargs->dest_mode); if (op_mode_dest == OP_MODE_UNKNOWN) { fprintf(stderr, "Invalid destination mode argument '%s'", pmb->pargs->dest_mode); return -1; } size_t large = args->n_ops_per_thread * pmb->pargs->chunk_size * args->n_threads; size_t little = pmb->pargs->chunk_size; if (*op_type == OP_TYPE_WRITE) { pmb->bsize = op_mode_src == OP_MODE_STAT ? 
little : large; pmb->fsize = op_mode_dest == OP_MODE_STAT ? little : large; if (pmb->pargs->src_off != 0) pmb->bsize += MAX_OFFSET; if (pmb->pargs->dest_off != 0) pmb->fsize += MAX_OFFSET; } else { pmb->fsize = op_mode_src == OP_MODE_STAT ? little : large; pmb->bsize = op_mode_dest == OP_MODE_STAT ? little : large; if (pmb->pargs->src_off != 0) pmb->fsize += MAX_OFFSET; if (pmb->pargs->dest_off != 0) pmb->bsize += MAX_OFFSET; } return 0; } /* * pmem_memcpy_init -- benchmark initialization * * Parses command line arguments, allocates persistent memory, and maps it. */ static int pmem_memcpy_init(struct benchmark *bench, struct benchmark_args *args) { assert(bench != nullptr); assert(args != nullptr); int ret = 0; size_t file_size = 0; int flags = 0; enum file_type type = util_file_get_type(args->fname); if (type == OTHER_ERROR) { fprintf(stderr, "could not check type of file %s\n", args->fname); return -1; } auto *pmb = (struct pmem_bench *)malloc(sizeof(struct pmem_bench)); assert(pmb != nullptr); pmb->pargs = (struct pmem_args *)args->opts; assert(pmb->pargs != nullptr); pmb->pargs->chunk_size = args->dsize; enum operation_type op_type; /* * Assign file and buffer size depending on the operation type * (READ from PMEM or WRITE to PMEM) */ if (assign_size(pmb, args, &op_type) != 0) { ret = -1; goto err_free_pmb; } pmb->buf = (unsigned char *)util_aligned_malloc(FLUSH_ALIGN, pmb->bsize); if (pmb->buf == nullptr) { perror("posix_memalign"); ret = -1; goto err_free_pmb; } pmb->n_rand_offsets = args->n_ops_per_thread * args->n_threads; assert(pmb->n_rand_offsets != 0); pmb->rand_offsets = (unsigned *)malloc(pmb->n_rand_offsets * sizeof(*pmb->rand_offsets)); if (pmb->rand_offsets == nullptr) { perror("malloc"); ret = -1; goto err_free_pmb_buf; } for (size_t i = 0; i < pmb->n_rand_offsets; ++i) pmb->rand_offsets[i] = rand() % args->n_ops_per_thread; if (type != TYPE_DEVDAX) { file_size = pmb->fsize; flags = PMEM_FILE_CREATE | PMEM_FILE_EXCL; } /* create a pmem file and memory map it */ pmb->pmem_addr = (unsigned char *)pmem_map_file( args->fname, file_size, flags, args->fmode, nullptr, nullptr); if (pmb->pmem_addr == nullptr) { perror(args->fname); ret = -1; goto err_free_pmb_rand_offsets; } if (op_type == OP_TYPE_READ) { pmb->src_addr = pmb->pmem_addr; pmb->dest_addr = pmb->buf; } else { pmb->src_addr = pmb->buf; pmb->dest_addr = pmb->pmem_addr; } /* set proper func_src() and func_dest() depending on benchmark args */ if ((pmb->func_src = assign_mode_func(pmb->pargs->src_mode)) == nullptr) { fprintf(stderr, "wrong src_mode parameter -- '%s'", pmb->pargs->src_mode); ret = -1; goto err_unmap; } if ((pmb->func_dest = assign_mode_func(pmb->pargs->dest_mode)) == nullptr) { fprintf(stderr, "wrong dest_mode parameter -- '%s'", pmb->pargs->dest_mode); ret = -1; goto err_unmap; } if (pmb->pargs->memcpy) { pmb->func_op = pmb->pargs->persist ? libc_memcpy_persist : libc_memcpy; } else { pmb->func_op = pmb->pargs->persist ? libpmem_memcpy_persist : libpmem_memcpy_nodrain; } if (!pmb->pargs->no_warmup) { memset(pmb->buf, 0, pmb->bsize); pmem_memset_persist(pmb->pmem_addr, 0, pmb->fsize); } pmembench_set_priv(bench, pmb); return 0; err_unmap: pmem_unmap(pmb->pmem_addr, pmb->fsize); err_free_pmb_rand_offsets: free(pmb->rand_offsets); err_free_pmb_buf: util_aligned_free(pmb->buf); err_free_pmb: free(pmb); return ret; } /* * pmem_memcpy_operation -- actual benchmark operation * * Depending on the memcpy flag "-m" tested operation will be memcpy() * or pmem_memcpy_persist(). 
*/ static int pmem_memcpy_operation(struct benchmark *bench, struct operation_info *info) { auto *pmb = (struct pmem_bench *)pmembench_get_priv(bench); size_t src_index = pmb->func_src(pmb, info); size_t dest_index = pmb->func_dest(pmb, info); void *source = pmb->src_addr + src_index * pmb->pargs->chunk_size + pmb->pargs->src_off; void *dest = pmb->dest_addr + dest_index * pmb->pargs->chunk_size + pmb->pargs->dest_off; size_t len = pmb->pargs->chunk_size; pmb->func_op(dest, source, len); return 0; } /* * pmem_memcpy_exit -- benchmark cleanup */ static int pmem_memcpy_exit(struct benchmark *bench, struct benchmark_args *args) { auto *pmb = (struct pmem_bench *)pmembench_get_priv(bench); pmem_unmap(pmb->pmem_addr, pmb->fsize); util_aligned_free(pmb->buf); free(pmb->rand_offsets); free(pmb); return 0; } /* structure to define command line arguments */ static struct benchmark_clo pmem_memcpy_clo[8]; /* Stores information about benchmark. */ static struct benchmark_info pmem_memcpy_bench; CONSTRUCTOR(pmem_memcpy_constructor) void pmem_memcpy_constructor(void) { pmem_memcpy_clo[0].opt_short = 'o'; pmem_memcpy_clo[0].opt_long = "operation"; pmem_memcpy_clo[0].descr = "Operation type - write, read"; pmem_memcpy_clo[0].type = CLO_TYPE_STR; pmem_memcpy_clo[0].off = clo_field_offset(struct pmem_args, operation); pmem_memcpy_clo[0].def = "write"; pmem_memcpy_clo[1].opt_short = 'S'; pmem_memcpy_clo[1].opt_long = "src-offset"; pmem_memcpy_clo[1].descr = "Source cache line alignment" " offset"; pmem_memcpy_clo[1].type = CLO_TYPE_UINT; pmem_memcpy_clo[1].off = clo_field_offset(struct pmem_args, src_off); pmem_memcpy_clo[1].def = "0"; pmem_memcpy_clo[1].type_uint.size = clo_field_size(struct pmem_args, src_off); pmem_memcpy_clo[1].type_uint.base = CLO_INT_BASE_DEC; pmem_memcpy_clo[1].type_uint.min = 0; pmem_memcpy_clo[1].type_uint.max = MAX_OFFSET; pmem_memcpy_clo[2].opt_short = 'D'; pmem_memcpy_clo[2].opt_long = "dest-offset"; pmem_memcpy_clo[2].descr = "Destination cache line " "alignment offset"; pmem_memcpy_clo[2].type = CLO_TYPE_UINT; pmem_memcpy_clo[2].off = clo_field_offset(struct pmem_args, dest_off); pmem_memcpy_clo[2].def = "0"; pmem_memcpy_clo[2].type_uint.size = clo_field_size(struct pmem_args, dest_off); pmem_memcpy_clo[2].type_uint.base = CLO_INT_BASE_DEC; pmem_memcpy_clo[2].type_uint.min = 0; pmem_memcpy_clo[2].type_uint.max = MAX_OFFSET; pmem_memcpy_clo[3].opt_short = 0; pmem_memcpy_clo[3].opt_long = "src-mode"; pmem_memcpy_clo[3].descr = "Source reading mode"; pmem_memcpy_clo[3].type = CLO_TYPE_STR; pmem_memcpy_clo[3].off = clo_field_offset(struct pmem_args, src_mode); pmem_memcpy_clo[3].def = "seq"; pmem_memcpy_clo[4].opt_short = 0; pmem_memcpy_clo[4].opt_long = "dest-mode"; pmem_memcpy_clo[4].descr = "Destination writing mode"; pmem_memcpy_clo[4].type = CLO_TYPE_STR; pmem_memcpy_clo[4].off = clo_field_offset(struct pmem_args, dest_mode); pmem_memcpy_clo[4].def = "seq"; pmem_memcpy_clo[5].opt_short = 'm'; pmem_memcpy_clo[5].opt_long = "libc-memcpy"; pmem_memcpy_clo[5].descr = "Use libc memcpy()"; pmem_memcpy_clo[5].type = CLO_TYPE_FLAG; pmem_memcpy_clo[5].off = clo_field_offset(struct pmem_args, memcpy); pmem_memcpy_clo[5].def = "false"; pmem_memcpy_clo[6].opt_short = 'p'; pmem_memcpy_clo[6].opt_long = "persist"; pmem_memcpy_clo[6].descr = "Use pmem_persist()"; pmem_memcpy_clo[6].type = CLO_TYPE_FLAG; pmem_memcpy_clo[6].off = clo_field_offset(struct pmem_args, persist); pmem_memcpy_clo[6].def = "true"; pmem_memcpy_clo[7].opt_short = 'w'; pmem_memcpy_clo[7].opt_long = "no-warmup"; 
pmem_memcpy_clo[7].descr = "Don't do warmup"; pmem_memcpy_clo[7].def = "false"; pmem_memcpy_clo[7].type = CLO_TYPE_FLAG; pmem_memcpy_clo[7].off = clo_field_offset(struct pmem_args, no_warmup); pmem_memcpy_bench.name = "pmem_memcpy"; pmem_memcpy_bench.brief = "Benchmark for " "pmem_memcpy_persist() and " "pmem_memcpy_nodrain() " "operations"; pmem_memcpy_bench.init = pmem_memcpy_init; pmem_memcpy_bench.exit = pmem_memcpy_exit; pmem_memcpy_bench.multithread = true; pmem_memcpy_bench.multiops = true; pmem_memcpy_bench.operation = pmem_memcpy_operation; pmem_memcpy_bench.measure_time = true; pmem_memcpy_bench.clos = pmem_memcpy_clo; pmem_memcpy_bench.nclos = ARRAY_SIZE(pmem_memcpy_clo); pmem_memcpy_bench.opts_size = sizeof(struct pmem_args); pmem_memcpy_bench.rm_file = true; pmem_memcpy_bench.allow_poolset = false; pmem_memcpy_bench.print_bandwidth = true; REGISTER_BENCHMARK(pmem_memcpy_bench); };
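Outside the framework, the copy path this benchmark times corresponds to a short libpmem sequence: map a pmem file, copy into it with pmem_memcpy_persist(), unmap. A minimal sketch, with a placeholder path and sizes:

// Minimal sketch of the libpmem calls exercised by the benchmark above.
#include <cstdio>
#include <cstring>
#include <libpmem.h>

int main() {
	const char *path = "/mnt/pmem/memcpy_example"; /* placeholder path */
	size_t len = 4 << 20;                          /* 4 MiB pool file */
	size_t mapped_len;
	int is_pmem;

	void *dest = pmem_map_file(path, len, PMEM_FILE_CREATE, 0666,
				   &mapped_len, &is_pmem);
	if (dest == NULL) {
		perror("pmem_map_file");
		return 1;
	}

	char src[4096];
	memset(src, 0xab, sizeof(src));

	/* copy + flush to persistence in one call, as the benchmark's
	 * libpmem_memcpy_persist() wrapper does */
	pmem_memcpy_persist(dest, src, sizeof(src));

	pmem_unmap(dest, mapped_len);
	return 0;
}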
15,611
24.42671
79
cpp
null
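The address each copy operates on is derived from a chunk index returned by the configured mode function (stat, seq or rand), as in pmem_memcpy_operation() above; a tiny sketch of that arithmetic with invented numbers:

// Sketch of source/destination address selection: a mode function picks
// a chunk index, and the byte offset is index * chunk_size plus the
// optional cache-line misalignment offset. Values are invented.
#include <cstdint>
#include <cstdio>

static uint64_t mode_seq_idx(uint64_t n_ops_per_thread, uint64_t worker, uint64_t op) {
	/* same formula as mode_seq() in the benchmark */
	return n_ops_per_thread * worker + op;
}

int main() {
	uint64_t chunk_size = 256;
	uint64_t src_off = 5;   /* deliberate cache-line misalignment */
	uint64_t idx = mode_seq_idx(1000, 3, 42);

	uint64_t byte_offset = idx * chunk_size + src_off;
	printf("chunk %llu starts at byte offset %llu from the mapped base\n",
	       (unsigned long long)idx, (unsigned long long)byte_offset);
	return 0;
}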
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/pmemobj_persist.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * pmemobj_persist.cpp -- pmemobj persist benchmarks definition */ #include <cassert> #include <cerrno> #include <cstddef> #include <cstdio> #include <cstdlib> #include <cstring> #include <fcntl.h> #include <sys/file.h> #include <sys/mman.h> #include <unistd.h> #include "benchmark.hpp" #include "file.h" #include "libpmemobj.h" #include "util.h" /* * The factor used for PMEM pool size calculation, accounts for metadata, * fragmentation and etc. */ #define FACTOR 3 /* The minimum allocation size that pmalloc can perform */ #define ALLOC_MIN_SIZE 64 /* OOB and allocation header size */ #define OOB_HEADER_SIZE 64 #define CONST_B 0xFF /* * prog_args -- benchmark specific command line options */ struct prog_args { size_t minsize; /* minimum size for random allocation size */ bool use_random_size; /* if set, use random size allocations */ bool no_warmup; /* do not do warmup */ unsigned seed; /* seed for random numbers */ }; /* * obj_bench -- benchmark context */ struct obj_bench { PMEMobjpool *pop; /* persistent pool handle */ struct prog_args *pa; /* prog_args structure */ PMEMoid *oids; /* vector of allocated objects */ void **ptrs; /* pointers to allocated objects */ uint64_t nobjs; /* number of allocated objects */ size_t obj_size; /* size of each allocated objects */ int const_b; /* memset() value */ }; /* * init_objects -- allocate persistent objects and obtain direct pointers */ static int init_objects(struct obj_bench *ob) { assert(ob->nobjs != 0); ob->oids = (PMEMoid *)malloc(ob->nobjs * sizeof(*ob->oids)); if (!ob->oids) { perror("malloc"); return -1; } ob->ptrs = (void **)malloc(ob->nobjs * sizeof(*ob->ptrs)); if (!ob->ptrs) { perror("malloc"); goto err_malloc; } for (uint64_t i = 0; i < ob->nobjs; i++) { PMEMoid oid; void *ptr; if (pmemobj_alloc(ob->pop, &oid, ob->obj_size, 0, nullptr, nullptr)) { perror("pmemobj_alloc"); goto err_palloc; } ptr = pmemobj_direct(oid); if (!ptr) { perror("pmemobj_direct"); goto err_palloc; } ob->oids[i] = oid; ob->ptrs[i] = ptr; } return 0; err_palloc: free(ob->ptrs); err_malloc: free(ob->oids); return -1; } /* * do_warmup -- does the warmup by writing the whole pool area */ static void do_warmup(struct obj_bench *ob) { for (uint64_t i = 0; i < ob->nobjs; ++i) { memset(ob->ptrs[i], 0, ob->obj_size); pmemobj_persist(ob->pop, ob->ptrs[i], ob->obj_size); } } /* * obj_persist_op -- actual benchmark operation */ static int obj_persist_op(struct benchmark *bench, struct operation_info *info) { auto *ob = (struct obj_bench *)pmembench_get_priv(bench); uint64_t idx = info->worker->index * info->args->n_ops_per_thread + info->index; assert(idx < ob->nobjs); void *ptr = ob->ptrs[idx]; memset(ptr, ob->const_b, ob->obj_size); pmemobj_persist(ob->pop, ptr, ob->obj_size); return 0; } /* * obj_persist_init -- initialization function */ static int obj_persist_init(struct benchmark *bench, struct benchmark_args *args) { assert(bench != nullptr); assert(args != nullptr); assert(args->opts != nullptr); enum file_type type = util_file_get_type(args->fname); if (type == OTHER_ERROR) { fprintf(stderr, "could not check type of file %s\n", args->fname); return -1; } auto *pa = (struct prog_args *)args->opts; size_t poolsize; if (pa->minsize >= args->dsize) { fprintf(stderr, "Wrong params - allocation size\n"); return -1; } auto *ob = (struct obj_bench *)malloc(sizeof(struct obj_bench)); if (ob == nullptr) { perror("malloc"); return -1; } pmembench_set_priv(bench, ob); ob->pa = pa; /* 
initialize memset() value */ ob->const_b = CONST_B; ob->nobjs = args->n_ops_per_thread * args->n_threads; /* Create pmemobj pool. */ ob->obj_size = args->dsize; if (ob->obj_size < ALLOC_MIN_SIZE) ob->obj_size = ALLOC_MIN_SIZE; /* For data objects */ poolsize = ob->nobjs * (ob->obj_size + OOB_HEADER_SIZE); /* multiply by FACTOR for metadata, fragmentation, etc. */ poolsize = poolsize * FACTOR; if (args->is_poolset || type == TYPE_DEVDAX) { if (args->fsize < poolsize) { fprintf(stderr, "file size too large\n"); goto free_ob; } poolsize = 0; } else if (poolsize < PMEMOBJ_MIN_POOL) { poolsize = PMEMOBJ_MIN_POOL; } poolsize = PAGE_ALIGNED_UP_SIZE(poolsize); ob->pop = pmemobj_create(args->fname, nullptr, poolsize, args->fmode); if (ob->pop == nullptr) { fprintf(stderr, "%s\n", pmemobj_errormsg()); goto free_ob; } if (init_objects(ob)) { goto free_pop; } if (!ob->pa->no_warmup) { do_warmup(ob); } return 0; free_pop: pmemobj_close(ob->pop); free_ob: free(ob); return -1; } /* * obj_persist_exit -- benchmark cleanup function */ static int obj_persist_exit(struct benchmark *bench, struct benchmark_args *args) { auto *ob = (struct obj_bench *)pmembench_get_priv(bench); for (uint64_t i = 0; i < ob->nobjs; ++i) { pmemobj_free(&ob->oids[i]); } pmemobj_close(ob->pop); free(ob->oids); free(ob->ptrs); free(ob); return 0; } static struct benchmark_clo obj_persist_clo[1]; /* Stores information about benchmark. */ static struct benchmark_info obj_persist_info; CONSTRUCTOR(pmemobj_persist_constructor) void pmemobj_persist_constructor(void) { obj_persist_clo[0].opt_short = 'w'; obj_persist_clo[0].opt_long = "no-warmup"; obj_persist_clo[0].descr = "Don't do warmup"; obj_persist_clo[0].def = "false"; obj_persist_clo[0].type = CLO_TYPE_FLAG; obj_persist_clo[0].off = clo_field_offset(struct prog_args, no_warmup); obj_persist_info.name = "pmemobj_persist"; obj_persist_info.brief = "Benchmark for pmemobj_persist() " "operation"; obj_persist_info.init = obj_persist_init; obj_persist_info.exit = obj_persist_exit; obj_persist_info.multithread = true; obj_persist_info.multiops = true; obj_persist_info.operation = obj_persist_op; obj_persist_info.measure_time = true; obj_persist_info.clos = obj_persist_clo; obj_persist_info.nclos = ARRAY_SIZE(obj_persist_clo); obj_persist_info.opts_size = sizeof(struct prog_args); obj_persist_info.rm_file = true; obj_persist_info.allow_poolset = true; REGISTER_BENCHMARK(obj_persist_info); };
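The operation this benchmark times is a memset() of one object followed by pmemobj_persist(); stripped of the framework, the libpmemobj sequence looks roughly like the sketch below (path, layout name and sizes are placeholders):

// Minimal sketch of the libpmemobj sequence timed by obj_persist_op().
#include <cstdio>
#include <cstring>
#include <libpmemobj.h>

int main() {
	const char *path = "/mnt/pmem/persist_example"; /* placeholder */
	PMEMobjpool *pop = pmemobj_create(path, "example", PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL) {
		fprintf(stderr, "%s\n", pmemobj_errormsg());
		return 1;
	}

	PMEMoid oid;
	size_t obj_size = 4096;
	if (pmemobj_alloc(pop, &oid, obj_size, 0, NULL, NULL)) {
		perror("pmemobj_alloc");
		pmemobj_close(pop);
		return 1;
	}

	/* the timed operation: write the object, then persist the range */
	void *ptr = pmemobj_direct(oid);
	memset(ptr, 0xff, obj_size);
	pmemobj_persist(pop, ptr, obj_size);

	pmemobj_free(&oid);
	pmemobj_close(pop);
	return 0;
}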
6,293
22.139706
73
cpp
null
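The pool sizing in obj_persist_init() can be summarized as: per-object footprint times object count, inflated by FACTOR for metadata and fragmentation, clamped to the minimum pool size and page-aligned up. A sketch with invented counts (the 8 MiB minimum and 4 KiB page size are assumptions here):

// Sketch of the poolsize calculation used by the pmemobj_persist benchmark.
#include <cstdint>
#include <cstdio>

int main() {
	const uint64_t OOB_HEADER = 64;         /* OOB and allocation header */
	const uint64_t FACTOR = 3;              /* metadata/fragmentation factor */
	const uint64_t MIN_POOL = 8ULL << 20;   /* assumed PMEMOBJ_MIN_POOL */
	const uint64_t PAGE = 4096;             /* assumed page size */

	uint64_t nobjs = 4ULL * 100000;         /* n_threads * n_ops_per_thread */
	uint64_t obj_size = 512;                /* args->dsize, >= ALLOC_MIN_SIZE */

	uint64_t poolsize = nobjs * (obj_size + OOB_HEADER) * FACTOR;
	if (poolsize < MIN_POOL)
		poolsize = MIN_POOL;
	poolsize = (poolsize + PAGE - 1) & ~(PAGE - 1); /* align up to a page */

	printf("poolsize = %llu bytes\n", (unsigned long long)poolsize);
	return 0;
}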
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/pmemobj_tx.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * pmemobj_tx.cpp -- pmemobj_tx_alloc(), pmemobj_tx_free(), * pmemobj_tx_realloc(), pmemobj_tx_add_range() benchmarks. */ #include <cassert> #include <cerrno> #include <cstdio> #include <cstdlib> #include <cstring> #include <fcntl.h> #include <unistd.h> #include "benchmark.hpp" #include "file.h" #include "libpmemobj.h" #include "poolset_util.hpp" #define LAYOUT_NAME "benchmark" #define FACTOR 1.2f #define ALLOC_OVERHEAD 64 /* * operations number is limited to prevent stack overflow during * performing recursive functions. */ #define MAX_OPS 10000 TOID_DECLARE(struct item, 0); struct obj_tx_bench; struct obj_tx_worker; int obj_tx_init(struct benchmark *bench, struct benchmark_args *args); int obj_tx_exit(struct benchmark *bench, struct benchmark_args *args); /* * type_num_mode -- type number mode */ enum type_num_mode { NUM_MODE_ONE, NUM_MODE_PER_THREAD, NUM_MODE_RAND, NUM_MODE_UNKNOWN }; /* * op_mode -- operation type */ enum op_mode { OP_MODE_COMMIT, OP_MODE_ABORT, OP_MODE_ABORT_NESTED, OP_MODE_ONE_OBJ, OP_MODE_ONE_OBJ_NESTED, OP_MODE_ONE_OBJ_RANGE, OP_MODE_ONE_OBJ_NESTED_RANGE, OP_MODE_ALL_OBJ, OP_MODE_ALL_OBJ_NESTED, OP_MODE_UNKNOWN }; /* * lib_mode -- operation type */ enum lib_mode { LIB_MODE_DRAM, LIB_MODE_OBJ_TX, LIB_MODE_OBJ_ATOMIC, LIB_MODE_NONE, }; /* * nesting_mode -- nesting type */ enum nesting_mode { NESTING_MODE_SIM, NESTING_MODE_TX, NESTING_MODE_UNKNOWN, }; /* * add_range_mode -- operation type for obj_add_range benchmark */ enum add_range_mode { ADD_RANGE_MODE_ONE_TX, ADD_RANGE_MODE_NESTED_TX }; /* * parse_mode -- parsing function type */ enum parse_mode { PARSE_OP_MODE, PARSE_OP_MODE_ADD_RANGE }; typedef size_t (*fn_type_num_t)(struct obj_tx_bench *obj_bench, size_t worker_idx, size_t op_idx); typedef size_t (*fn_num_t)(size_t idx); typedef int (*fn_op_t)(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx); typedef struct offset (*fn_os_off_t)(struct obj_tx_bench *obj_bench, size_t idx); typedef enum op_mode (*fn_parse_t)(const char *arg); /* * obj_tx_args -- stores command line parsed arguments. */ struct obj_tx_args { /* * operation which will be performed when flag io set to false. * modes for obj_tx_alloc, obj_tx_free and obj_tx_realloc: * - basic - transaction will be committed * - abort - 'external' transaction will be aborted. * - abort-nested - all nested transactions will be * aborted. * * modes for obj_tx_add_range benchmark: * - basic - one object is added to undo log many times in * one transaction. * - range - fields of one object are added to undo * log many times in one transaction. * - all-obj - all objects are added to undo log in * one transaction. * - range-nested - fields of one object are added to undo * log many times in many nested transactions. * - one-obj-nested - one object is added to undo log many * times in many nested transactions. * - all-obj-nested - all objects are added to undo log in * many separate, nested transactions. */ char *operation; /* * type number for each persistent object. 
There are three modes: * - one - all of objects have the same type number * - per-thread - all of object allocated by the same * thread have the same type number * - rand - type numbers are assigned randomly for * each persistent object */ char *type_num; /* * define s which library will be used in main operations There are * three modes in which benchmark can be run: * - tx - uses PMEM transactions * - pmem - uses PMEM without transactions * - dram - does not use PMEM */ char *lib; unsigned nested; /* number of nested transactions */ unsigned min_size; /* minimum allocation size */ unsigned min_rsize; /* minimum reallocation size */ unsigned rsize; /* reallocation size */ bool change_type; /* change type number in reallocation */ size_t obj_size; /* size of each allocated object */ size_t n_ops; /* number of operations */ int parse_mode; /* type of parsing function */ }; /* * obj_tx_bench -- stores variables used in benchmark, passed within functions. */ static struct obj_tx_bench { PMEMobjpool *pop; /* handle to persistent pool */ struct obj_tx_args *obj_args; /* pointer to benchmark arguments */ size_t *random_types; /* array to store random type numbers */ size_t *sizes; /* array to store size of each allocation */ size_t *resizes; /* array to store size of each reallocation */ size_t n_objs; /* number of objects to allocate */ int type_mode; /* type number mode */ int op_mode; /* type of operation */ int lib_mode; /* type of operation used in initialization */ int lib_op; /* type of main operation */ int lib_op_free; /* type of main operation */ int nesting_mode; /* type of nesting in main operation */ fn_num_t n_oid; /* returns object's number in array */ fn_os_off_t fn_off; /* returns offset for proper operation */ /* * fn_type_num gets proper function assigned, depending on the * value of the type_mode argument, which returns proper type number for * each persistent object. Possible functions are: * - type_mode_one, * - type_mode_rand. */ fn_type_num_t fn_type_num; /* * fn_op gets proper array with functions pointer assigned, depending on * function which is tested by benchmark. Possible arrays are: * -alloc_op * -free_op * -realloc_op */ fn_op_t *fn_op; } obj_bench; /* * item -- TOID's structure */ struct item; /* * obj_tx_worker - stores variables used by one thread. 
*/ struct obj_tx_worker { TOID(struct item) * oids; char **items; unsigned tx_level; unsigned max_level; }; /* * offset - stores offset data used in pmemobj_tx_add_range() */ struct offset { uint64_t off; size_t size; }; /* * alloc_dram -- main operations for obj_tx_alloc benchmark in dram mode */ static int alloc_dram(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx) { auto *obj_worker = (struct obj_tx_worker *)worker->priv; obj_worker->items[idx] = (char *)malloc(obj_bench->sizes[idx]); if (obj_worker->items[idx] == nullptr) { perror("malloc"); return -1; } return 0; } /* * alloc_pmem -- main operations for obj_tx_alloc benchmark in pmem mode */ static int alloc_pmem(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx) { size_t type_num = obj_bench->fn_type_num(obj_bench, worker->index, idx); auto *obj_worker = (struct obj_tx_worker *)worker->priv; if (pmemobj_alloc(obj_bench->pop, &obj_worker->oids[idx].oid, obj_bench->sizes[idx], type_num, nullptr, nullptr) != 0) { perror("pmemobj_alloc"); return -1; } return 0; } /* * alloc_tx -- main operations for obj_tx_alloc benchmark in tx mode */ static int alloc_tx(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx) { size_t type_num = obj_bench->fn_type_num(obj_bench, worker->index, idx); auto *obj_worker = (struct obj_tx_worker *)worker->priv; obj_worker->oids[idx].oid = pmemobj_tx_xalloc( obj_bench->sizes[idx], type_num, POBJ_XALLOC_NO_FLUSH); if (OID_IS_NULL(obj_worker->oids[idx].oid)) { perror("pmemobj_tx_alloc"); return -1; } return 0; } /* * free_dram -- main operations for obj_tx_free benchmark in dram mode */ static int free_dram(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx) { auto *obj_worker = (struct obj_tx_worker *)worker->priv; free(obj_worker->items[idx]); return 0; } /* * free_pmem -- main operations for obj_tx_free benchmark in pmem mode */ static int free_pmem(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx) { auto *obj_worker = (struct obj_tx_worker *)worker->priv; POBJ_FREE(&obj_worker->oids[idx]); return 0; } /* * free_tx -- main operations for obj_tx_free benchmark in tx mode */ static int free_tx(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx) { auto *obj_worker = (struct obj_tx_worker *)worker->priv; TX_FREE(obj_worker->oids[idx]); return 0; } /* * no_free -- exit operation for benchmarks obj_tx_alloc and obj_tx_free * if there is no need to free memory */ static int no_free(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx) { return 0; } /* * realloc_dram -- main operations for obj_tx_realloc benchmark in dram mode */ static int realloc_dram(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx) { auto *obj_worker = (struct obj_tx_worker *)worker->priv; auto *tmp = (char *)realloc(obj_worker->items[idx], obj_bench->resizes[idx]); if (tmp == nullptr) { perror("realloc"); return -1; } obj_worker->items[idx] = tmp; return 0; } /* * realloc_pmem -- main operations for obj_tx_realloc benchmark in pmem mode */ static int realloc_pmem(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx) { auto *obj_worker = (struct obj_tx_worker *)worker->priv; size_t type_num = obj_bench->fn_type_num(obj_bench, worker->index, idx); if (obj_bench->obj_args->change_type) type_num++; if (pmemobj_realloc(obj_bench->pop, &obj_worker->oids[idx].oid, obj_bench->resizes[idx], type_num) != 0) { perror("pmemobj_realloc"); return -1; } return 0; } /* * realloc_tx -- 
main operations for obj_tx_realloc benchmark in tx mode */ static int realloc_tx(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx) { auto *obj_worker = (struct obj_tx_worker *)worker->priv; size_t type_num = obj_bench->fn_type_num(obj_bench, worker->index, idx); if (obj_bench->obj_args->change_type) type_num++; PMEMoid oid = pmemobj_tx_realloc(obj_worker->oids[idx].oid, obj_bench->sizes[idx], type_num); if (OID_IS_NULL(oid)) { perror("pmemobj_tx_realloc"); return -1; } /* * If OP_MODE_ABORT is set, this TX will get aborted, meaning that the * object allocated as part of the outer transaction will be freed once * this operation finishes. * To avoid a potential use-after-free, we either have to snapshot the * oid pointer or skip this assignment when we know it will abort. * For performance reason, this code does the latter. */ if (obj_bench->op_mode != OP_MODE_ABORT) obj_worker->oids[idx].oid = oid; return 0; } /* * add_range_nested_tx -- main operations of the obj_tx_add_range with nesting. */ static int add_range_nested_tx(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx) { int ret = 0; auto *obj_worker = (struct obj_tx_worker *)worker->priv; TX_BEGIN(obj_bench->pop) { if (obj_bench->obj_args->n_ops != obj_worker->tx_level) { size_t n_oid = obj_bench->n_oid(obj_worker->tx_level); struct offset offset = obj_bench->fn_off( obj_bench, obj_worker->tx_level); pmemobj_tx_add_range(obj_worker->oids[n_oid].oid, offset.off, offset.size); obj_worker->tx_level++; ret = add_range_nested_tx(obj_bench, worker, idx); } } TX_ONABORT { fprintf(stderr, "transaction failed\n"); ret = -1; } TX_END return ret; } /* * add_range_tx -- main operations of the obj_tx_add_range without nesting. */ static int add_range_tx(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx) { int ret = 0; size_t i = 0; auto *obj_worker = (struct obj_tx_worker *)worker->priv; TX_BEGIN(obj_bench->pop) { for (i = 0; i < obj_bench->obj_args->n_ops; i++) { size_t n_oid = obj_bench->n_oid(i); struct offset offset = obj_bench->fn_off(obj_bench, i); ret = pmemobj_tx_add_range(obj_worker->oids[n_oid].oid, offset.off, offset.size); } } TX_ONABORT { fprintf(stderr, "transaction failed\n"); ret = -1; } TX_END return ret; } /* * obj_op_sim -- main function for benchmarks which simulates nested * transactions on dram or pmemobj atomic API by calling function recursively. 
*/ static int obj_op_sim(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx) { int ret = 0; auto *obj_worker = (struct obj_tx_worker *)worker->priv; if (obj_worker->max_level == obj_worker->tx_level) { ret = obj_bench->fn_op[obj_bench->lib_op](obj_bench, worker, idx); } else { obj_worker->tx_level++; ret = obj_op_sim(obj_bench, worker, idx); } return ret; } /* * obj_op_tx -- main recursive function for transactional benchmarks */ static int obj_op_tx(struct obj_tx_bench *obj_bench, struct worker_info *worker, size_t idx) { volatile int ret = 0; auto *obj_worker = (struct obj_tx_worker *)worker->priv; TX_BEGIN(obj_bench->pop) { if (obj_worker->max_level == obj_worker->tx_level) { ret = obj_bench->fn_op[obj_bench->lib_op](obj_bench, worker, idx); if (obj_bench->op_mode == OP_MODE_ABORT_NESTED) pmemobj_tx_abort(-1); } else { obj_worker->tx_level++; ret = obj_op_tx(obj_bench, worker, idx); if (--obj_worker->tx_level == 0 && obj_bench->op_mode == OP_MODE_ABORT) pmemobj_tx_abort(-1); } } TX_ONABORT { if (obj_bench->op_mode != OP_MODE_ABORT && obj_bench->op_mode != OP_MODE_ABORT_NESTED) { fprintf(stderr, "transaction failed\n"); ret = -1; } } TX_END return ret; } /* * type_mode_one -- always returns 0, as in the mode NUM_MODE_ONE * all of the persistent objects have the same type_number value. */ static size_t type_mode_one(struct obj_tx_bench *obj_bench, size_t worker_idx, size_t op_idx) { return 0; } /* * type_mode_per_thread -- always returns worker index to all of the persistent * object allocated by the same thread have the same type number. */ static size_t type_mode_per_thread(struct obj_tx_bench *obj_bench, size_t worker_idx, size_t op_idx) { return worker_idx; } /* * type_mode_rand -- returns the value from the random_types array assigned * for the specific operation in a specific thread. */ static size_t type_mode_rand(struct obj_tx_bench *obj_bench, size_t worker_idx, size_t op_idx) { return obj_bench->random_types[op_idx]; } /* * parse_op_mode_add_range -- parses command line "--operation" argument * and returns proper op_mode enum value for obj_tx_add_range. */ static enum op_mode parse_op_mode_add_range(const char *arg) { if (strcmp(arg, "basic") == 0) return OP_MODE_ONE_OBJ; else if (strcmp(arg, "one-obj-nested") == 0) return OP_MODE_ONE_OBJ_NESTED; else if (strcmp(arg, "range") == 0) return OP_MODE_ONE_OBJ_RANGE; else if (strcmp(arg, "range-nested") == 0) return OP_MODE_ONE_OBJ_NESTED_RANGE; else if (strcmp(arg, "all-obj") == 0) return OP_MODE_ALL_OBJ; else if (strcmp(arg, "all-obj-nested") == 0) return OP_MODE_ALL_OBJ_NESTED; else return OP_MODE_UNKNOWN; } /* * parse_op_mode -- parses command line "--operation" argument * and returns proper op_mode enum value. 
*/ static enum op_mode parse_op_mode(const char *arg) { if (strcmp(arg, "basic") == 0) return OP_MODE_COMMIT; else if (strcmp(arg, "abort") == 0) return OP_MODE_ABORT; else if (strcmp(arg, "abort-nested") == 0) return OP_MODE_ABORT_NESTED; else return OP_MODE_UNKNOWN; } static fn_op_t alloc_op[] = {alloc_dram, alloc_tx, alloc_pmem}; static fn_op_t free_op[] = {free_dram, free_tx, free_pmem, no_free}; static fn_op_t realloc_op[] = {realloc_dram, realloc_tx, realloc_pmem}; static fn_op_t add_range_op[] = {add_range_tx, add_range_nested_tx}; static fn_parse_t parse_op[] = {parse_op_mode, parse_op_mode_add_range}; static fn_op_t nestings[] = {obj_op_sim, obj_op_tx}; /* * parse_type_num_mode -- converts string to type_num_mode enum */ static enum type_num_mode parse_type_num_mode(const char *arg) { if (strcmp(arg, "one") == 0) return NUM_MODE_ONE; else if (strcmp(arg, "per-thread") == 0) return NUM_MODE_PER_THREAD; else if (strcmp(arg, "rand") == 0) return NUM_MODE_RAND; fprintf(stderr, "unknown type number\n"); return NUM_MODE_UNKNOWN; } /* * parse_lib_mode -- converts string to type_num_mode enum */ static enum lib_mode parse_lib_mode(const char *arg) { if (strcmp(arg, "dram") == 0) return LIB_MODE_DRAM; else if (strcmp(arg, "pmem") == 0) return LIB_MODE_OBJ_ATOMIC; else if (strcmp(arg, "tx") == 0) return LIB_MODE_OBJ_TX; fprintf(stderr, "unknown lib mode\n"); return LIB_MODE_NONE; } static fn_type_num_t type_num_fn[] = {type_mode_one, type_mode_per_thread, type_mode_rand, nullptr}; /* * one_num -- returns always the same number. */ static size_t one_num(size_t idx) { return 0; } /* * diff_num -- returns number given as argument. */ static size_t diff_num(size_t idx) { return idx; } /* * off_entire -- returns zero offset. */ static struct offset off_entire(struct obj_tx_bench *obj_bench, size_t idx) { struct offset offset; offset.off = 0; offset.size = obj_bench->sizes[obj_bench->n_oid(idx)]; return offset; } /* * off_range -- returns offset for range in object. */ static struct offset off_range(struct obj_tx_bench *obj_bench, size_t idx) { struct offset offset; offset.size = obj_bench->sizes[0] / obj_bench->obj_args->n_ops; offset.off = offset.size * idx; return offset; } /* * rand_values -- allocates array and if range mode calculates random * values as allocation sizes for each object otherwise populates whole array * with max value. Used only when range flag set. */ static size_t * rand_values(size_t min, size_t max, size_t n_ops) { size_t size = max - min; auto *sizes = (size_t *)calloc(n_ops, sizeof(size_t)); if (sizes == nullptr) { perror("calloc"); return nullptr; } for (size_t i = 0; i < n_ops; i++) sizes[i] = max; if (min) { if (min > max) { fprintf(stderr, "Invalid size\n"); free(sizes); return nullptr; } for (size_t i = 0; i < n_ops; i++) sizes[i] = (rand() % size) + min; } return sizes; } /* * obj_tx_add_range_op -- main operations of the obj_tx_add_range benchmark. */ static int obj_tx_add_range_op(struct benchmark *bench, struct operation_info *info) { auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench); auto *obj_worker = (struct obj_tx_worker *)info->worker->priv; if (add_range_op[obj_bench->lib_op](obj_bench, info->worker, info->index) != 0) return -1; obj_worker->tx_level = 0; return 0; } /* * obj_tx_op -- main operation for obj_tx_alloc(), obj_tx_free() and * obj_tx_realloc() benchmarks. 
*/ static int obj_tx_op(struct benchmark *bench, struct operation_info *info) { auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench); auto *obj_worker = (struct obj_tx_worker *)info->worker->priv; int ret = nestings[obj_bench->nesting_mode](obj_bench, info->worker, info->index); obj_worker->tx_level = 0; return ret; } /* * obj_tx_init_worker -- common part for the worker initialization functions * for transactional benchmarks. */ static int obj_tx_init_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench); auto *obj_worker = (struct obj_tx_worker *)calloc(1, sizeof(struct obj_tx_worker)); if (obj_worker == nullptr) { perror("calloc"); return -1; } worker->priv = obj_worker; obj_worker->tx_level = 0; obj_worker->max_level = obj_bench->obj_args->nested; if (obj_bench->lib_mode != LIB_MODE_DRAM) obj_worker->oids = (TOID(struct item) *)calloc( obj_bench->n_objs, sizeof(TOID(struct item))); else obj_worker->items = (char **)calloc(obj_bench->n_objs, sizeof(char *)); if (obj_worker->oids == nullptr && obj_worker->items == nullptr) { free(obj_worker); perror("calloc"); return -1; } return 0; } /* * obj_tx_free_init_worker_alloc_obj -- special part for the worker * initialization function for benchmarks which needs allocated objects * before operation. */ static int obj_tx_init_worker_alloc_obj(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { unsigned i; if (obj_tx_init_worker(bench, args, worker) != 0) return -1; auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench); auto *obj_worker = (struct obj_tx_worker *)worker->priv; for (i = 0; i < obj_bench->n_objs; i++) { if (alloc_op[obj_bench->lib_mode](obj_bench, worker, i) != 0) goto out; } return 0; out: for (; i > 0; i--) free_op[obj_bench->lib_mode](obj_bench, worker, i - 1); if (obj_bench->lib_mode == LIB_MODE_DRAM) free(obj_worker->items); else free(obj_worker->oids); free(obj_worker); return -1; } /* * obj_tx_exit_worker -- common part for the worker de-initialization. */ static void obj_tx_exit_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench); auto *obj_worker = (struct obj_tx_worker *)worker->priv; for (unsigned i = 0; i < obj_bench->n_objs; i++) free_op[obj_bench->lib_op_free](obj_bench, worker, i); if (obj_bench->lib_mode == LIB_MODE_DRAM) free(obj_worker->items); else free(obj_worker->oids); free(obj_worker); } /* * obj_tx_add_range_init -- specific part of the obj_tx_add_range * benchmark initialization. 
*/ static int obj_tx_add_range_init(struct benchmark *bench, struct benchmark_args *args) { auto *obj_args = (struct obj_tx_args *)args->opts; obj_args->parse_mode = PARSE_OP_MODE_ADD_RANGE; if (args->n_ops_per_thread > MAX_OPS) args->n_ops_per_thread = MAX_OPS; if (obj_tx_init(bench, args) != 0) return -1; auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench); obj_bench->n_oid = diff_num; if (obj_bench->op_mode < OP_MODE_ALL_OBJ) { obj_bench->n_oid = one_num; obj_bench->n_objs = 1; } obj_bench->fn_off = off_entire; if (obj_bench->op_mode == OP_MODE_ONE_OBJ_RANGE || obj_bench->op_mode == OP_MODE_ONE_OBJ_NESTED_RANGE) { obj_bench->fn_off = off_range; if (args->n_ops_per_thread > args->dsize) args->dsize = args->n_ops_per_thread; obj_bench->sizes[0] = args->dsize; } obj_bench->lib_op = (obj_bench->op_mode == OP_MODE_ONE_OBJ || obj_bench->op_mode == OP_MODE_ALL_OBJ) ? ADD_RANGE_MODE_ONE_TX : ADD_RANGE_MODE_NESTED_TX; return 0; } /* * obj_tx_free_init -- specific part of the obj_tx_free initialization. */ static int obj_tx_free_init(struct benchmark *bench, struct benchmark_args *args) { if (obj_tx_init(bench, args) != 0) return -1; auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench); obj_bench->fn_op = free_op; /* * Generally all objects which were allocated during worker * initialization are released in main operation so there is no need to * free them in exit operation. Only exception is situation where * transaction (inside which object is releasing) is aborted. * Then object is not released so there there is necessary to free it * in exit operation. */ if (!(obj_bench->lib_op == LIB_MODE_OBJ_TX && obj_bench->op_mode != OP_MODE_COMMIT)) obj_bench->lib_op_free = LIB_MODE_NONE; return 0; } /* * obj_tx_alloc_init -- specific part of the obj_tx_alloc initialization. */ static int obj_tx_alloc_init(struct benchmark *bench, struct benchmark_args *args) { if (obj_tx_init(bench, args) != 0) return -1; auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench); obj_bench->fn_op = alloc_op; /* * Generally all objects which will be allocated during main operation * need to be released. Only exception is situation where transaction * (inside which object is allocating) is aborted. Then object is not * allocated so there is no need to free it in exit operation. */ if (obj_bench->lib_op == LIB_MODE_OBJ_TX && obj_bench->op_mode != OP_MODE_COMMIT) obj_bench->lib_op_free = LIB_MODE_NONE; return 0; } /* * obj_tx_realloc_init -- specific part of the obj_tx_realloc initialization. */ static int obj_tx_realloc_init(struct benchmark *bench, struct benchmark_args *args) { if (obj_tx_init(bench, args) != 0) return -1; auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench); obj_bench->resizes = rand_values(obj_bench->obj_args->min_rsize, obj_bench->obj_args->rsize, args->n_ops_per_thread); if (obj_bench->resizes == nullptr) { obj_tx_exit(bench, args); return -1; } obj_bench->fn_op = realloc_op; return 0; } /* * obj_tx_init -- common part of the benchmark initialization for transactional * benchmarks in their init functions. Parses command line arguments, set * variables and creates persistent pool. 
*/ int obj_tx_init(struct benchmark *bench, struct benchmark_args *args) { assert(bench != nullptr); assert(args != nullptr); assert(args->opts != nullptr); char path[PATH_MAX]; if (util_safe_strcpy(path, args->fname, sizeof(path)) != 0) return -1; enum file_type type = util_file_get_type(args->fname); if (type == OTHER_ERROR) { fprintf(stderr, "could not check type of file %s\n", args->fname); return -1; } pmembench_set_priv(bench, &obj_bench); obj_bench.obj_args = (struct obj_tx_args *)args->opts; obj_bench.obj_args->obj_size = args->dsize; obj_bench.obj_args->n_ops = args->n_ops_per_thread; obj_bench.n_objs = args->n_ops_per_thread; obj_bench.lib_op = obj_bench.obj_args->lib != nullptr ? parse_lib_mode(obj_bench.obj_args->lib) : LIB_MODE_OBJ_ATOMIC; if (obj_bench.lib_op == LIB_MODE_NONE) return -1; obj_bench.lib_mode = obj_bench.lib_op == LIB_MODE_DRAM ? LIB_MODE_DRAM : LIB_MODE_OBJ_ATOMIC; obj_bench.lib_op_free = obj_bench.lib_mode; obj_bench.nesting_mode = obj_bench.lib_op == LIB_MODE_OBJ_TX ? NESTING_MODE_TX : NESTING_MODE_SIM; /* * Multiplication by FACTOR prevents from out of memory error * as the actual size of the allocated persistent objects * is always larger than requested. */ size_t dsize = obj_bench.obj_args->rsize > args->dsize ? obj_bench.obj_args->rsize : args->dsize; size_t psize = args->n_ops_per_thread * (dsize + ALLOC_OVERHEAD) * args->n_threads; psize += PMEMOBJ_MIN_POOL; psize = (size_t)(psize * FACTOR); /* * When adding all allocated objects to undo log there is necessary * to prepare larger pool to prevent out of memory error. */ if (obj_bench.op_mode == OP_MODE_ALL_OBJ || obj_bench.op_mode == OP_MODE_ALL_OBJ_NESTED) psize *= 2; obj_bench.op_mode = parse_op[obj_bench.obj_args->parse_mode]( obj_bench.obj_args->operation); if (obj_bench.op_mode == OP_MODE_UNKNOWN) { fprintf(stderr, "operation mode unknown\n"); return -1; } obj_bench.type_mode = parse_type_num_mode(obj_bench.obj_args->type_num); if (obj_bench.type_mode == NUM_MODE_UNKNOWN) return -1; obj_bench.fn_type_num = type_num_fn[obj_bench.type_mode]; if (obj_bench.type_mode == NUM_MODE_RAND) { obj_bench.random_types = rand_values(1, UINT32_MAX, args->n_ops_per_thread); if (obj_bench.random_types == nullptr) return -1; } obj_bench.sizes = rand_values(obj_bench.obj_args->min_size, obj_bench.obj_args->obj_size, args->n_ops_per_thread); if (obj_bench.sizes == nullptr) goto free_random_types; if (obj_bench.lib_mode == LIB_MODE_DRAM) return 0; /* Create pmemobj pool. */ if (args->is_poolset || type == TYPE_DEVDAX) { if (args->fsize < psize) { fprintf(stderr, "file size too large\n"); goto free_all; } psize = 0; } else if (args->is_dynamic_poolset) { int ret = dynamic_poolset_create(args->fname, psize); if (ret == -1) goto free_all; if (util_safe_strcpy(path, POOLSET_PATH, sizeof(path)) != 0) goto free_all; psize = 0; } obj_bench.pop = pmemobj_create(path, LAYOUT_NAME, psize, args->fmode); if (obj_bench.pop == nullptr) { perror("pmemobj_create"); goto free_all; } return 0; free_all: free(obj_bench.sizes); free_random_types: if (obj_bench.type_mode == NUM_MODE_RAND) free(obj_bench.random_types); return -1; } /* * obj_tx_exit -- common part for the exit function of the transactional * benchmarks in their exit functions. 
*/ int obj_tx_exit(struct benchmark *bench, struct benchmark_args *args) { auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench); if (obj_bench->lib_mode != LIB_MODE_DRAM) pmemobj_close(obj_bench->pop); free(obj_bench->sizes); if (obj_bench->type_mode == NUM_MODE_RAND) free(obj_bench->random_types); return 0; } /* * obj_tx_realloc_exit -- common part for the exit function of the transactional * benchmarks in their exit functions. */ static int obj_tx_realloc_exit(struct benchmark *bench, struct benchmark_args *args) { auto *obj_bench = (struct obj_tx_bench *)pmembench_get_priv(bench); free(obj_bench->resizes); return obj_tx_exit(bench, args); } /* Array defining common command line arguments. */ static struct benchmark_clo obj_tx_clo[8]; static struct benchmark_info obj_tx_alloc; static struct benchmark_info obj_tx_free; static struct benchmark_info obj_tx_realloc; static struct benchmark_info obj_tx_add_range; CONSTRUCTOR(pmemobj_tx_constructor) void pmemobj_tx_constructor(void) { obj_tx_clo[0].opt_short = 'T'; obj_tx_clo[0].opt_long = "type-number"; obj_tx_clo[0].descr = "Type number - one, rand, per-thread"; obj_tx_clo[0].def = "one"; obj_tx_clo[0].type = CLO_TYPE_STR; obj_tx_clo[0].off = clo_field_offset(struct obj_tx_args, type_num); obj_tx_clo[1].opt_short = 'O'; obj_tx_clo[1].opt_long = "operation"; obj_tx_clo[1].descr = "Type of operation"; obj_tx_clo[1].def = "basic"; obj_tx_clo[1].off = clo_field_offset(struct obj_tx_args, operation); obj_tx_clo[1].type = CLO_TYPE_STR; obj_tx_clo[2].opt_short = 'm'; obj_tx_clo[2].opt_long = "min-size"; obj_tx_clo[2].type = CLO_TYPE_UINT; obj_tx_clo[2].descr = "Minimum allocation size"; obj_tx_clo[2].off = clo_field_offset(struct obj_tx_args, min_size); obj_tx_clo[2].def = "0"; obj_tx_clo[2].type_uint.size = clo_field_size(struct obj_tx_args, min_size); obj_tx_clo[2].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX; obj_tx_clo[2].type_uint.min = 0; obj_tx_clo[2].type_uint.max = UINT_MAX; /* * nclos field in benchmark_info structures is decremented to make this * options available only for obj_tx_alloc, obj_tx_free and * obj_tx_realloc benchmarks. 
*/ obj_tx_clo[3].opt_short = 'L'; obj_tx_clo[3].opt_long = "lib"; obj_tx_clo[3].descr = "Type of library"; obj_tx_clo[3].def = "tx"; obj_tx_clo[3].off = clo_field_offset(struct obj_tx_args, lib); obj_tx_clo[3].type = CLO_TYPE_STR; obj_tx_clo[4].opt_short = 'N'; obj_tx_clo[4].opt_long = "nestings"; obj_tx_clo[4].type = CLO_TYPE_UINT; obj_tx_clo[4].descr = "Number of nested transactions"; obj_tx_clo[4].off = clo_field_offset(struct obj_tx_args, nested); obj_tx_clo[4].def = "0"; obj_tx_clo[4].type_uint.size = clo_field_size(struct obj_tx_args, nested); obj_tx_clo[4].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX; obj_tx_clo[4].type_uint.min = 0; obj_tx_clo[4].type_uint.max = MAX_OPS; obj_tx_clo[5].opt_short = 'r'; obj_tx_clo[5].opt_long = "min-rsize"; obj_tx_clo[5].type = CLO_TYPE_UINT; obj_tx_clo[5].descr = "Minimum reallocation size"; obj_tx_clo[5].off = clo_field_offset(struct obj_tx_args, min_rsize); obj_tx_clo[5].def = "0"; obj_tx_clo[5].type_uint.size = clo_field_size(struct obj_tx_args, min_rsize); obj_tx_clo[5].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX; obj_tx_clo[5].type_uint.min = 0; obj_tx_clo[5].type_uint.max = UINT_MAX; obj_tx_clo[6].opt_short = 'R'; obj_tx_clo[6].opt_long = "realloc-size"; obj_tx_clo[6].type = CLO_TYPE_UINT; obj_tx_clo[6].descr = "Reallocation size"; obj_tx_clo[6].off = clo_field_offset(struct obj_tx_args, rsize); obj_tx_clo[6].def = "1"; obj_tx_clo[6].type_uint.size = clo_field_size(struct obj_tx_args, rsize); obj_tx_clo[6].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX; obj_tx_clo[6].type_uint.min = 1; obj_tx_clo[6].type_uint.max = ULONG_MAX; obj_tx_clo[7].opt_short = 'c'; obj_tx_clo[7].opt_long = "changed-type"; obj_tx_clo[7].descr = "Use another type number in " "reallocation than in allocation"; obj_tx_clo[7].type = CLO_TYPE_FLAG; obj_tx_clo[7].off = clo_field_offset(struct obj_tx_args, change_type); obj_tx_alloc.name = "obj_tx_alloc"; obj_tx_alloc.brief = "pmemobj_tx_alloc() benchmark"; obj_tx_alloc.init = obj_tx_alloc_init; obj_tx_alloc.exit = obj_tx_exit; obj_tx_alloc.multithread = true; obj_tx_alloc.multiops = true; obj_tx_alloc.init_worker = obj_tx_init_worker; obj_tx_alloc.free_worker = obj_tx_exit_worker; obj_tx_alloc.operation = obj_tx_op; obj_tx_alloc.measure_time = true; obj_tx_alloc.clos = obj_tx_clo; obj_tx_alloc.nclos = ARRAY_SIZE(obj_tx_clo) - 3; obj_tx_alloc.opts_size = sizeof(struct obj_tx_args); obj_tx_alloc.rm_file = true; obj_tx_alloc.allow_poolset = true; REGISTER_BENCHMARK(obj_tx_alloc); obj_tx_free.name = "obj_tx_free"; obj_tx_free.brief = "pmemobj_tx_free() benchmark"; obj_tx_free.init = obj_tx_free_init; obj_tx_free.exit = obj_tx_exit; obj_tx_free.multithread = true; obj_tx_free.multiops = true; obj_tx_free.init_worker = obj_tx_init_worker_alloc_obj; obj_tx_free.free_worker = obj_tx_exit_worker; obj_tx_free.operation = obj_tx_op; obj_tx_free.measure_time = true; obj_tx_free.clos = obj_tx_clo; obj_tx_free.nclos = ARRAY_SIZE(obj_tx_clo) - 3; obj_tx_free.opts_size = sizeof(struct obj_tx_args); obj_tx_free.rm_file = true; obj_tx_free.allow_poolset = true; REGISTER_BENCHMARK(obj_tx_free); obj_tx_realloc.name = "obj_tx_realloc"; obj_tx_realloc.brief = "pmemobj_tx_realloc() benchmark"; obj_tx_realloc.init = obj_tx_realloc_init; obj_tx_realloc.exit = obj_tx_realloc_exit; obj_tx_realloc.multithread = true; obj_tx_realloc.multiops = true; obj_tx_realloc.init_worker = obj_tx_init_worker_alloc_obj; obj_tx_realloc.free_worker = obj_tx_exit_worker; obj_tx_realloc.operation = obj_tx_op; obj_tx_realloc.measure_time = true; 
obj_tx_realloc.clos = obj_tx_clo; obj_tx_realloc.nclos = ARRAY_SIZE(obj_tx_clo); obj_tx_realloc.opts_size = sizeof(struct obj_tx_args); obj_tx_realloc.rm_file = true; obj_tx_realloc.allow_poolset = true; REGISTER_BENCHMARK(obj_tx_realloc); obj_tx_add_range.name = "obj_tx_add_range"; obj_tx_add_range.brief = "pmemobj_tx_add_range() benchmark"; obj_tx_add_range.init = obj_tx_add_range_init; obj_tx_add_range.exit = obj_tx_exit; obj_tx_add_range.multithread = true; obj_tx_add_range.multiops = false; obj_tx_add_range.init_worker = obj_tx_init_worker_alloc_obj; obj_tx_add_range.free_worker = obj_tx_exit_worker; obj_tx_add_range.operation = obj_tx_add_range_op; obj_tx_add_range.measure_time = true; obj_tx_add_range.clos = obj_tx_clo; obj_tx_add_range.nclos = ARRAY_SIZE(obj_tx_clo) - 5; obj_tx_add_range.opts_size = sizeof(struct obj_tx_args); obj_tx_add_range.rm_file = true; obj_tx_add_range.allow_poolset = true; REGISTER_BENCHMARK(obj_tx_add_range); }
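/*
 * Editorial note: the sketch below is not part of the original benchmark.
 * It is a minimal, hypothetical illustration of the nested-transaction
 * abort pattern that obj_op_tx() exercises: pmemobj_tx_abort() called in
 * an inner TX_BEGIN block aborts the whole transaction, so both the inner
 * and the outer TX_ONABORT sections run. "example_nested_abort" and its
 * parameter are assumed names introduced only for this example.
 */
static int
example_nested_abort(PMEMobjpool *pop)
{
	volatile int ret = 0;

	TX_BEGIN(pop) {			/* outer transaction */
		TX_BEGIN(pop) {		/* nested transaction */
			pmemobj_tx_abort(-1);
		} TX_ONABORT {
			ret = -1;	/* inner on-abort handler runs */
		} TX_END
	} TX_ONABORT {
		ret = -1;		/* abort propagates to the outer level */
	} TX_END

	return ret;
}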
34,848
27.309504
80
cpp
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/pmemobj_atomic_lists.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * pmemobj_atomic_lists.cpp -- benchmark for pmemobj atomic list API */ #include "benchmark.hpp" #include "file.h" #include "libpmemobj.h" #include "queue.h" #include <cassert> #include <cerrno> #include <cstdio> #include <cstdlib> #include <cstring> #include <fcntl.h> #include <unistd.h> #define FACTOR 8 #define LAYOUT_NAME "benchmark" struct obj_bench; struct obj_worker; struct element; TOID_DECLARE(struct item, 0); TOID_DECLARE(struct list, 1); typedef size_t (*fn_type_num_t)(size_t worker_idx, size_t op_idx); typedef struct element (*fn_position_t)(struct obj_worker *obj_worker, size_t op_idx); typedef int (*fn_init_t)(struct worker_info *worker, size_t n_elm, size_t list_len); /* * args -- stores command line parsed arguments. */ struct obj_list_args { char *type_num; /* type_number mode - one, per-thread, rand */ char *position; /* position - head, tail, middle, rand */ unsigned list_len; /* initial list length */ bool queue; /* use circle queue from <sys/queue.h> */ bool range; /* use random allocation size */ unsigned min_size; /* minimum random allocation size */ unsigned seed; /* seed value */ }; /* * obj_bench -- stores variables used in benchmark, passed within functions. */ static struct obj_bench { /* handle to persistent pool */ PMEMobjpool *pop; /* pointer to benchmark specific arguments */ struct obj_list_args *args; /* array to store random type_number values */ size_t *random_types; /* * fn_rpositions array stores random functions returning proper element * from list, if position where operation is performed is random. * Possible function which can be in array are: * - position_head, * - position_tail, * - position_middle. */ size_t *alloc_sizes; /* array to store random sizes of each object */ size_t max_len; /* maximum list length */ size_t min_len; /* initial list length */ int type_mode; /* type_number mode */ int position_mode; /* list destination mode */ /* * fn_type_num gets proper function assigned, depending on the * value of the type_mode argument, which returns proper type number for * each persistent object. Possible functions are: * - type_mode_one, * - type_mode_per_thread, * - type_mode_rand. */ fn_type_num_t fn_type_num; /* * fn_position gets proper function assigned, depending on the value * of the position argument, which returns handle to proper element on * the list. Possible functions are: * - position_head, * - position_tail, * - position_middle, * - position_rand. */ fn_position_t fn_position; /* * fn_init gets proper function assigned, depending on the file_io * flag, which allocates objects and initializes proper list. Possible * functions are: * - obj_init_list, * - queue_init_list. */ fn_init_t fn_init; } obj_bench; /* * item -- structure used to connect elements in lists. */ struct item { POBJ_LIST_ENTRY(struct item) field; PMDK_CIRCLEQ_ENTRY(item) fieldq; }; /* * element -- struct contains one item from list with proper type. */ struct element { struct item *itemq; TOID(struct item) itemp; bool before; }; /* * obj_worker -- stores variables used by one thread, concerning one list. 
*/ struct obj_worker { /* head of the pmemobj list */ POBJ_LIST_HEAD(plist, struct item) head; /* head of the circular queue */ PMDK_CIRCLEQ_HEAD(qlist, item) headq; TOID(struct item) * oids; /* persistent pmemobj list elements */ struct item **items; /* volatile elements */ size_t n_elm; /* number of elements in array */ fn_position_t *fn_positions; /* element access functions */ struct element elm; /* pointer to current element */ /* * list_move is a pointer to structure storing variables used by * second list (used only for obj_move benchmark). */ struct obj_worker *list_move; }; /* * position_mode -- list destination type */ enum position_mode { /* object inserted/removed/moved to/from head of list */ POSITION_MODE_HEAD, /* object inserted/removed/moved to/from tail of list */ POSITION_MODE_TAIL, /* * object inserted/removed/moved to/from second element of the list * or to/from head if list length equal to one */ POSITION_MODE_MIDDLE, /* object inserted/removed/moved to/from head, tail or middle */ POSITION_MODE_RAND, POSITION_MODE_UNKNOWN, }; /* * type_mode -- type number type */ enum type_mode { TYPE_MODE_ONE, /* one type number for all of objects */ /* one type number for objects allocated by the same thread */ TYPE_MODE_PER_THREAD, TYPE_MODE_RAND, /* random type number for each object */ TYPE_MODE_UNKNOWN, }; /* * position_head -- returns head of the persistent list or volatile queue. */ static struct element position_head(struct obj_worker *obj_worker, size_t op_idx) { struct element head = {nullptr, OID_NULL, false}; head.before = true; if (!obj_bench.args->queue) head.itemp = POBJ_LIST_FIRST(&obj_worker->head); else head.itemq = PMDK_CIRCLEQ_FIRST(&obj_worker->headq); return head; } /* * position_tail -- returns tail of the persistent list or volatile queue. */ static struct element position_tail(struct obj_worker *obj_worker, size_t op_idx) { struct element tail = {nullptr, OID_NULL, false}; tail.before = false; if (!obj_bench.args->queue) tail.itemp = POBJ_LIST_LAST(&obj_worker->head, field); else tail.itemq = PMDK_CIRCLEQ_LAST(&obj_worker->headq); return tail; } /* * position_middle -- returns second or first element from the persistent list * or volatile queue. */ static struct element position_middle(struct obj_worker *obj_worker, size_t op_idx) { struct element elm = position_head(obj_worker, op_idx); elm.before = true; if (!obj_bench.args->queue) elm.itemp = POBJ_LIST_NEXT(elm.itemp, field); else elm.itemq = PMDK_CIRCLEQ_NEXT(elm.itemq, fieldq); return elm; } /* * position_rand -- returns first, second or last element from the persistent * list or volatile queue based on r_positions array. */ static struct element position_rand(struct obj_worker *obj_worker, size_t op_idx) { struct element elm; elm = obj_worker->fn_positions[op_idx](obj_worker, op_idx); elm.before = true; return elm; } /* * type_mode_one -- always returns 0, as in the mode TYPE_MODE_ONE * all of the persistent objects have the same type_number value. */ static size_t type_mode_one(size_t worker_idx, size_t op_idx) { return 0; } /* * type_mode_per_thread -- always returns the index of the worker, * as in the TYPE_MODE_PER_THREAD the value of the persistent object * type_number is specific to the thread. */ static size_t type_mode_per_thread(size_t worker_idx, size_t op_idx) { return worker_idx; } /* * type_mode_rand -- returns the value from the random_types array assigned * for the specific operation in a specific thread. 
*/ static size_t type_mode_rand(size_t worker_idx, size_t op_idx) { return obj_bench.random_types[op_idx]; } const char *type_num_names[] = {"one", "per-thread", "rand"}; const char *position_names[] = {"head", "tail", "middle", "rand"}; static fn_type_num_t type_num_modes[] = {type_mode_one, type_mode_per_thread, type_mode_rand}; static fn_position_t positions[] = {position_head, position_tail, position_middle, position_rand}; /* function pointers randomly picked when using rand mode */ static fn_position_t rand_positions[] = {position_head, position_tail, position_middle}; /* * get_item -- common part of initial operation of the all benchmarks. * It gets pointer to element on the list where object will * be inserted/removed/moved to/from. */ static void get_item(struct benchmark *bench, struct operation_info *info) { auto *obj_worker = (struct obj_worker *)info->worker->priv; obj_worker->elm = obj_bench.fn_position(obj_worker, info->index); } /* * get_move_item -- special part of initial operation of the obj_move * benchmarks. It gets pointer to element on the list where object will be * inserted/removed/moved to/from. */ static void get_move_item(struct benchmark *bench, struct operation_info *info) { auto *obj_worker = (struct obj_worker *)info->worker->priv; obj_worker->list_move->elm = obj_bench.fn_position(obj_worker->list_move, info->index); get_item(bench, info); } /* * parse_args -- parse command line string argument */ static int parse_args(char *arg, int max, const char **names) { int i = 0; for (; i < max && strcmp(names[i], arg) != 0; i++) ; if (i == max) fprintf(stderr, "Invalid argument\n"); return i; } /* * obj_init_list -- special part of worker initialization, performed only if * queue flag set false. Allocates proper number of items, and inserts proper * part of them to the pmemobj list. */ static int obj_init_list(struct worker_info *worker, size_t n_oids, size_t list_len) { size_t i; auto *obj_worker = (struct obj_worker *)worker->priv; obj_worker->oids = (TOID(struct item) *)calloc(n_oids, sizeof(TOID(struct item))); if (obj_worker->oids == nullptr) { perror("calloc"); return -1; } for (i = 0; i < n_oids; i++) { size_t type_num = obj_bench.fn_type_num(worker->index, i); size_t size = obj_bench.alloc_sizes[i]; auto *tmp = (PMEMoid *)&obj_worker->oids[i]; if (pmemobj_alloc(obj_bench.pop, tmp, size, type_num, nullptr, nullptr) != 0) goto err_oids; } for (i = 0; i < list_len; i++) POBJ_LIST_INSERT_TAIL(obj_bench.pop, &obj_worker->head, obj_worker->oids[i], field); return 0; err_oids: for (; i > 0; i--) POBJ_FREE(&obj_worker->oids[i - 1]); free(obj_worker->oids); return -1; } /* * queue_init_list -- special part of worker initialization, performed only if * queue flag set. Initiates circle queue, allocates proper number of items and * inserts proper part of them to the queue. 
*/ static int queue_init_list(struct worker_info *worker, size_t n_items, size_t list_len) { size_t i; auto *obj_worker = (struct obj_worker *)worker->priv; PMDK_CIRCLEQ_INIT(&obj_worker->headq); obj_worker->items = (struct item **)malloc(n_items * sizeof(struct item *)); if (obj_worker->items == nullptr) { perror("malloc"); return -1; } for (i = 0; i < n_items; i++) { size_t size = obj_bench.alloc_sizes[i]; obj_worker->items[i] = (struct item *)calloc(1, size); if (obj_worker->items[i] == nullptr) { perror("calloc"); goto err; } } for (i = 0; i < list_len; i++) PMDK_CIRCLEQ_INSERT_TAIL(&obj_worker->headq, obj_worker->items[i], fieldq); return 0; err: for (; i > 0; i--) free(obj_worker->items[i - 1]); free(obj_worker->items); return -1; } /* * queue_free_worker_list -- special part for the worker de-initialization when * queue flag is true. Releases items directly from atomic list. */ static void queue_free_worker_list(struct obj_worker *obj_worker) { while (!PMDK_CIRCLEQ_EMPTY(&obj_worker->headq)) { struct item *tmp = PMDK_CIRCLEQ_LAST(&obj_worker->headq); PMDK_CIRCLEQ_REMOVE(&obj_worker->headq, tmp, fieldq); free(tmp); } free(obj_worker->items); } /* * obj_free_worker_list -- special part for the worker de-initialization when * queue flag is false. Releases items directly from atomic list. */ static void obj_free_worker_list(struct obj_worker *obj_worker) { while (!POBJ_LIST_EMPTY(&obj_worker->head)) { TOID(struct item) tmp = POBJ_LIST_FIRST(&obj_worker->head); POBJ_LIST_REMOVE_FREE(obj_bench.pop, &obj_worker->head, tmp, field); } free(obj_worker->oids); } /* * obj_free_worker_items -- special part for the worker de-initialization when * queue flag is false. Releases items used for create pmemobj list. */ static void obj_free_worker_items(struct obj_worker *obj_worker) { for (size_t i = 0; i < obj_worker->n_elm; i++) POBJ_FREE(&obj_worker->oids[i]); free(obj_worker->oids); } /* * queue_free_worker_items -- special part for the worker de-initialization * when queue flag set. Releases used for create circle queue. */ static void queue_free_worker_items(struct obj_worker *obj_worker) { for (size_t i = 0; i < obj_worker->n_elm; i++) free(obj_worker->items[i]); free(obj_worker->items); } /* * random_positions -- allocates array and calculates random values for * defining positions where each operation will be performed. Used only * in POSITION_MODE_RAND */ static fn_position_t * random_positions(void) { auto *positions = (fn_position_t *)calloc(obj_bench.max_len, sizeof(fn_position_t)); if (positions == nullptr) { perror("calloc"); return nullptr; } if (obj_bench.args->seed != 0) srand(obj_bench.args->seed); size_t rmax = ARRAY_SIZE(rand_positions); for (size_t i = 0; i < obj_bench.max_len; i++) { size_t id = RRAND(rmax, 0); positions[i] = rand_positions[id]; } return positions; } /* * rand_values -- allocates array and if range mode calculates random * values as allocation sizes for each object otherwise populates whole array * with max value. Used only when range flag set. 
*/ static size_t * random_values(size_t min, size_t max, size_t n_ops, size_t min_range) { auto *randoms = (size_t *)calloc(n_ops, sizeof(size_t)); if (randoms == nullptr) { perror("calloc"); return nullptr; } for (size_t i = 0; i < n_ops; i++) randoms[i] = max; if (min > min_range) { if (min > max) { fprintf(stderr, "Invalid size\n"); free(randoms); return nullptr; } for (size_t i = 0; i < n_ops; i++) randoms[i] = RRAND(max, min); } return randoms; } /* * queue_insert_op -- main operations of the obj_insert benchmark when queue * flag set to true. */ static int queue_insert_op(struct operation_info *info) { auto *obj_worker = (struct obj_worker *)info->worker->priv; PMDK_CIRCLEQ_INSERT_AFTER( &obj_worker->headq, obj_worker->elm.itemq, obj_worker->items[info->index + obj_bench.min_len], fieldq); return 0; } /* * obj_insert_op -- main operations of the obj_insert benchmark when queue flag * set to false. */ static int obj_insert_op(struct operation_info *info) { auto *obj_worker = (struct obj_worker *)info->worker->priv; POBJ_LIST_INSERT_AFTER( obj_bench.pop, &obj_worker->head, obj_worker->elm.itemp, obj_worker->oids[info->index + obj_bench.min_len], field); return 0; } /* * queue_remove_op -- main operations of the obj_remove benchmark when queue * flag set to true. */ static int queue_remove_op(struct operation_info *info) { auto *obj_worker = (struct obj_worker *)info->worker->priv; PMDK_CIRCLEQ_REMOVE(&obj_worker->headq, obj_worker->elm.itemq, fieldq); return 0; } /* * obj_remove_op -- main operations of the obj_remove benchmark when queue flag * set to false. */ static int obj_remove_op(struct operation_info *info) { auto *obj_worker = (struct obj_worker *)info->worker->priv; POBJ_LIST_REMOVE(obj_bench.pop, &obj_worker->head, obj_worker->elm.itemp, field); return 0; } /* * insert_op -- main operations of the obj_insert benchmark. */ static int insert_op(struct benchmark *bench, struct operation_info *info) { get_item(bench, info); return obj_bench.args->queue ? queue_insert_op(info) : obj_insert_op(info); } /* * obj_insert_new_op -- main operations of the obj_insert_new benchmark. */ static int obj_insert_new_op(struct benchmark *bench, struct operation_info *info) { get_item(bench, info); auto *obj_worker = (struct obj_worker *)info->worker->priv; PMEMoid tmp; size_t size = obj_bench.alloc_sizes[info->index]; size_t type_num = obj_bench.fn_type_num(info->worker->index, info->index); tmp = pmemobj_list_insert_new( obj_bench.pop, offsetof(struct item, field), &obj_worker->head, obj_worker->elm.itemp.oid, obj_worker->elm.before, size, type_num, nullptr, nullptr); if (OID_IS_NULL(tmp)) { perror("pmemobj_list_insert_new"); return -1; } return 0; } /* * remove_op -- main operations of the obj_remove benchmark. */ static int remove_op(struct benchmark *bench, struct operation_info *info) { get_item(bench, info); return obj_bench.args->queue ? queue_remove_op(info) : obj_remove_op(info); } /* * obj_remove_free_op -- main operation of the obj_remove_free benchmark. */ static int obj_remove_free_op(struct benchmark *bench, struct operation_info *info) { get_item(bench, info); auto *obj_worker = (struct obj_worker *)info->worker->priv; POBJ_LIST_REMOVE_FREE(obj_bench.pop, &obj_worker->head, obj_worker->elm.itemp, field); return 0; } /* * obj_move_op -- main operation of the obj_move benchmark. 
*/ static int obj_move_op(struct benchmark *bench, struct operation_info *info) { get_move_item(bench, info); auto *obj_worker = (struct obj_worker *)info->worker->priv; POBJ_LIST_MOVE_ELEMENT_BEFORE(obj_bench.pop, &obj_worker->head, &obj_worker->list_move->head, obj_worker->list_move->elm.itemp, obj_worker->elm.itemp, field, field); return 0; } /* * free_worker -- free common worker state */ static void free_worker(struct obj_worker *obj_worker) { if (obj_bench.position_mode == POSITION_MODE_RAND) free(obj_worker->fn_positions); free(obj_worker); } /* * free_worker_list -- worker de-initialization function for: obj_insert_new, * obj_remove_free, obj_move. Requires releasing objects directly from list. */ static void free_worker_list(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { auto *obj_worker = (struct obj_worker *)worker->priv; obj_bench.args->queue ? queue_free_worker_list(obj_worker) : obj_free_worker_list(obj_worker); free_worker(obj_worker); } /* * obj_free_worker_items -- worker de-initialization function of obj_insert and * obj_remove benchmarks, where deallocation can't be performed directly on the * list and where is possibility of using queue flag. */ static void free_worker_items(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { auto *obj_worker = (struct obj_worker *)worker->priv; auto *obj_args = (struct obj_list_args *)args->opts; obj_args->queue ? queue_free_worker_items(obj_worker) : obj_free_worker_items(obj_worker); free_worker(obj_worker); } /* * obj_move_free_worker -- special part for the worker de-initialization * function of obj_move benchmarks. */ static void obj_move_free_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { auto *obj_worker = (struct obj_worker *)worker->priv; while (!POBJ_LIST_EMPTY(&obj_worker->list_move->head)) POBJ_LIST_REMOVE_FREE( obj_bench.pop, &obj_worker->list_move->head, POBJ_LIST_LAST(&obj_worker->list_move->head, field), field); if (obj_bench.position_mode == POSITION_MODE_RAND) free(obj_worker->list_move->fn_positions); free(obj_worker->list_move); free_worker_list(bench, args, worker); } /* * obj_init_worker -- common part for the worker initialization for: * obj_insert, obj_insert_new, obj_remove obj_remove_free and obj_move. */ static int obj_init_worker(struct worker_info *worker, size_t n_elm, size_t list_len) { auto *obj_worker = (struct obj_worker *)calloc(1, sizeof(struct obj_worker)); if (obj_worker == nullptr) { perror("calloc"); return -1; } worker->priv = obj_worker; obj_worker->n_elm = obj_bench.max_len; obj_worker->list_move = nullptr; if (obj_bench.position_mode == POSITION_MODE_RAND) { obj_worker->fn_positions = random_positions(); if (obj_worker->fn_positions == nullptr) goto err; } if (obj_bench.fn_init(worker, n_elm, list_len) != 0) goto err_positions; return 0; err_positions: free(obj_worker->fn_positions); err: free(obj_worker); return -1; } /* * obj_insert_init_worker -- worker initialization functions of the obj_insert * benchmark. */ static int obj_insert_init_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { return obj_init_worker(worker, obj_bench.max_len, obj_bench.min_len); } /* * obj_insert_new_init_worker -- worker initialization functions of the * obj_insert_new benchmark. 
*/ static int obj_insert_new_init_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { return obj_init_worker(worker, obj_bench.min_len, obj_bench.min_len); } /* * obj_remove_init_worker -- worker initialization functions of the obj_remove * and obj_remove_free benchmarks. */ static int obj_remove_init_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { return obj_init_worker(worker, obj_bench.max_len, obj_bench.max_len); } /* * obj_move_init_worker -- worker initialization functions of the obj_move * benchmark. */ static int obj_move_init_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { if (obj_init_worker(worker, obj_bench.max_len, obj_bench.max_len) != 0) return -1; auto *obj_worker = (struct obj_worker *)worker->priv; obj_worker->list_move = (struct obj_worker *)calloc(1, sizeof(struct obj_worker)); if (obj_worker->list_move == nullptr) { perror("calloc"); goto free; } size_t i; if (obj_bench.position_mode == POSITION_MODE_RAND) { obj_worker->list_move->fn_positions = random_positions(); if (obj_worker->list_move->fn_positions == nullptr) goto free_list_move; } for (i = 0; i < obj_bench.min_len; i++) { size_t size = obj_bench.alloc_sizes[i]; POBJ_LIST_INSERT_NEW_TAIL(obj_bench.pop, &obj_worker->list_move->head, field, size, nullptr, nullptr); if (TOID_IS_NULL(POBJ_LIST_LAST(&obj_worker->list_move->head, field))) { perror("pmemobj_list_insert_new"); goto free_all; } } return 0; free_all: for (; i > 0; i--) { POBJ_LIST_REMOVE_FREE( obj_bench.pop, &obj_worker->list_move->head, POBJ_LIST_LAST(&obj_worker->list_move->head, field), field); } free(obj_worker->list_move->fn_positions); free_list_move: free(obj_worker->list_move); free: free_worker_list(bench, args, worker); return -1; } /* * obj_init - common part of the benchmark initialization for: obj_insert, * obj_insert_new, obj_remove, obj_remove_free and obj_move used in their init * functions. Parses command line arguments, sets variables and * creates persistent pool. */ static int obj_init(struct benchmark *bench, struct benchmark_args *args) { assert(bench != nullptr); assert(args != nullptr); assert(args->opts != nullptr); enum file_type type = util_file_get_type(args->fname); if (type == OTHER_ERROR) { fprintf(stderr, "could not check type of file %s\n", args->fname); return -1; } obj_bench.args = (struct obj_list_args *)args->opts; obj_bench.min_len = obj_bench.args->list_len + 1; obj_bench.max_len = args->n_ops_per_thread + obj_bench.min_len; obj_bench.fn_init = obj_bench.args->queue ? queue_init_list : obj_init_list; /* Decide if use random or state allocation sizes */ size_t obj_size = args->dsize < sizeof(struct item) ? sizeof(struct item) : args->dsize; size_t min_size = obj_bench.args->min_size < sizeof(struct item) ? 
sizeof(struct item) : obj_bench.args->min_size; obj_bench.alloc_sizes = random_values( min_size, obj_size, obj_bench.max_len, sizeof(struct item)); if (obj_bench.alloc_sizes == nullptr) goto free_random_types; /* Decide where operations will be performed */ obj_bench.position_mode = parse_args(obj_bench.args->position, POSITION_MODE_UNKNOWN, position_names); if (obj_bench.position_mode == POSITION_MODE_UNKNOWN) goto free_all; obj_bench.fn_position = positions[obj_bench.position_mode]; if (!obj_bench.args->queue) { /* Decide what type number will be used */ obj_bench.type_mode = parse_args(obj_bench.args->type_num, TYPE_MODE_UNKNOWN, type_num_names); if (obj_bench.type_mode == TYPE_MODE_UNKNOWN) return -1; obj_bench.fn_type_num = type_num_modes[obj_bench.type_mode]; if (obj_bench.type_mode == TYPE_MODE_RAND) { obj_bench.random_types = random_values( 1, UINT32_MAX, obj_bench.max_len, 0); if (obj_bench.random_types == nullptr) return -1; } /* * Multiplication by FACTOR prevents from out of memory error * as the actual size of the allocated persistent objects * is always larger than requested. */ size_t psize = (args->n_ops_per_thread + obj_bench.min_len + 1) * obj_size * args->n_threads * FACTOR; if (args->is_poolset || type == TYPE_DEVDAX) { if (args->fsize < psize) { fprintf(stderr, "file size too large\n"); goto free_all; } psize = 0; } else if (psize < PMEMOBJ_MIN_POOL) { psize = PMEMOBJ_MIN_POOL; } /* Create pmemobj pool. */ if ((obj_bench.pop = pmemobj_create(args->fname, LAYOUT_NAME, psize, args->fmode)) == nullptr) { perror(pmemobj_errormsg()); goto free_all; } } return 0; free_all: free(obj_bench.alloc_sizes); free_random_types: if (obj_bench.type_mode == TYPE_MODE_RAND) free(obj_bench.random_types); return -1; } /* * obj_exit -- common part for the exit function for: obj_insert, * obj_insert_new, obj_remove, obj_remove_free and obj_move used in their exit * functions. */ static int obj_exit(struct benchmark *bench, struct benchmark_args *args) { if (!obj_bench.args->queue) { pmemobj_close(obj_bench.pop); if (obj_bench.type_mode == TYPE_MODE_RAND) free(obj_bench.random_types); } free(obj_bench.alloc_sizes); return 0; } /* obj_list_clo -- array defining common command line arguments. 
*/ static struct benchmark_clo obj_list_clo[6]; static struct benchmark_info obj_insert; static struct benchmark_info obj_remove; static struct benchmark_info obj_insert_new; static struct benchmark_info obj_remove_free; static struct benchmark_info obj_move; CONSTRUCTOR(pmem_atomic_list_constructor) void pmem_atomic_list_constructor(void) { obj_list_clo[0].opt_short = 'T'; obj_list_clo[0].opt_long = "type-number"; obj_list_clo[0].descr = "Type number mode - one, per-thread, " "rand"; obj_list_clo[0].def = "one"; obj_list_clo[0].off = clo_field_offset(struct obj_list_args, type_num); obj_list_clo[0].type = CLO_TYPE_STR; obj_list_clo[1].opt_short = 'P'; obj_list_clo[1].opt_long = "position"; obj_list_clo[1].descr = "Place where operation will be " "performed - head, tail, rand, middle"; obj_list_clo[1].def = "middle"; obj_list_clo[1].off = clo_field_offset(struct obj_list_args, position); obj_list_clo[1].type = CLO_TYPE_STR; obj_list_clo[2].opt_short = 'l'; obj_list_clo[2].opt_long = "list-len"; obj_list_clo[2].type = CLO_TYPE_UINT; obj_list_clo[2].descr = "Initial list len"; obj_list_clo[2].off = clo_field_offset(struct obj_list_args, list_len); obj_list_clo[2].def = "1"; obj_list_clo[2].type_uint.size = clo_field_size(struct obj_list_args, list_len); obj_list_clo[2].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX; obj_list_clo[2].type_uint.min = 1; obj_list_clo[2].type_uint.max = ULONG_MAX; obj_list_clo[3].opt_short = 'm'; obj_list_clo[3].opt_long = "min-size"; obj_list_clo[3].type = CLO_TYPE_UINT; obj_list_clo[3].descr = "Min allocation size"; obj_list_clo[3].off = clo_field_offset(struct obj_list_args, min_size); obj_list_clo[3].def = "0"; obj_list_clo[3].type_uint.size = clo_field_size(struct obj_list_args, min_size); obj_list_clo[3].type_uint.base = CLO_INT_BASE_DEC; obj_list_clo[3].type_uint.min = 0; obj_list_clo[3].type_uint.max = UINT_MAX; obj_list_clo[4].opt_short = 's'; obj_list_clo[4].type_uint.max = INT_MAX; obj_list_clo[4].opt_long = "seed"; obj_list_clo[4].type = CLO_TYPE_UINT; obj_list_clo[4].descr = "Seed value"; obj_list_clo[4].off = clo_field_offset(struct obj_list_args, seed); obj_list_clo[4].def = "0"; obj_list_clo[4].type_uint.size = clo_field_size(struct obj_list_args, seed); obj_list_clo[4].type_uint.base = CLO_INT_BASE_DEC; obj_list_clo[4].type_uint.min = 0; /* * nclos field in benchmark_info structures is decremented to make * queue option available only for obj_isert, obj_remove */ obj_list_clo[5].opt_short = 'q'; obj_list_clo[5].opt_long = "queue"; obj_list_clo[5].descr = "Use circleq from queue.h instead " "pmemobj"; obj_list_clo[5].type = CLO_TYPE_FLAG; obj_list_clo[5].off = clo_field_offset(struct obj_list_args, queue); obj_insert.name = "obj_insert"; obj_insert.brief = "pmemobj_list_insert() benchmark"; obj_insert.init = obj_init; obj_insert.exit = obj_exit; obj_insert.multithread = true; obj_insert.multiops = true; obj_insert.init_worker = obj_insert_init_worker; obj_insert.free_worker = free_worker_items; obj_insert.operation = insert_op; obj_insert.measure_time = true; obj_insert.clos = obj_list_clo; obj_insert.nclos = ARRAY_SIZE(obj_list_clo); obj_insert.opts_size = sizeof(struct obj_list_args); obj_insert.rm_file = true; obj_insert.allow_poolset = true; REGISTER_BENCHMARK(obj_insert); obj_remove.name = "obj_remove"; obj_remove.brief = "pmemobj_list_remove() benchmark " "without freeing element"; obj_remove.init = obj_init; obj_remove.exit = obj_exit; obj_remove.multithread = true; obj_remove.multiops = true; obj_remove.init_worker = 
obj_remove_init_worker; obj_remove.free_worker = free_worker_items; obj_remove.operation = remove_op; obj_remove.measure_time = true; obj_remove.clos = obj_list_clo; obj_remove.nclos = ARRAY_SIZE(obj_list_clo); obj_remove.opts_size = sizeof(struct obj_list_args); obj_remove.rm_file = true; obj_remove.allow_poolset = true; REGISTER_BENCHMARK(obj_remove); obj_insert_new.name = "obj_insert_new"; obj_insert_new.brief = "pmemobj_list_insert_new() benchmark"; obj_insert_new.init = obj_init; obj_insert_new.exit = obj_exit; obj_insert_new.multithread = true; obj_insert_new.multiops = true; obj_insert_new.init_worker = obj_insert_new_init_worker; obj_insert_new.free_worker = free_worker_list; obj_insert_new.operation = obj_insert_new_op; obj_insert_new.measure_time = true; obj_insert_new.clos = obj_list_clo; obj_insert_new.nclos = ARRAY_SIZE(obj_list_clo) - 1; obj_insert_new.opts_size = sizeof(struct obj_list_args); obj_insert_new.rm_file = true; obj_insert_new.allow_poolset = true; REGISTER_BENCHMARK(obj_insert_new); obj_remove_free.name = "obj_remove_free"; obj_remove_free.brief = "pmemobj_list_remove() benchmark " "with freeing element"; obj_remove_free.init = obj_init; obj_remove_free.exit = obj_exit; obj_remove_free.multithread = true; obj_remove_free.multiops = true; obj_remove_free.init_worker = obj_remove_init_worker; obj_remove_free.free_worker = free_worker_list; obj_remove_free.operation = obj_remove_free_op; obj_remove_free.measure_time = true; obj_remove_free.clos = obj_list_clo; obj_remove_free.nclos = ARRAY_SIZE(obj_list_clo) - 1; obj_remove_free.opts_size = sizeof(struct obj_list_args); obj_remove_free.rm_file = true; obj_remove_free.allow_poolset = true; REGISTER_BENCHMARK(obj_remove_free); obj_move.name = "obj_move"; obj_move.brief = "pmemobj_list_move() benchmark"; obj_move.init = obj_init; obj_move.exit = obj_exit; obj_move.multithread = true; obj_move.multiops = true; obj_move.init_worker = obj_move_init_worker; obj_move.free_worker = obj_move_free_worker; obj_move.operation = obj_move_op; obj_move.measure_time = true; obj_move.clos = obj_list_clo; obj_move.nclos = ARRAY_SIZE(obj_list_clo) - 1; obj_move.opts_size = sizeof(struct obj_list_args); obj_move.rm_file = true; obj_move.allow_poolset = true; REGISTER_BENCHMARK(obj_move); }
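/*
 * Editorial note: the sketch below is not part of the original benchmark.
 * It only illustrates, with assumed names ("example_list",
 * "example_list_ops"), the two atomic-list operations this file measures
 * most directly: allocating an element straight onto a list tail and
 * freeing an element straight off the list head. The macro calls mirror
 * the usage in the functions above.
 */
POBJ_LIST_HEAD(example_list, struct item);

static int
example_list_ops(PMEMobjpool *pop, struct example_list *head)
{
	/* allocate a new item and atomically link it at the tail */
	POBJ_LIST_INSERT_NEW_TAIL(pop, head, field, sizeof(struct item),
				  nullptr, nullptr);
	if (TOID_IS_NULL(POBJ_LIST_LAST(head, field))) {
		perror("pmemobj_list_insert_new");
		return -1;
	}

	/* atomically unlink the first element and free it */
	POBJ_LIST_REMOVE_FREE(pop, head, POBJ_LIST_FIRST(head), field);

	return 0;
}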
31,463
27.734247
80
cpp
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/config_reader.hpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * config_reader.hpp -- config reader module declarations */ struct config_reader; struct config_reader *config_reader_alloc(void); int config_reader_read(struct config_reader *cr, const char *fname); void config_reader_free(struct config_reader *cr); int config_reader_get_scenarios(struct config_reader *cr, struct scenarios **scenarios);
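/*
 * Editorial note: the sketch below is not part of the original header.
 * It shows the intended call sequence of this interface, assuming the
 * usual 0-on-success convention used elsewhere in the benchmark framework.
 * "example_load_scenarios" and "fname" are names made up for the example.
 */
static inline int
example_load_scenarios(const char *fname, struct scenarios **scenarios)
{
	struct config_reader *cr = config_reader_alloc();
	if (cr == nullptr)
		return -1;

	if (config_reader_read(cr, fname) != 0) {
		config_reader_free(cr);
		return -1;
	}

	int ret = config_reader_get_scenarios(cr, scenarios);

	config_reader_free(cr);
	return ret;
}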
436
32.615385
68
hpp
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/benchmark_worker.hpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * benchmark_worker.hpp -- benchmark_worker module declarations */ #include "benchmark.hpp" #include "os_thread.h" /* * * The following table shows valid state transitions upon specified * API calls and operations performed by the worker thread: * * +========================+==========================+=============+ * | Application | State | Worker | * +========================+==========================+=============+ * | benchmark_worker_alloc | WORKER_STATE_IDLE | wait | * +------------------------+--------------------------+-------------+ * | benchmark_worker_init | WORKER_STATE_INIT | invoke init | * +------------------------+--------------------------+-------------+ * | wait | WORKER_STATE_INITIALIZED | end of init | * +------------------------+--------------------------+-------------+ * | benchmark_worker_run | WORKER_STATE_RUN | invoke func | * +------------------------+--------------------------+-------------+ * | benchmark_worker_join | WORKER_STATE_END | end of func | * +------------------------+--------------------------+-------------+ * | benchmark_worker_exit | WORKER_STATE_EXIT | invoke exit | * +------------------------+--------------------------+-------------+ * | wait | WORKER_STATE_DONE | end of exit | * +------------------------+--------------------------+-------------+ */ enum benchmark_worker_state { WORKER_STATE_IDLE, WORKER_STATE_INIT, WORKER_STATE_INITIALIZED, WORKER_STATE_RUN, WORKER_STATE_END, WORKER_STATE_EXIT, WORKER_STATE_DONE, MAX_WORKER_STATE, }; struct benchmark_worker { os_thread_t thread; struct benchmark *bench; struct benchmark_args *args; struct worker_info info; int ret; int ret_init; int (*func)(struct benchmark *bench, struct worker_info *info); int (*init)(struct benchmark *bench, struct benchmark_args *args, struct worker_info *info); void (*exit)(struct benchmark *bench, struct benchmark_args *args, struct worker_info *info); os_cond_t cond; os_mutex_t lock; enum benchmark_worker_state state; }; struct benchmark_worker *benchmark_worker_alloc(void); void benchmark_worker_free(struct benchmark_worker *); int benchmark_worker_init(struct benchmark_worker *); void benchmark_worker_exit(struct benchmark_worker *); int benchmark_worker_run(struct benchmark_worker *); int benchmark_worker_join(struct benchmark_worker *);
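/*
 * Editorial note: the sketch below is not part of the original header.
 * It walks a single worker through the states listed in the table above,
 * assuming the usual 0-on-success convention. "example_run_worker" is a
 * hypothetical helper; a real caller would also fill in the func, init
 * and exit callbacks before starting the worker.
 */
static inline int
example_run_worker(struct benchmark *bench, struct benchmark_args *args)
{
	struct benchmark_worker *worker = benchmark_worker_alloc();

	worker->bench = bench;
	worker->args = args;
	/* worker->func, worker->init and worker->exit would be set here */

	benchmark_worker_init(worker);	/* WORKER_STATE_INIT -> INITIALIZED */
	benchmark_worker_run(worker);	/* WORKER_STATE_RUN */
	benchmark_worker_join(worker);	/* WORKER_STATE_END */
	benchmark_worker_exit(worker);	/* WORKER_STATE_EXIT -> DONE */

	int ret = worker->ret;

	benchmark_worker_free(worker);
	return ret;
}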
2,576
36.897059
70
hpp
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/clo_vec.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * clo_vec.cpp -- command line options vector definitions */ #include <cassert> #include <cstdlib> #include <cstring> #include "clo_vec.hpp" /* * clo_vec_alloc -- allocate new CLO vector */ struct clo_vec * clo_vec_alloc(size_t size) { struct clo_vec *clovec = (struct clo_vec *)malloc(sizeof(*clovec)); assert(clovec != nullptr); /* init list of arguments and allocations */ PMDK_TAILQ_INIT(&clovec->allocs); PMDK_TAILQ_INIT(&clovec->args); clovec->nallocs = 0; /* size of each struct */ clovec->size = size; /* add first struct to list */ struct clo_vec_args *args = (struct clo_vec_args *)malloc(sizeof(*args)); assert(args != nullptr); args->args = calloc(1, size); assert(args->args != nullptr); PMDK_TAILQ_INSERT_TAIL(&clovec->args, args, next); clovec->nargs = 1; return clovec; } /* * clo_vec_free -- free CLO vector and all allocations */ void clo_vec_free(struct clo_vec *clovec) { assert(clovec != nullptr); /* free all allocations */ while (!PMDK_TAILQ_EMPTY(&clovec->allocs)) { struct clo_vec_alloc *alloc = PMDK_TAILQ_FIRST(&clovec->allocs); PMDK_TAILQ_REMOVE(&clovec->allocs, alloc, next); free(alloc->ptr); free(alloc); } /* free all arguments */ while (!PMDK_TAILQ_EMPTY(&clovec->args)) { struct clo_vec_args *args = PMDK_TAILQ_FIRST(&clovec->args); PMDK_TAILQ_REMOVE(&clovec->args, args, next); free(args->args); free(args); } free(clovec); } /* * clo_vec_get_args -- return pointer to CLO arguments at specified index */ void * clo_vec_get_args(struct clo_vec *clovec, size_t i) { if (i >= clovec->nargs) return nullptr; size_t c = 0; struct clo_vec_args *args; PMDK_TAILQ_FOREACH(args, &clovec->args, next) { if (c == i) return args->args; c++; } return nullptr; } /* * clo_vec_add_alloc -- add allocation to CLO vector */ int clo_vec_add_alloc(struct clo_vec *clovec, void *ptr) { struct clo_vec_alloc *alloc = (struct clo_vec_alloc *)malloc(sizeof(*alloc)); assert(alloc != nullptr); alloc->ptr = ptr; PMDK_TAILQ_INSERT_TAIL(&clovec->allocs, alloc, next); clovec->nallocs++; return 0; } /* * clo_vec_grow -- (internal) grow in size the CLO vector */ static void clo_vec_grow(struct clo_vec *clovec, size_t new_len) { size_t nargs = new_len - clovec->nargs; size_t i; for (i = 0; i < nargs; i++) { struct clo_vec_args *args = (struct clo_vec_args *)calloc(1, sizeof(*args)); assert(args != nullptr); PMDK_TAILQ_INSERT_TAIL(&clovec->args, args, next); args->args = malloc(clovec->size); assert(args->args != nullptr); void *argscpy = clo_vec_get_args(clovec, i % clovec->nargs); assert(argscpy != nullptr); memcpy(args->args, argscpy, clovec->size); } clovec->nargs = new_len; } /* * clo_vec_vlist_alloc -- allocate list of values */ struct clo_vec_vlist * clo_vec_vlist_alloc(void) { struct clo_vec_vlist *list = (struct clo_vec_vlist *)malloc(sizeof(*list)); assert(list != nullptr); list->nvalues = 0; PMDK_TAILQ_INIT(&list->head); return list; } /* * clo_vec_vlist_free -- release list of values */ void clo_vec_vlist_free(struct clo_vec_vlist *list) { assert(list != nullptr); while (!PMDK_TAILQ_EMPTY(&list->head)) { struct clo_vec_value *val = PMDK_TAILQ_FIRST(&list->head); PMDK_TAILQ_REMOVE(&list->head, val, next); free(val->ptr); free(val); } free(list); } /* * clo_vec_vlist_add -- add value to list */ void clo_vec_vlist_add(struct clo_vec_vlist *list, void *ptr, size_t size) { struct clo_vec_value *val = (struct clo_vec_value *)malloc(sizeof(*val)); assert(val != nullptr); val->ptr = malloc(size); assert(val->ptr != nullptr); 
memcpy(val->ptr, ptr, size); PMDK_TAILQ_INSERT_TAIL(&list->head, val, next); list->nvalues++; } /* * clo_vec_memcpy -- copy value to CLO vector * * - clovec - CLO vector * - off - offset to value in structure * - size - size of value field * - ptr - pointer to value */ int clo_vec_memcpy(struct clo_vec *clovec, size_t off, size_t size, void *ptr) { if (off + size > clovec->size) return -1; size_t i; for (i = 0; i < clovec->nargs; i++) { auto *args = (char *)clo_vec_get_args(clovec, i); char *dptr = args + off; memcpy(dptr, ptr, size); } return 0; } /* * clo_vec_memcpy_list -- copy values from list to CLO vector * * - clovec - CLO vector * - off - offset to value in structure * - size - size of value field * - list - list of values */ int clo_vec_memcpy_list(struct clo_vec *clovec, size_t off, size_t size, struct clo_vec_vlist *list) { if (off + size > clovec->size) return -1; size_t len = clovec->nargs; if (list->nvalues > 1) clo_vec_grow(clovec, clovec->nargs * list->nvalues); struct clo_vec_value *value; size_t value_i = 0; size_t i; PMDK_TAILQ_FOREACH(value, &list->head, next) { for (i = value_i * len; i < (value_i + 1) * len; i++) { auto *args = (char *)clo_vec_get_args(clovec, i); char *dptr = args + off; memcpy(dptr, value->ptr, size); } value_i++; } return 0; }
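/*
 * Editorial note: the sketch below is not part of the original module.
 * It shows, with a made-up "example_args" structure, how a single value
 * is copied into every argument set and how a two-element value list
 * grows the vector so that each value gets its own argument set
 * (clovec->nargs becomes 2 here). offsetof() is assumed to be available
 * (it lives in <cstddef>).
 */
struct example_args {
	int a;
	int b;
};

static void
example_clo_vec_usage(void)
{
	struct clo_vec *clovec = clo_vec_alloc(sizeof(struct example_args));

	/* one value of 'a' -- copied into the single existing args struct */
	int a = 1;
	clo_vec_memcpy(clovec, offsetof(struct example_args, a), sizeof(a),
		       &a);

	/* two candidate values of 'b' -- the vector grows from 1 to 2 */
	struct clo_vec_vlist *vlist = clo_vec_vlist_alloc();
	int b_small = 8;
	int b_large = 64;
	clo_vec_vlist_add(vlist, &b_small, sizeof(b_small));
	clo_vec_vlist_add(vlist, &b_large, sizeof(b_large));
	clo_vec_memcpy_list(clovec, offsetof(struct example_args, b),
			    sizeof(b_small), vlist);

	/* the two argument sets are now {a=1, b=8} and {a=1, b=64} */
	clo_vec_vlist_free(vlist);
	clo_vec_free(clovec);
}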
5,100
19.322709
74
cpp
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/poolset_util.hpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018, Intel Corporation */ /* * poolset_util.hpp -- this file provides interface for creating * poolsets of specified size */ #ifndef POOLSET_UTIL_HPP #define POOLSET_UTIL_HPP #include <stddef.h> #define POOLSET_PATH "pool.set" int dynamic_poolset_create(const char *path, size_t size); #endif
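A hedged usage sketch of this interface, not taken from the repository: it mirrors the pattern the benchmarks use, i.e. create a poolset description of the requested size first, then open the pool through the generated POOLSET_PATH file with size 0. The layout name and mode are placeholders, and libpmemobj.h is assumed to be included.

static PMEMobjpool *
open_dynamic_pool(const char *path, size_t poolsize)
{
	/* writes a "pool.set" file describing a poolset of `poolsize' bytes */
	if (dynamic_poolset_create(path, poolsize) == -1)
		return nullptr;

	/* the size argument must be 0 when creating from a poolset file */
	return pmemobj_create(POOLSET_PATH, "example_layout", 0, 0666);
}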
356
18.833333
64
hpp
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/scenario.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * scenario.cpp -- scenario module definitions */ #include <cassert> #include <cstdlib> #include <cstring> #include "queue.h" #include "scenario.hpp" /* * kv_alloc -- allocate key/value structure */ struct kv * kv_alloc(const char *key, const char *value) { struct kv *kv = (struct kv *)malloc(sizeof(*kv)); assert(kv != nullptr); kv->key = strdup(key); assert(kv->key != nullptr); kv->value = strdup(value); assert(kv->value != nullptr); return kv; } /* * kv_free -- free the key/value structure */ void kv_free(struct kv *kv) { assert(kv != nullptr); free(kv->key); free(kv->value); free(kv); } /* * scenario_alloc -- allocate scenario structure */ struct scenario * scenario_alloc(const char *name, const char *bench) { struct scenario *s = (struct scenario *)malloc(sizeof(*s)); assert(s != nullptr); PMDK_TAILQ_INIT(&s->head); s->name = strdup(name); assert(s->name != nullptr); s->benchmark = strdup(bench); assert(s->benchmark != nullptr); s->group = nullptr; return s; } /* * scenario_free -- free the scenario structure and all its content */ void scenario_free(struct scenario *s) { assert(s != nullptr); while (!PMDK_TAILQ_EMPTY(&s->head)) { struct kv *kv = PMDK_TAILQ_FIRST(&s->head); PMDK_TAILQ_REMOVE(&s->head, kv, next); kv_free(kv); } free(s->group); free(s->name); free(s->benchmark); free(s); } /* * scenario_set_group -- set group of scenario */ void scenario_set_group(struct scenario *s, const char *group) { assert(s != nullptr); s->group = strdup(group); } /* * scenarios_alloc -- allocate scenarios structure */ struct scenarios * scenarios_alloc(void) { struct scenarios *scenarios = (struct scenarios *)malloc(sizeof(*scenarios)); assert(nullptr != scenarios); PMDK_TAILQ_INIT(&scenarios->head); return scenarios; } /* * scenarios_free -- free scenarios structure and all its content */ void scenarios_free(struct scenarios *scenarios) { assert(scenarios != nullptr); while (!PMDK_TAILQ_EMPTY(&scenarios->head)) { struct scenario *sce = PMDK_TAILQ_FIRST(&scenarios->head); PMDK_TAILQ_REMOVE(&scenarios->head, sce, next); scenario_free(sce); } free(scenarios); } /* * scenarios_get_scenario -- get scenario of given name */ struct scenario * scenarios_get_scenario(struct scenarios *ss, const char *name) { struct scenario *scenario; FOREACH_SCENARIO(scenario, ss) { if (strcmp(scenario->name, name) == 0) return scenario; } return nullptr; } /* * contains_scenarios -- check if cmd line args contain any scenarios from ss */ bool contains_scenarios(int argc, char **argv, struct scenarios *ss) { assert(argv != nullptr); assert(argc > 0); assert(ss != nullptr); for (int i = 0; i < argc; i++) { if (scenarios_get_scenario(ss, argv[i])) return true; } return false; } /* * clone_scenario -- alloc a new scenario and copy all data from src scenario */ struct scenario * clone_scenario(struct scenario *src_scenario) { assert(src_scenario != nullptr); struct scenario *new_scenario = scenario_alloc(src_scenario->name, src_scenario->benchmark); assert(new_scenario != nullptr); struct kv *src_kv; FOREACH_KV(src_kv, src_scenario) { struct kv *new_kv = kv_alloc(src_kv->key, src_kv->value); assert(new_kv != nullptr); PMDK_TAILQ_INSERT_TAIL(&new_scenario->head, new_kv, next); } return new_scenario; } /* * find_kv_in_scenario - find a kv in the given scenario with the given key * value. 
Function returns the pointer to the kv structure containing the key or * nullptr if it is not found */ struct kv * find_kv_in_scenario(const char *key, const struct scenario *scenario) { struct kv *kv; FOREACH_KV(kv, scenario) { if (strcmp(kv->key, key) == 0) return kv; } return nullptr; }
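An illustrative sketch of the scenario API above, not part of the original file; the scenario, benchmark and argument names are invented and <cstdio> is assumed for printf(). It builds a scenario set by hand (the same shape config_reader produces), then looks the scenario and one of its key/value arguments back up.

static void
scenario_example(void)
{
	struct scenarios *ss = scenarios_alloc();

	struct scenario *s = scenario_alloc("write_seq", "pmem_memcpy");
	scenario_set_group(s, "latency");

	struct kv *arg = kv_alloc("data-size", "4096");
	PMDK_TAILQ_INSERT_TAIL(&s->head, arg, next);
	PMDK_TAILQ_INSERT_TAIL(&ss->head, s, next);

	struct scenario *found = scenarios_get_scenario(ss, "write_seq");
	struct kv *kv = find_kv_in_scenario("data-size", found);
	printf("%s: %s = %s\n", found->name, kv->key, kv->value);

	/* frees every scenario and its kv list as well */
	scenarios_free(ss);
}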
3,844
18.419192
80
cpp
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/config_reader.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * config_reader.cpp -- config reader module definitions */ #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <glib.h> #include <sys/queue.h> #include "config_reader.hpp" #include "scenario.hpp" #define SECTION_GLOBAL "global" #define KEY_BENCHMARK "bench" #define KEY_GROUP "group" /* * config_reader -- handle structure */ struct config_reader { GKeyFile *key_file; }; /* * config_reader_alloc -- allocate config reader */ struct config_reader * config_reader_alloc(void) { struct config_reader *cr = (struct config_reader *)malloc(sizeof(*cr)); assert(cr != nullptr); cr->key_file = g_key_file_new(); if (!cr->key_file) goto err; return cr; err: free(cr); return nullptr; } /* * config_reader_read -- read config file */ int config_reader_read(struct config_reader *cr, const char *fname) { if (g_key_file_load_from_file(cr->key_file, fname, G_KEY_FILE_NONE, nullptr) != TRUE) return -1; return 0; } /* * config_reader_free -- free config reader */ void config_reader_free(struct config_reader *cr) { g_key_file_free(cr->key_file); free(cr); } /* * is_scenario -- (internal) return true if _name_ is scenario name * * This filters out the _global_ and _config_ sections. */ static int is_scenario(const char *name) { return strcmp(name, SECTION_GLOBAL); } /* * is_argument -- (internal) return true if _name_ is argument name * * This filters out the _benchmark_ key. */ static int is_argument(const char *name) { return strcmp(name, KEY_BENCHMARK) != 0 && strcmp(name, KEY_GROUP) != 0; } /* * config_reader_get_scenarios -- return scenarios from config file * * This function reads the config file and returns a list of scenarios. * Each scenario contains a list of key/value arguments. * The scenario's arguments are merged with arguments from global section. */ int config_reader_get_scenarios(struct config_reader *cr, struct scenarios **scenarios) { /* * Read all groups. * The config file must have at least one group, otherwise * it is considered as invalid. */ gsize ngroups; gsize g; gchar **groups = g_key_file_get_groups(cr->key_file, &ngroups); assert(nullptr != groups); if (!groups) return -1; /* * Check if global section is present and read keys from it. */ int ret = 0; int has_global = g_key_file_has_group(cr->key_file, SECTION_GLOBAL) == TRUE; gsize ngkeys; gchar **gkeys = nullptr; struct scenarios *s; if (has_global) { gkeys = g_key_file_get_keys(cr->key_file, SECTION_GLOBAL, &ngkeys, nullptr); assert(nullptr != gkeys); if (!gkeys) { ret = -1; goto err_groups; } } s = scenarios_alloc(); assert(nullptr != s); if (!s) { ret = -1; goto err_gkeys; } for (g = 0; g < ngroups; g++) { /* * Check whether a group is a scenario * or global section. */ if (!is_scenario(groups[g])) continue; /* * Check for KEY_BENCHMARK which contains benchmark name. * If not present the benchmark name is the same as the * name of the section. */ struct scenario *scenario = nullptr; if (g_key_file_has_key(cr->key_file, groups[g], KEY_BENCHMARK, nullptr) == FALSE) { scenario = scenario_alloc(groups[g], groups[g]); assert(scenario != nullptr); } else { gchar *benchmark = g_key_file_get_value(cr->key_file, groups[g], KEY_BENCHMARK, nullptr); assert(benchmark != nullptr); if (!benchmark) { ret = -1; goto err_scenarios; } scenario = scenario_alloc(groups[g], benchmark); assert(scenario != nullptr); free(benchmark); } gsize k; if (has_global) { /* * Merge key/values from global section. 
*/ for (k = 0; k < ngkeys; k++) { if (g_key_file_has_key(cr->key_file, groups[g], gkeys[k], nullptr) == TRUE) continue; if (!is_argument(gkeys[k])) continue; char *value = g_key_file_get_value( cr->key_file, SECTION_GLOBAL, gkeys[k], nullptr); assert(nullptr != value); if (!value) { ret = -1; goto err_scenarios; } struct kv *kv = kv_alloc(gkeys[k], value); assert(nullptr != kv); free(value); if (!kv) { ret = -1; goto err_scenarios; } PMDK_TAILQ_INSERT_TAIL(&scenario->head, kv, next); } } /* check for group name */ if (g_key_file_has_key(cr->key_file, groups[g], KEY_GROUP, nullptr) != FALSE) { gchar *group = g_key_file_get_value( cr->key_file, groups[g], KEY_GROUP, nullptr); assert(group != nullptr); scenario_set_group(scenario, group); } else if (g_key_file_has_key(cr->key_file, SECTION_GLOBAL, KEY_GROUP, nullptr) != FALSE) { gchar *group = g_key_file_get_value(cr->key_file, SECTION_GLOBAL, KEY_GROUP, nullptr); scenario_set_group(scenario, group); } gsize nkeys; gchar **keys = g_key_file_get_keys(cr->key_file, groups[g], &nkeys, nullptr); assert(nullptr != keys); if (!keys) { ret = -1; goto err_scenarios; } /* * Read key/values from the scenario's section. */ for (k = 0; k < nkeys; k++) { if (!is_argument(keys[k])) continue; char *value = g_key_file_get_value( cr->key_file, groups[g], keys[k], nullptr); assert(nullptr != value); if (!value) { ret = -1; g_strfreev(keys); goto err_scenarios; } struct kv *kv = kv_alloc(keys[k], value); assert(nullptr != kv); free(value); if (!kv) { g_strfreev(keys); ret = -1; goto err_scenarios; } PMDK_TAILQ_INSERT_TAIL(&scenario->head, kv, next); } g_strfreev(keys); PMDK_TAILQ_INSERT_TAIL(&s->head, scenario, next); } g_strfreev(gkeys); g_strfreev(groups); *scenarios = s; return 0; err_scenarios: scenarios_free(s); err_gkeys: g_strfreev(gkeys); err_groups: g_strfreev(groups); return ret; }
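An illustrative input for the reader above; key names other than bench and group are benchmark-specific and invented here:

[global]
group = latency
data-size = 512

[write_seq]
bench = pmem_memcpy
data-size = 4096

For this file config_reader_get_scenarios() returns a single scenario named "write_seq" running benchmark "pmem_memcpy" in group "latency"; data-size resolves to 4096, because keys already present in the scenario's own section are skipped when the global section is merged in.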
5,961
20.292857
74
cpp
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/pmemobj_gen.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * pmemobj_gen.cpp -- benchmark for pmemobj_direct() * and pmemobj_open() functions. */ #include <cassert> #include <cerrno> #include <cstdio> #include <cstdlib> #include <cstring> #include <fcntl.h> #include <file.h> #include <sys/stat.h> #include <unistd.h> #include "benchmark.hpp" #include "libpmemobj.h" #define LAYOUT_NAME "benchmark" #define FACTOR 4 #define DIR_MODE 0700 #define FILE_MODE 0666 #define PART_NAME "/part" #define MAX_DIGITS 2 struct pobj_bench; struct pobj_worker; typedef size_t (*fn_type_num_t)(struct pobj_bench *ob, size_t worker_idx, size_t op_idx); typedef size_t (*fn_size_t)(struct pobj_bench *ob, size_t idx); typedef size_t (*fn_num_t)(size_t idx); /* * Enumeration used to determine the mode of the assigning type_number * value to the persistent objects. */ enum type_mode { TYPE_MODE_ONE, TYPE_MODE_PER_THREAD, TYPE_MODE_RAND, MAX_TYPE_MODE, }; /* * pobj_args - Stores command line parsed arguments. * * rand_type : Use random type number for every new allocated object. * Default, there is one type number for all objects. * * range : Use random allocation size. * * min_size : Minimum allocation size. * * n_objs : Number of objects allocated per thread * * one_pool : Use one common pool for all thread * * one_obj : Create and use one object per thread * * obj_size : Size of each allocated object * * n_ops : Number of operations */ struct pobj_args { char *type_num; bool range; unsigned min_size; size_t n_objs; bool one_pool; bool one_obj; size_t obj_size; size_t n_ops; }; /* * pobj_bench - Stores variables used in benchmark, passed within functions. * * pop : Pointer to the persistent pool. * * pa : Stores pobj_args structure. * * sets : Stores files names using to create pool per thread * * random_types : Random type numbers for persistent objects. * * rand_sizes : random values with allocation sizes. * * n_pools : Number of created pools. * * n_objs : Number of object created per thread. * * type_mode : Type_mode enum value * * fn_type_num : Function returning proper type number for each object. * * fn_size : Function returning proper size of allocation. * * pool : Functions returning number of thread if * one pool per thread created or index 0 if not. * * obj : Function returning number of operation if flag set * to false or index 0 if set to true. */ struct pobj_bench { PMEMobjpool **pop; struct pobj_args *args_priv; const char **sets; size_t *random_types; size_t *rand_sizes; size_t n_pools; int type_mode; fn_type_num_t fn_type_num; fn_size_t fn_size; fn_num_t pool; fn_num_t obj; }; /* * pobj_worker - Stores variables used by one thread. */ struct pobj_worker { PMEMoid *oids; }; /* * type_mode_one -- always returns 0, as in the mode TYPE_MODE_ONE * all of the persistent objects have the same type_number value. */ static size_t type_mode_one(struct pobj_bench *bench_priv, size_t worker_idx, size_t op_idx) { return 0; } /* * type_mode_per_thread -- always returns worker index, as in the mode * TYPE_MODE_PER_THREAD all persistent object allocated by the same thread * have the same type_number value. */ static size_t type_mode_per_thread(struct pobj_bench *bench_priv, size_t worker_idx, size_t op_idx) { return worker_idx; } /* * type_mode_rand -- returns the value from the random_types array assigned * for the specific operation in a specific thread. 
*/ static size_t type_mode_rand(struct pobj_bench *bench_priv, size_t worker_idx, size_t op_idx) { return bench_priv->random_types[op_idx]; } /* * range_size -- returns size of object allocation from rand_sizes array. */ static size_t range_size(struct pobj_bench *bench_priv, size_t idx) { return bench_priv->rand_sizes[idx]; } /* * static_size -- returns always the same size of object allocation. */ static size_t static_size(struct pobj_bench *bench_priv, size_t idx) { return bench_priv->args_priv->obj_size; } /* * diff_num -- returns given index */ static size_t diff_num(size_t idx) { return idx; } /* * one_num -- returns always the same index. */ static size_t one_num(size_t idx) { return 0; } static fn_type_num_t type_mode_func[MAX_TYPE_MODE] = { type_mode_one, type_mode_per_thread, type_mode_rand}; const char *type_mode_names[MAX_TYPE_MODE] = {"one", "per-thread", "rand"}; /* * parse_type_mode -- parses command line "--type-number" argument * and returns proper type_mode enum value. */ static enum type_mode parse_type_mode(const char *arg) { enum type_mode i = TYPE_MODE_ONE; for (; i < MAX_TYPE_MODE && strcmp(arg, type_mode_names[i]) != 0; i = (enum type_mode)(i + 1)) ; return i; } /* * rand_sizes -- allocates array and calculates random values as allocation * sizes for each object. Used only when range flag set. */ static size_t * rand_sizes(size_t min, size_t max, size_t n_ops) { assert(n_ops != 0); auto *rand_sizes = (size_t *)malloc(n_ops * sizeof(size_t)); if (rand_sizes == nullptr) { perror("malloc"); return nullptr; } for (size_t i = 0; i < n_ops; i++) { rand_sizes[i] = RRAND(max, min); } return rand_sizes; } /* * random_types -- allocates array and calculates random values to assign * type_number for each object. */ static int random_types(struct pobj_bench *bench_priv, struct benchmark_args *args) { assert(bench_priv->args_priv->n_objs != 0); bench_priv->random_types = (size_t *)malloc( bench_priv->args_priv->n_objs * sizeof(size_t)); if (bench_priv->random_types == nullptr) { perror("malloc"); return -1; } for (size_t i = 0; i < bench_priv->args_priv->n_objs; i++) bench_priv->random_types[i] = rand() % UINT32_MAX; return 0; } /* * pobj_init - common part of the benchmark initialization functions. * Parses command line arguments, set variables and creates persistent pools. */ static int pobj_init(struct benchmark *bench, struct benchmark_args *args) { unsigned i = 0; size_t psize; size_t n_objs; assert(bench != nullptr); assert(args != nullptr); enum file_type type = util_file_get_type(args->fname); if (type == OTHER_ERROR) { fprintf(stderr, "could not check type of file %s\n", args->fname); return -1; } auto *bench_priv = (struct pobj_bench *)malloc(sizeof(struct pobj_bench)); if (bench_priv == nullptr) { perror("malloc"); return -1; } assert(args->opts != nullptr); bench_priv->args_priv = (struct pobj_args *)args->opts; bench_priv->args_priv->obj_size = args->dsize; bench_priv->args_priv->range = bench_priv->args_priv->min_size > 0 ? true : false; bench_priv->n_pools = !bench_priv->args_priv->one_pool ? args->n_threads : 1; bench_priv->pool = bench_priv->n_pools > 1 ? diff_num : one_num; bench_priv->obj = !bench_priv->args_priv->one_obj ? 
diff_num : one_num; if ((args->is_poolset || type == TYPE_DEVDAX) && bench_priv->n_pools > 1) { fprintf(stderr, "cannot use poolset nor device dax for multiple pools," " please use -P|--one-pool option instead"); goto free_bench_priv; } /* * Multiplication by FACTOR prevents from out of memory error * as the actual size of the allocated persistent objects * is always larger than requested. */ n_objs = bench_priv->args_priv->n_objs; if (bench_priv->n_pools == 1) n_objs *= args->n_threads; psize = PMEMOBJ_MIN_POOL + n_objs * args->dsize * args->n_threads * FACTOR; /* assign type_number determining function */ bench_priv->type_mode = parse_type_mode(bench_priv->args_priv->type_num); switch (bench_priv->type_mode) { case MAX_TYPE_MODE: fprintf(stderr, "unknown type mode"); goto free_bench_priv; case TYPE_MODE_RAND: if (random_types(bench_priv, args)) goto free_bench_priv; break; default: bench_priv->random_types = nullptr; } bench_priv->fn_type_num = type_mode_func[bench_priv->type_mode]; /* assign size determining function */ bench_priv->fn_size = bench_priv->args_priv->range ? range_size : static_size; bench_priv->rand_sizes = nullptr; if (bench_priv->args_priv->range) { if (bench_priv->args_priv->min_size > args->dsize) { fprintf(stderr, "Invalid allocation size"); goto free_random_types; } bench_priv->rand_sizes = rand_sizes(bench_priv->args_priv->min_size, bench_priv->args_priv->obj_size, bench_priv->args_priv->n_objs); if (bench_priv->rand_sizes == nullptr) goto free_random_types; } assert(bench_priv->n_pools > 0); bench_priv->pop = (PMEMobjpool **)calloc(bench_priv->n_pools, sizeof(PMEMobjpool *)); if (bench_priv->pop == nullptr) { perror("calloc"); goto free_random_sizes; } bench_priv->sets = (const char **)calloc(bench_priv->n_pools, sizeof(const char *)); if (bench_priv->sets == nullptr) { perror("calloc"); goto free_pop; } if (bench_priv->n_pools > 1) { assert(!args->is_poolset); if (util_file_mkdir(args->fname, DIR_MODE) != 0) { fprintf(stderr, "cannot create directory\n"); goto free_sets; } size_t path_len = (strlen(PART_NAME) + strlen(args->fname)) + MAX_DIGITS + 1; for (i = 0; i < bench_priv->n_pools; i++) { bench_priv->sets[i] = (char *)malloc(path_len * sizeof(char)); if (bench_priv->sets[i] == nullptr) { perror("malloc"); goto free_sets; } int ret = util_snprintf((char *)bench_priv->sets[i], path_len, "%s%s%02x", args->fname, PART_NAME, i); if (ret < 0) { perror("snprintf"); goto free_sets; } bench_priv->pop[i] = pmemobj_create(bench_priv->sets[i], LAYOUT_NAME, psize, FILE_MODE); if (bench_priv->pop[i] == nullptr) { perror(pmemobj_errormsg()); goto free_sets; } } } else { if (args->is_poolset || type == TYPE_DEVDAX) { if (args->fsize < psize) { fprintf(stderr, "file size too large\n"); goto free_pools; } psize = 0; } bench_priv->sets[0] = args->fname; bench_priv->pop[0] = pmemobj_create( bench_priv->sets[0], LAYOUT_NAME, psize, FILE_MODE); if (bench_priv->pop[0] == nullptr) { perror(pmemobj_errormsg()); goto free_pools; } } pmembench_set_priv(bench, bench_priv); return 0; free_sets: for (; i > 0; i--) { pmemobj_close(bench_priv->pop[i - 1]); free((char *)bench_priv->sets[i - 1]); } free_pools: free(bench_priv->sets); free_pop: free(bench_priv->pop); free_random_sizes: free(bench_priv->rand_sizes); free_random_types: free(bench_priv->random_types); free_bench_priv: free(bench_priv); return -1; } /* * pobj_direct_init -- special part of pobj_direct benchmark initialization. 
*/ static int pobj_direct_init(struct benchmark *bench, struct benchmark_args *args) { auto *pa = (struct pobj_args *)args->opts; pa->n_objs = pa->one_obj ? 1 : args->n_ops_per_thread; if (pobj_init(bench, args) != 0) return -1; return 0; } /* * pobj_exit -- common part for the benchmarks exit functions */ static int pobj_exit(struct benchmark *bench, struct benchmark_args *args) { size_t i; auto *bench_priv = (struct pobj_bench *)pmembench_get_priv(bench); if (bench_priv->n_pools > 1) { for (i = 0; i < bench_priv->n_pools; i++) { pmemobj_close(bench_priv->pop[i]); free((char *)bench_priv->sets[i]); } } else { pmemobj_close(bench_priv->pop[0]); } free(bench_priv->sets); free(bench_priv->pop); free(bench_priv->rand_sizes); free(bench_priv->random_types); free(bench_priv); return 0; } /* * pobj_init_worker -- worker initialization */ static int pobj_init_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { size_t i, idx = worker->index; auto *bench_priv = (struct pobj_bench *)pmembench_get_priv(bench); auto *pw = (struct pobj_worker *)calloc(1, sizeof(struct pobj_worker)); if (pw == nullptr) { perror("calloc"); return -1; } worker->priv = pw; pw->oids = (PMEMoid *)calloc(bench_priv->args_priv->n_objs, sizeof(PMEMoid)); if (pw->oids == nullptr) { free(pw); perror("calloc"); return -1; } PMEMobjpool *pop = bench_priv->pop[bench_priv->pool(idx)]; for (i = 0; i < bench_priv->args_priv->n_objs; i++) { size_t size = bench_priv->fn_size(bench_priv, i); size_t type = bench_priv->fn_type_num(bench_priv, idx, i); if (pmemobj_alloc(pop, &pw->oids[i], size, type, nullptr, nullptr) != 0) { perror("pmemobj_alloc"); goto out; } } return 0; out: for (; i > 0; i--) pmemobj_free(&pw->oids[i - 1]); free(pw->oids); free(pw); return -1; } /* * pobj_direct_op -- main operations of the obj_direct benchmark. */ static int pobj_direct_op(struct benchmark *bench, struct operation_info *info) { auto *bench_priv = (struct pobj_bench *)pmembench_get_priv(bench); auto *pw = (struct pobj_worker *)info->worker->priv; size_t idx = bench_priv->obj(info->index); /* Query an invalid uuid:off pair to invalidate the cache. */ PMEMoid bad = {1, 1}; #define OBJ_DIRECT_NITER 1024 /* * As we measure a very fast operation, we need a loop inside the * test harness. */ for (int i = 0; i < OBJ_DIRECT_NITER; i++) { if (pmemobj_direct(pw->oids[idx]) == nullptr) return -1; if (pmemobj_direct(bad) != nullptr) return -1; } return 0; #undef OBJ_DIRECT_NITER } /* * pobj_open_op -- main operations of the obj_open benchmark. */ static int pobj_open_op(struct benchmark *bench, struct operation_info *info) { auto *bench_priv = (struct pobj_bench *)pmembench_get_priv(bench); size_t idx = bench_priv->pool(info->worker->index); pmemobj_close(bench_priv->pop[idx]); bench_priv->pop[idx] = pmemobj_open(bench_priv->sets[idx], LAYOUT_NAME); if (bench_priv->pop[idx] == nullptr) return -1; return 0; } /* * pobj_free_worker -- worker exit function */ static void pobj_free_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { auto *pw = (struct pobj_worker *)worker->priv; auto *bench_priv = (struct pobj_bench *)pmembench_get_priv(bench); for (size_t i = 0; i < bench_priv->args_priv->n_objs; i++) pmemobj_free(&pw->oids[i]); free(pw->oids); free(pw); } static struct benchmark_info obj_open; static struct benchmark_info obj_direct; /* Array defining common command line arguments. 
*/ static struct benchmark_clo pobj_direct_clo[4]; static struct benchmark_clo pobj_open_clo[3]; CONSTRUCTOR(pmemobj_gen_constructor) void pmemobj_gen_constructor(void) { pobj_direct_clo[0].opt_short = 'T'; pobj_direct_clo[0].opt_long = "type-number"; pobj_direct_clo[0].descr = "Type number mode - one, per-thread, " "rand"; pobj_direct_clo[0].def = "one"; pobj_direct_clo[0].off = clo_field_offset(struct pobj_args, type_num); pobj_direct_clo[0].type = CLO_TYPE_STR; pobj_direct_clo[1].opt_short = 'm'; pobj_direct_clo[1].opt_long = "min-size"; pobj_direct_clo[1].type = CLO_TYPE_UINT; pobj_direct_clo[1].descr = "Minimum allocation size"; pobj_direct_clo[1].off = clo_field_offset(struct pobj_args, min_size); pobj_direct_clo[1].def = "0"; pobj_direct_clo[1].type_uint.size = clo_field_size(struct pobj_args, min_size); pobj_direct_clo[1].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX; pobj_direct_clo[1].type_uint.min = 0; pobj_direct_clo[1].type_uint.max = UINT_MAX; pobj_direct_clo[2].opt_short = 'P'; pobj_direct_clo[2].opt_long = "one-pool"; pobj_direct_clo[2].descr = "Create one pool for all threads"; pobj_direct_clo[2].type = CLO_TYPE_FLAG; pobj_direct_clo[2].off = clo_field_offset(struct pobj_args, one_pool); pobj_direct_clo[3].opt_short = 'O'; pobj_direct_clo[3].opt_long = "one-object"; pobj_direct_clo[3].descr = "Use only one object per thread"; pobj_direct_clo[3].type = CLO_TYPE_FLAG; pobj_direct_clo[3].off = clo_field_offset(struct pobj_args, one_obj); pobj_open_clo[0].opt_short = 'T', pobj_open_clo[0].opt_long = "type-number", pobj_open_clo[0].descr = "Type number mode - one, " "per-thread, rand", pobj_open_clo[0].def = "one", pobj_open_clo[0].off = clo_field_offset(struct pobj_args, type_num), pobj_open_clo[0].type = CLO_TYPE_STR, pobj_open_clo[1].opt_short = 'm', pobj_open_clo[1].opt_long = "min-size", pobj_open_clo[1].type = CLO_TYPE_UINT, pobj_open_clo[1].descr = "Minimum allocation size", pobj_open_clo[1].off = clo_field_offset(struct pobj_args, min_size), pobj_open_clo[1].def = "0", pobj_open_clo[1].type_uint.size = clo_field_size(struct pobj_args, min_size), pobj_open_clo[1].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX, pobj_open_clo[1].type_uint.min = 0, pobj_open_clo[1].type_uint.max = UINT_MAX, pobj_open_clo[2].opt_short = 'o'; pobj_open_clo[2].opt_long = "objects"; pobj_open_clo[2].type = CLO_TYPE_UINT; pobj_open_clo[2].descr = "Number of objects in each pool"; pobj_open_clo[2].off = clo_field_offset(struct pobj_args, n_objs); pobj_open_clo[2].def = "1"; pobj_open_clo[2].type_uint.size = clo_field_size(struct pobj_args, n_objs); pobj_open_clo[2].type_uint.base = CLO_INT_BASE_DEC | CLO_INT_BASE_HEX; pobj_open_clo[2].type_uint.min = 1; pobj_open_clo[2].type_uint.max = UINT_MAX; obj_open.name = "obj_open"; obj_open.brief = "pmemobj_open() benchmark"; obj_open.init = pobj_init; obj_open.exit = pobj_exit; obj_open.multithread = true; obj_open.multiops = true; obj_open.init_worker = pobj_init_worker; obj_open.free_worker = pobj_free_worker; obj_open.operation = pobj_open_op; obj_open.measure_time = true; obj_open.clos = pobj_open_clo; obj_open.nclos = ARRAY_SIZE(pobj_open_clo); obj_open.opts_size = sizeof(struct pobj_args); obj_open.rm_file = true; obj_open.allow_poolset = true; REGISTER_BENCHMARK(obj_open); obj_direct.name = "obj_direct"; obj_direct.brief = "pmemobj_direct() benchmark"; obj_direct.init = pobj_direct_init; obj_direct.exit = pobj_exit; obj_direct.multithread = true; obj_direct.multiops = true; obj_direct.init_worker = pobj_init_worker; 
obj_direct.free_worker = pobj_free_worker; obj_direct.operation = pobj_direct_op; obj_direct.measure_time = true; obj_direct.clos = pobj_direct_clo; obj_direct.nclos = ARRAY_SIZE(pobj_direct_clo); obj_direct.opts_size = sizeof(struct pobj_args); obj_direct.rm_file = true; obj_direct.allow_poolset = true; REGISTER_BENCHMARK(obj_direct); };
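A worked sketch, illustration only, of how the --type-number option is resolved by the code above; it would have to live in the same translation unit, since type_mode_func is static, and the worker/op indices are arbitrary.

static void
type_mode_example(struct pobj_bench *bench_priv)
{
	enum type_mode m = parse_type_mode("per-thread");
	assert(m == TYPE_MODE_PER_THREAD);

	fn_type_num_t fn = type_mode_func[m];

	/* per-thread mode: the type number is simply the worker index */
	size_t type_num = fn(bench_priv, 3 /* worker_idx */, 17 /* op_idx */);
	assert(type_num == 3);
}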
18,229
26.127976
79
cpp
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/rpmem.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * rpmem.cpp -- rpmem benchmarks definition */ #include <cassert> #include <cerrno> #include <cstddef> #include <cstdio> #include <cstdlib> #include <cstring> #include <fcntl.h> #include <sys/file.h> #include <sys/mman.h> #include <unistd.h> #include "benchmark.hpp" #include "libpmem.h" #include "librpmem.h" #include "os.h" #include "set.h" #include "util.h" #define CL_ALIGNMENT 64 #define MAX_OFFSET (CL_ALIGNMENT - 1) #define ALIGN_CL(x) (((x) + CL_ALIGNMENT - 1) & ~(CL_ALIGNMENT - 1)) #define BENCH_RPMEM_FLUSH_NAME "rpmem_flush_drain" #define BENCH_RPMEM_PERSIST_NAME "rpmem_persist" #define BENCH_RPMEM_MIXED_NAME "rpmem_mixed" /* * rpmem_args -- benchmark specific command line options */ struct rpmem_args { char *mode; /* operation mode: stat, seq, rand */ bool no_warmup; /* do not do warmup */ bool no_memset; /* do not call memset before each persist */ size_t chunk_size; /* elementary chunk size */ size_t dest_off; /* destination address offset */ bool relaxed; /* use RPMEM_PERSIST_RELAXED / RPMEM_FLUSH_RELAXED flag */ char *workload; /* workload */ int flushes_per_drain; /* # of flushes between drains */ }; /* * rpmem_bench -- benchmark context */ struct rpmem_bench { struct rpmem_args *pargs; /* benchmark specific arguments */ size_t *offsets; /* random/sequential address offsets */ size_t n_offsets; /* number of random elements */ size_t *offsets_pos; /* position within offsets */ int const_b; /* memset() value */ size_t min_size; /* minimum file size */ void *addrp; /* mapped file address */ void *pool; /* memory pool address */ size_t pool_size; /* size of memory pool */ size_t mapped_len; /* mapped length */ RPMEMpool **rpp; /* rpmem pool pointers */ unsigned *nlanes; /* number of lanes for each remote replica */ unsigned nreplicas; /* number of remote replicas */ size_t csize_align; /* aligned elementary chunk size */ unsigned *flags; /* flags for ops */ size_t workload_len; /* length of the workload */ unsigned n_flushing_ops_per_thread; /* # of operation which require offsets per thread */ }; /* * operation_mode -- mode of operation */ enum operation_mode { OP_MODE_UNKNOWN, OP_MODE_STAT, /* always use the same chunk */ OP_MODE_SEQ, /* use consecutive chunks */ OP_MODE_RAND, /* use random chunks */ OP_MODE_SEQ_WRAP, /* use consecutive chunks, but use file size */ OP_MODE_RAND_WRAP, /* use random chunks, but use file size */ }; /* * parse_op_mode -- parse operation mode from string */ static enum operation_mode parse_op_mode(const char *arg) { if (strcmp(arg, "stat") == 0) return OP_MODE_STAT; else if (strcmp(arg, "seq") == 0) return OP_MODE_SEQ; else if (strcmp(arg, "rand") == 0) return OP_MODE_RAND; else if (strcmp(arg, "seq-wrap") == 0) return OP_MODE_SEQ_WRAP; else if (strcmp(arg, "rand-wrap") == 0) return OP_MODE_RAND_WRAP; else return OP_MODE_UNKNOWN; } /* * get_flushing_op_num -- return # of operations in the workload which require * offsets */ static unsigned get_flushing_op_num(struct benchmark *bench, struct rpmem_bench *mb) { assert(bench); struct benchmark_info *info = pmembench_get_info(bench); assert(info); /* * The rpmem_persist benchmark does one rpmem_persist() per worker op. * The rpmem_flush_drain benchmark does one rpmem_flush() or * rpmem_flush() + rpmem_drain() per worker op. Either way, it * requires one offset per worker op. 
*/ if (strcmp(info->name, BENCH_RPMEM_PERSIST_NAME) == 0 || strcmp(info->name, BENCH_RPMEM_FLUSH_NAME) == 0) return 1; assert(strcmp(info->name, BENCH_RPMEM_MIXED_NAME) == 0); assert(mb); assert(mb->pargs); assert(mb->pargs->workload); assert(mb->workload_len > 0); unsigned num = 0; /* * The rpmem_mixed benchmark performs multiple API calls per worker * op some of them flushes ergo requires its own offset. */ for (size_t i = 0; i < mb->workload_len; ++i) { switch (mb->pargs->workload[i]) { case 'f': /* rpmem_flush */ case 'g': /* rpmem_flush + RPMEM_FLUSH_RELAXED */ case 'p': /* rpmem_persist */ case 'r': /* rpmem_persist + RPMEM_PERSIST_RELAXED */ ++num; break; } } /* * To simplify checks it is assumed each worker op requires at least one * flushing operation even though it doesn't have to use it. */ if (num < 1) num = 1; return num; } /* * init_offsets -- initialize offsets[] array depending on the selected mode */ static int init_offsets(struct benchmark_args *args, struct rpmem_bench *mb, enum operation_mode op_mode) { size_t n_ops_by_size = (mb->pool_size - POOL_HDR_SIZE) / (args->n_threads * mb->csize_align); mb->n_offsets = mb->n_flushing_ops_per_thread * args->n_threads; mb->offsets = (size_t *)malloc(mb->n_offsets * sizeof(*mb->offsets)); if (!mb->offsets) { perror("malloc"); return -1; } mb->offsets_pos = (size_t *)calloc(args->n_threads, sizeof(size_t)); if (!mb->offsets_pos) { perror("calloc"); free(mb->offsets); return -1; } rng_t rng; randomize_r(&rng, args->seed); for (size_t i = 0; i < args->n_threads; i++) { for (size_t j = 0; j < mb->n_flushing_ops_per_thread; j++) { size_t off_idx = i * mb->n_flushing_ops_per_thread + j; size_t chunk_idx; switch (op_mode) { case OP_MODE_STAT: chunk_idx = i; break; case OP_MODE_SEQ: chunk_idx = i * mb->n_flushing_ops_per_thread + j; break; case OP_MODE_RAND: chunk_idx = i * mb->n_flushing_ops_per_thread + rnd64_r(&rng) % mb->n_flushing_ops_per_thread; break; case OP_MODE_SEQ_WRAP: chunk_idx = i * n_ops_by_size + j % n_ops_by_size; break; case OP_MODE_RAND_WRAP: chunk_idx = i * n_ops_by_size + rnd64_r(&rng) % n_ops_by_size; break; default: assert(0); return -1; } mb->offsets[off_idx] = POOL_HDR_SIZE + chunk_idx * mb->csize_align + mb->pargs->dest_off; } } return 0; } /* * do_warmup -- does the warmup by writing the whole pool area */ static int do_warmup(struct rpmem_bench *mb) { /* clear the entire pool */ memset((char *)mb->pool + POOL_HDR_SIZE, 0, mb->pool_size - POOL_HDR_SIZE); for (unsigned r = 0; r < mb->nreplicas; ++r) { int ret = rpmem_persist(mb->rpp[r], POOL_HDR_SIZE, mb->pool_size - POOL_HDR_SIZE, 0, RPMEM_PERSIST_RELAXED); if (ret) return ret; } /* if no memset for each operation, do one big memset */ if (mb->pargs->no_memset) { memset((char *)mb->pool + POOL_HDR_SIZE, 0xFF, mb->pool_size - POOL_HDR_SIZE); } return 0; } /* * rpmem_mixed_op_flush -- perform rpmem_flush */ static inline int rpmem_mixed_op_flush(struct rpmem_bench *mb, struct operation_info *info) { size_t *pos = &mb->offsets_pos[info->worker->index]; uint64_t idx = info->worker->index * mb->n_flushing_ops_per_thread + *pos; assert(idx < mb->n_offsets); size_t offset = mb->offsets[idx]; size_t len = mb->pargs->chunk_size; if (!mb->pargs->no_memset) { void *dest = (char *)mb->pool + offset; /* thread id on MS 4 bits and operation id on LS 4 bits */ int c = ((info->worker->index & 0xf) << 4) + ((0xf & info->index)); memset(dest, c, len); } int ret = 0; for (unsigned r = 0; r < mb->nreplicas; ++r) { assert(info->worker->index < mb->nlanes[r]); ret = 
rpmem_flush(mb->rpp[r], offset, len, info->worker->index, mb->flags[info->worker->index]); if (ret) { fprintf(stderr, "rpmem_flush replica #%u: %s\n", r, rpmem_errormsg()); return ret; } } ++*pos; return 0; } /* * rpmem_mixed_op_drain -- perform rpmem_drain */ static inline int rpmem_mixed_op_drain(struct rpmem_bench *mb, struct operation_info *info) { int ret = 0; for (unsigned r = 0; r < mb->nreplicas; ++r) { ret = rpmem_drain(mb->rpp[r], info->worker->index, 0); if (unlikely(ret)) { fprintf(stderr, "rpmem_drain replica #%u: %s\n", r, rpmem_errormsg()); return ret; } } return 0; } /* * rpmem_flush_drain_op -- actual benchmark operation for the rpmem_flush_drain * benchmark */ static int rpmem_flush_drain_op(struct benchmark *bench, struct operation_info *info) { auto *mb = (struct rpmem_bench *)pmembench_get_priv(bench); int ret = 0; if (mb->pargs->flushes_per_drain != 0) { ret |= rpmem_mixed_op_flush(mb, info); /* no rpmem_drain() required */ if (mb->pargs->flushes_per_drain < 0) return ret; /* more rpmem_flush() required before rpmem_drain() */ if ((info->index + 1) % mb->pargs->flushes_per_drain != 0) return ret; /* rpmem_drain() required */ } ret |= rpmem_mixed_op_drain(mb, info); return ret; } /* * rpmem_persist_op -- actual benchmark operation for the rpmem_persist * benchmark */ static int rpmem_persist_op(struct benchmark *bench, struct operation_info *info) { auto *mb = (struct rpmem_bench *)pmembench_get_priv(bench); size_t *pos = &mb->offsets_pos[info->worker->index]; uint64_t idx = info->worker->index * mb->n_flushing_ops_per_thread + *pos; assert(idx < mb->n_offsets); size_t offset = mb->offsets[idx]; size_t len = mb->pargs->chunk_size; if (!mb->pargs->no_memset) { void *dest = (char *)mb->pool + offset; /* thread id on MS 4 bits and operation id on LS 4 bits */ int c = ((info->worker->index & 0xf) << 4) + ((0xf & info->index)); memset(dest, c, len); } int ret = 0; for (unsigned r = 0; r < mb->nreplicas; ++r) { assert(info->worker->index < mb->nlanes[r]); ret = rpmem_persist(mb->rpp[r], offset, len, info->worker->index, mb->flags[info->worker->index]); if (ret) { fprintf(stderr, "rpmem_persist replica #%u: %s\n", r, rpmem_errormsg()); return ret; } } ++*pos; return 0; } /* * rpmem_mixed_op -- actual benchmark operation for the rpmem_mixed * benchmark */ static int rpmem_mixed_op(struct benchmark *bench, struct operation_info *info) { auto *mb = (struct rpmem_bench *)pmembench_get_priv(bench); assert(mb->workload_len != 0); int ret = 0; for (size_t i = 0; i < mb->workload_len; ++i) { char op = mb->pargs->workload[i]; mb->flags[info->worker->index] = 0; switch (op) { case 'g': mb->flags[info->worker->index] = RPMEM_FLUSH_RELAXED; /* FALLTHROUGH */ case 'f': ret |= rpmem_mixed_op_flush(mb, info); break; case 'd': ret |= rpmem_mixed_op_drain(mb, info); break; case 'r': mb->flags[info->worker->index] = RPMEM_PERSIST_RELAXED; /* FALLTHROUGH */ case 'p': ret |= rpmem_persist_op(bench, info); break; default: fprintf(stderr, "unknown operation: %c", op); return 1; } } return ret; } /* * rpmem_map_file -- map local file */ static int rpmem_map_file(const char *path, struct rpmem_bench *mb, size_t size) { int mode; #ifndef _WIN32 mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; #else mode = S_IWRITE | S_IREAD; #endif mb->addrp = pmem_map_file(path, size, PMEM_FILE_CREATE, mode, &mb->mapped_len, nullptr); if (!mb->addrp) return -1; return 0; } /* * rpmem_unmap_file -- unmap local file */ static int rpmem_unmap_file(struct rpmem_bench *mb) { return pmem_unmap(mb->addrp, mb->mapped_len); } 
/* * rpmem_poolset_init -- read poolset file and initialize benchmark accordingly */ static int rpmem_poolset_init(const char *path, struct rpmem_bench *mb, struct benchmark_args *args) { struct pool_set *set; struct pool_replica *rep; struct remote_replica *remote; struct pool_set_part *part; struct rpmem_pool_attr attr; memset(&attr, 0, sizeof(attr)); memcpy(attr.signature, "PMEMBNCH", sizeof(attr.signature)); /* read and validate poolset */ if (util_poolset_read(&set, path)) { fprintf(stderr, "Invalid poolset file '%s'\n", path); return -1; } assert(set); if (set->nreplicas < 2) { fprintf(stderr, "No replicas defined\n"); goto err_poolset_free; } if (set->remote == 0) { fprintf(stderr, "No remote replicas defined\n"); goto err_poolset_free; } for (unsigned i = 1; i < set->nreplicas; ++i) { if (!set->replica[i]->remote) { fprintf(stderr, "Local replicas are not supported\n"); goto err_poolset_free; } } /* read and validate master replica */ rep = set->replica[0]; assert(rep); assert(rep->remote == nullptr); if (rep->nparts != 1) { fprintf(stderr, "Multipart master replicas are not supported\n"); goto err_poolset_free; } if (rep->repsize < mb->min_size) { fprintf(stderr, "A master replica is too small (%zu < %zu)\n", rep->repsize, mb->min_size); goto err_poolset_free; } part = (struct pool_set_part *)&rep->part[0]; if (rpmem_map_file(part->path, mb, rep->repsize)) { perror(part->path); goto err_poolset_free; } mb->pool_size = mb->mapped_len; mb->pool = (void *)((uintptr_t)mb->addrp); /* prepare remote replicas */ mb->nreplicas = set->nreplicas - 1; mb->nlanes = (unsigned *)malloc(mb->nreplicas * sizeof(unsigned)); if (mb->nlanes == nullptr) { perror("malloc"); goto err_unmap_file; } mb->rpp = (RPMEMpool **)malloc(mb->nreplicas * sizeof(RPMEMpool *)); if (mb->rpp == nullptr) { perror("malloc"); goto err_free_lanes; } unsigned r; for (r = 0; r < mb->nreplicas; ++r) { remote = set->replica[r + 1]->remote; assert(remote); mb->nlanes[r] = args->n_threads; /* Temporary WA for librpmem issue */ ++mb->nlanes[r]; mb->rpp[r] = rpmem_create(remote->node_addr, remote->pool_desc, mb->addrp, mb->pool_size, &mb->nlanes[r], &attr); if (!mb->rpp[r]) { perror("rpmem_create"); goto err_rpmem_close; } if (mb->nlanes[r] < args->n_threads) { fprintf(stderr, "Number of threads too large for replica #%u (max: %u)\n", r, mb->nlanes[r]); r++; /* close current replica */ goto err_rpmem_close; } } util_poolset_free(set); return 0; err_rpmem_close: for (unsigned i = 0; i < r; i++) rpmem_close(mb->rpp[i]); free(mb->rpp); err_free_lanes: free(mb->nlanes); err_unmap_file: rpmem_unmap_file(mb); err_poolset_free: util_poolset_free(set); return -1; } /* * rpmem_poolset_fini -- close opened local and remote replicas */ static void rpmem_poolset_fini(struct rpmem_bench *mb) { for (unsigned r = 0; r < mb->nreplicas; ++r) { rpmem_close(mb->rpp[r]); } free(mb->rpp); rpmem_unmap_file(mb); } /* * rpmem_set_min_size -- compute minimal file size based on benchmark arguments */ static void rpmem_set_min_size(struct rpmem_bench *mb, enum operation_mode op_mode, struct benchmark_args *args) { mb->csize_align = ALIGN_CL(mb->pargs->chunk_size); switch (op_mode) { case OP_MODE_STAT: mb->min_size = mb->csize_align * args->n_threads; break; case OP_MODE_SEQ: case OP_MODE_RAND: mb->min_size = mb->csize_align * args->n_ops_per_thread * args->n_threads; break; case OP_MODE_SEQ_WRAP: case OP_MODE_RAND_WRAP: /* * at least one chunk per thread to avoid false sharing */ mb->min_size = mb->csize_align * args->n_threads; break; default: assert(0); 
} mb->min_size += POOL_HDR_SIZE; } /* * rpmem_flags_init -- initialize flags[] array depending on the selected mode */ static int rpmem_flags_init(struct benchmark *bench, struct benchmark_args *args, struct rpmem_bench *mb) { assert(bench); struct benchmark_info *info = pmembench_get_info(bench); assert(info); mb->flags = (unsigned *)calloc(args->n_threads, sizeof(unsigned)); if (!mb->flags) { perror("calloc"); return -1; } unsigned relaxed_flag = 0; if (strcmp(info->name, BENCH_RPMEM_PERSIST_NAME) == 0) relaxed_flag = RPMEM_PERSIST_RELAXED; else if (strcmp(info->name, BENCH_RPMEM_FLUSH_NAME) == 0) relaxed_flag = RPMEM_FLUSH_RELAXED; /* for rpmem_mixed benchmark flags are set during the benchmark */ /* for rpmem_persist and rpmem_flush_drain benchmark all ops have the * same flags */ if (mb->pargs->relaxed) { for (unsigned i = 0; i < args->n_threads; ++i) mb->flags[i] = relaxed_flag; } return 0; } /* * rpmem_init -- initialization function */ static int rpmem_init(struct benchmark *bench, struct benchmark_args *args) { assert(bench != nullptr); assert(args != nullptr); assert(args->opts != nullptr); auto *mb = (struct rpmem_bench *)malloc(sizeof(struct rpmem_bench)); if (!mb) { perror("malloc"); return -1; } mb->pargs = (struct rpmem_args *)args->opts; mb->pargs->chunk_size = args->dsize; enum operation_mode op_mode = parse_op_mode(mb->pargs->mode); if (op_mode == OP_MODE_UNKNOWN) { fprintf(stderr, "Invalid operation mode argument '%s'\n", mb->pargs->mode); goto err_parse_mode; } if (rpmem_flags_init(bench, args, mb)) goto err_flags_init; mb->workload_len = 0; if (mb->pargs->workload) { mb->workload_len = strlen(mb->pargs->workload); assert(mb->workload_len > 0); } rpmem_set_min_size(mb, op_mode, args); if (rpmem_poolset_init(args->fname, mb, args)) { goto err_poolset_init; } /* initialize offsets[] array depending on benchmark args */ mb->n_flushing_ops_per_thread = get_flushing_op_num(bench, mb) * args->n_ops_per_thread; if (init_offsets(args, mb, op_mode) < 0) { goto err_init_offsets; } if (!mb->pargs->no_warmup) { if (do_warmup(mb) != 0) { fprintf(stderr, "do_warmup() function failed.\n"); goto err_warmup; } } pmembench_set_priv(bench, mb); return 0; err_warmup: free(mb->offsets_pos); free(mb->offsets); err_init_offsets: rpmem_poolset_fini(mb); err_poolset_init: free(mb->flags); err_flags_init: err_parse_mode: free(mb); return -1; } /* * rpmem_exit -- benchmark cleanup function */ static int rpmem_exit(struct benchmark *bench, struct benchmark_args *args) { auto *mb = (struct rpmem_bench *)pmembench_get_priv(bench); rpmem_poolset_fini(mb); free(mb->offsets_pos); free(mb->offsets); free(mb->flags); free(mb); return 0; } static struct benchmark_clo rpmem_flush_clo[6]; static struct benchmark_clo rpmem_persist_clo[5]; static struct benchmark_clo rpmem_mixed_clo[5]; /* Stores information about benchmark. 
*/ static struct benchmark_info rpmem_flush_info; static struct benchmark_info rpmem_persist_info; static struct benchmark_info rpmem_mixed_info; CONSTRUCTOR(rpmem_constructor) void rpmem_constructor(void) { static struct benchmark_clo common_clo[4]; static struct benchmark_info common_info; memset(&common_info, 0, sizeof(common_info)); /* common benchmarks definitions */ common_clo[0].opt_short = 'M'; common_clo[0].opt_long = "mem-mode"; common_clo[0].descr = "Memory writing mode :" " stat, seq[-wrap], rand[-wrap]"; common_clo[0].def = "seq"; common_clo[0].off = clo_field_offset(struct rpmem_args, mode); common_clo[0].type = CLO_TYPE_STR; common_clo[1].opt_short = 'D'; common_clo[1].opt_long = "dest-offset"; common_clo[1].descr = "Destination cache line " "alignment offset"; common_clo[1].def = "0"; common_clo[1].off = clo_field_offset(struct rpmem_args, dest_off); common_clo[1].type = CLO_TYPE_UINT; common_clo[1].type_uint.size = clo_field_size(struct rpmem_args, dest_off); common_clo[1].type_uint.base = CLO_INT_BASE_DEC; common_clo[1].type_uint.min = 0; common_clo[1].type_uint.max = MAX_OFFSET; common_clo[2].opt_short = 'w'; common_clo[2].opt_long = "no-warmup"; common_clo[2].descr = "Don't do warmup"; common_clo[2].def = "false"; common_clo[2].type = CLO_TYPE_FLAG; common_clo[2].off = clo_field_offset(struct rpmem_args, no_warmup); common_clo[3].opt_short = 'T'; common_clo[3].opt_long = "no-memset"; common_clo[3].descr = "Don't call memset for all rpmem_persist"; common_clo[3].def = "false"; common_clo[3].off = clo_field_offset(struct rpmem_args, no_memset); common_clo[3].type = CLO_TYPE_FLAG; common_info.init = rpmem_init; common_info.exit = rpmem_exit; common_info.multithread = true; common_info.multiops = true; common_info.measure_time = true; common_info.opts_size = sizeof(struct rpmem_args); common_info.rm_file = true; common_info.allow_poolset = true; common_info.print_bandwidth = true; /* rpmem_flush_drain benchmark definitions */ assert(sizeof(rpmem_flush_clo) >= sizeof(common_clo)); memcpy(rpmem_flush_clo, common_clo, sizeof(common_clo)); rpmem_flush_clo[4].opt_short = 0; rpmem_flush_clo[4].opt_long = "flushes-per-drain"; rpmem_flush_clo[4].descr = "Number of flushes between drains (-1 means flushes only)"; rpmem_flush_clo[4].def = "-1"; rpmem_flush_clo[4].off = clo_field_offset(struct rpmem_args, flushes_per_drain); rpmem_flush_clo[4].type = CLO_TYPE_INT; rpmem_flush_clo[4].type_int.size = clo_field_size(struct rpmem_args, flushes_per_drain); rpmem_flush_clo[4].type_int.base = CLO_INT_BASE_DEC; rpmem_flush_clo[4].type_int.min = -1; rpmem_flush_clo[4].type_int.max = INT_MAX; rpmem_flush_clo[5].opt_short = 0; rpmem_flush_clo[5].opt_long = "flush-relaxed"; rpmem_flush_clo[5].descr = "Use RPMEM_FLUSH_RELAXED flag"; rpmem_flush_clo[5].def = "false"; rpmem_flush_clo[5].off = clo_field_offset(struct rpmem_args, relaxed); rpmem_flush_clo[5].type = CLO_TYPE_FLAG; memcpy(&rpmem_flush_info, &common_info, sizeof(common_info)); rpmem_flush_info.name = BENCH_RPMEM_FLUSH_NAME; rpmem_flush_info.brief = "Benchmark for rpmem_flush() and rpmem_drain() operations"; rpmem_flush_info.operation = rpmem_flush_drain_op; rpmem_flush_info.clos = rpmem_flush_clo; rpmem_flush_info.nclos = ARRAY_SIZE(rpmem_flush_clo); REGISTER_BENCHMARK(rpmem_flush_info); /* rpmem_persist benchmark definitions */ assert(sizeof(rpmem_persist_clo) >= sizeof(common_clo)); memcpy(rpmem_persist_clo, common_clo, sizeof(common_clo)); rpmem_persist_clo[4].opt_short = 0; rpmem_persist_clo[4].opt_long = "persist-relaxed"; 
rpmem_persist_clo[4].descr = "Use RPMEM_PERSIST_RELAXED flag"; rpmem_persist_clo[4].def = "false"; rpmem_persist_clo[4].off = clo_field_offset(struct rpmem_args, relaxed); rpmem_persist_clo[4].type = CLO_TYPE_FLAG; memcpy(&rpmem_persist_info, &common_info, sizeof(common_info)); rpmem_persist_info.name = BENCH_RPMEM_PERSIST_NAME; rpmem_persist_info.brief = "Benchmark for rpmem_persist() operation"; rpmem_persist_info.operation = rpmem_persist_op; rpmem_persist_info.clos = rpmem_persist_clo; rpmem_persist_info.nclos = ARRAY_SIZE(rpmem_persist_clo); REGISTER_BENCHMARK(rpmem_persist_info); /* rpmem_mixed benchmark definitions */ assert(sizeof(rpmem_mixed_clo) >= sizeof(common_clo)); memcpy(rpmem_mixed_clo, common_clo, sizeof(common_clo)); rpmem_mixed_clo[4].opt_short = 0; rpmem_mixed_clo[4].opt_long = "workload"; rpmem_mixed_clo[4].descr = "Workload e.g.: 'prfgd' means " "rpmem_persist(), " "rpmem_persist() + RPMEM_PERSIST_RELAXED, " "rpmem_flush()," "rpmem_flush() + RPMEM_FLUSH_RELAXED " "and rpmem_drain()"; rpmem_mixed_clo[4].def = "fd"; rpmem_mixed_clo[4].off = clo_field_offset(struct rpmem_args, workload); rpmem_mixed_clo[4].type = CLO_TYPE_STR; memcpy(&rpmem_mixed_info, &common_info, sizeof(common_info)); rpmem_mixed_info.name = BENCH_RPMEM_MIXED_NAME; rpmem_mixed_info.brief = "Benchmark for mixed rpmem workloads"; rpmem_mixed_info.operation = rpmem_mixed_op; rpmem_mixed_info.clos = rpmem_mixed_clo; rpmem_mixed_info.nclos = ARRAY_SIZE(rpmem_mixed_clo); REGISTER_BENCHMARK(rpmem_mixed_info); };
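A worked example, illustration only, of the pool sizing done by ALIGN_CL() and rpmem_set_min_size() above; the chunk size, thread count and op count are arbitrary numbers chosen for the arithmetic.

static void
min_size_example(void)
{
	size_t chunk_size = 100;
	size_t csize_align = ALIGN_CL(chunk_size);	/* rounds up to 128 */

	size_t n_threads = 4;
	size_t n_ops_per_thread = 1000;

	/* OP_MODE_SEQ / OP_MODE_RAND: one chunk per op per thread */
	size_t min_size = csize_align * n_ops_per_thread * n_threads +
		POOL_HDR_SIZE;	/* 512000 bytes of chunks plus the header */

	/* OP_MODE_STAT and the *-wrap modes need only one chunk per thread */
	size_t min_size_stat = csize_align * n_threads + POOL_HDR_SIZE;

	(void) min_size;
	(void) min_size_stat;
}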
23,301
25.50967
79
cpp
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/config_reader_win.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * config_reader_win.cpp -- config reader module definitions */ #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <tchar.h> #include "config_reader.hpp" #include "queue.h" #include "scenario.hpp" #define SECTION_GLOBAL TEXT("global") #define KEY_BENCHMARK TEXT("bench") #define KEY_GROUP TEXT("group") /* * Maximum section size according to MSDN documentation */ #define SIZEOF_SECTION 32767 #define NULL_LIST_EMPTY(x) (_tcslen(x) == 0) #define NULL_LIST_NEXT(x) ((x) += (_tcslen(x) + 1)) #define KV_LIST_EMPTY(x) (_tcslen(x) == 0) #define KV_FIRST(x) #define KV_LIST_NEXT(x) \ ((x) += (_tcslen(x) + 1), (x) += (_tcslen(x) + 1), \ (x) = kv_list_skip_comment(x)) #define KV_LIST_KEY(x) (x) #define KV_LIST_VALUE(x) ((x) + _tcslen(x) + 1) #define KV_LIST_INIT(x) kv_list_init(x) #define LIST LPTSTR #define KV_LIST LPTSTR /* * kv_list_skip_comment -- skip comment lines in ini file */ static KV_LIST kv_list_skip_comment(KV_LIST list) { while (list[0] == TEXT('#')) list += (_tcslen(list) + 1); return list; } /* * kv_list_init -- init KV list */ static KV_LIST kv_list_init(LPTSTR list) { list = kv_list_skip_comment(list); for (KV_LIST it = list; !KV_LIST_EMPTY(it); KV_LIST_NEXT(it)) { LPTSTR c = _tcsstr(it, TEXT("=")); if (c == NULL) return NULL; *c = TEXT('\0'); } return list; } /* * config_reader -- handle structure */ struct config_reader { LPTSTR lpFileName; }; /* * config_reader_alloc -- allocate config reader */ struct config_reader * config_reader_alloc(void) { struct config_reader *cr = (struct config_reader *)malloc(sizeof(*cr)); if (cr == NULL) return NULL; return cr; } /* * config_reader_read -- read config file */ int config_reader_read(struct config_reader *cr, const char *fname) { DWORD len = 0; LPTSTR buf = TEXT(" "); /* get the length of the full pathname incl. terminating null char */ len = GetFullPathName((LPTSTR)fname, 0, buf, NULL); if (len == 0) { /* the function failed */ return -1; } else { /* allocate a buffer large enough to store the pathname */ LPTSTR buffer = (LPTSTR)malloc(len * sizeof(TCHAR)); DWORD ret = GetFullPathName((LPTSTR)fname, len, buffer, NULL); if (_taccess(buffer, 0) != 0) { printf("%s", strerror(errno)); return -1; } cr->lpFileName = (LPTSTR)buffer; } return 0; } /* * config_reader_free -- free config reader */ void config_reader_free(struct config_reader *cr) { free(cr); } /* * is_scenario -- (internal) return true if _name_ is scenario name * * This filters out the _global_ and _config_ sections. */ static int is_scenario(LPTSTR name) { return _tcscmp(name, SECTION_GLOBAL); } /* * is_argument -- (internal) return true if _name_ is argument name * * This filters out the _benchmark_ key. */ static int is_argument(LPTSTR name) { return _tcscmp(name, KEY_BENCHMARK) != 0 && _tcscmp(name, KEY_GROUP) != 0; } /* * config_reader_get_scenarios -- return scenarios from config file * * This function reads the config file and returns a list of scenarios. * Each scenario contains a list of key/value arguments. * The scenario's arguments are merged with arguments from global section. */ int config_reader_get_scenarios(struct config_reader *cr, struct scenarios **scenarios) { /* * Read all groups. * The config file must have at least one group, otherwise * it is considered as invalid. 
*/ int ret = 0; TCHAR *sections = (TCHAR *)malloc(sizeof(TCHAR) * SIZEOF_SECTION); if (!sections) return -1; GetPrivateProfileSectionNames(sections, SIZEOF_SECTION, cr->lpFileName); if (NULL_LIST_EMPTY(sections)) { ret = -1; goto err_sections; } /* * Check if global section is present and read it. */ TCHAR *global = (TCHAR *)malloc(sizeof(TCHAR) * SIZEOF_SECTION); if (!global) return -1; GetPrivateProfileSection(SECTION_GLOBAL, global, SIZEOF_SECTION, cr->lpFileName); KV_LIST global_kv = KV_LIST_INIT(global); int has_global = !KV_LIST_EMPTY(global_kv); struct scenarios *s = scenarios_alloc(); assert(NULL != s); if (!s) { ret = -1; goto err_gkeys; } LPTSTR global_group = NULL; for (KV_LIST it = global_kv; !KV_LIST_EMPTY(it); KV_LIST_NEXT(it)) { if (_tcscmp(KV_LIST_KEY(it), KEY_GROUP) == 0) { global_group = KV_LIST_VALUE(it); break; } } TCHAR *section; for (LPTSTR group_name = sections; !NULL_LIST_EMPTY(group_name); group_name = NULL_LIST_NEXT(group_name)) { /* * Check whether a group is a scenario * or global section. */ if (!is_scenario(group_name)) continue; /* * Check for KEY_BENCHMARK which contains benchmark name. * If not present the benchmark name is the same as the * name of the section. */ section = (TCHAR *)malloc(sizeof(TCHAR) * SIZEOF_SECTION); if (!section) ret = -1; GetPrivateProfileSection(group_name, section, SIZEOF_SECTION, cr->lpFileName); KV_LIST section_kv = KV_LIST_INIT(section); struct scenario *scenario = NULL; LPTSTR name = NULL; LPTSTR group = NULL; for (KV_LIST it = section_kv; !KV_LIST_EMPTY(it); KV_LIST_NEXT(it)) { if (_tcscmp(KV_LIST_KEY(it), KEY_BENCHMARK) == 0) { name = KV_LIST_VALUE(it); } if (_tcscmp(KV_LIST_KEY(it), KEY_GROUP) == 0) { group = KV_LIST_VALUE(it); } } if (name == NULL) { scenario = scenario_alloc((const char *)group_name, (const char *)group_name); } else { scenario = scenario_alloc((const char *)group_name, (const char *)name); } assert(scenario != NULL); if (has_global) { /* * Merge key/values from global section. */ for (KV_LIST it = global_kv; !KV_LIST_EMPTY(it); KV_LIST_NEXT(it)) { LPTSTR key = KV_LIST_KEY(it); if (!is_argument(key)) continue; LPTSTR value = KV_LIST_VALUE(it); assert(NULL != value); if (!value) { ret = -1; goto err_scenarios; } struct kv *kv = kv_alloc((const char *)key, (const char *)value); assert(NULL != kv); if (!kv) { ret = -1; goto err_scenarios; } PMDK_TAILQ_INSERT_TAIL(&scenario->head, kv, next); } } /* check for group name */ if (group) { scenario_set_group(scenario, (const char *)group); } else if (global_group) { scenario_set_group(scenario, (const char *)global_group); } for (KV_LIST it = section_kv; !KV_LIST_EMPTY(it); KV_LIST_NEXT(it)) { LPTSTR key = KV_LIST_KEY(it); if (!is_argument(key)) continue; LPTSTR value = KV_LIST_VALUE(it); assert(NULL != value); if (!value) { ret = -1; goto err_scenarios; } struct kv *kv = kv_alloc((const char *)key, (const char *)value); assert(NULL != kv); if (!kv) { ret = -1; goto err_scenarios; } PMDK_TAILQ_INSERT_TAIL(&scenario->head, kv, next); } PMDK_TAILQ_INSERT_TAIL(&s->head, scenario, next); free(section); } *scenarios = s; free(global); free(sections); return 0; err_scenarios: free(section); scenarios_free(s); err_gkeys: free(global); err_sections: free(sections); return ret; }
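An illustration, not part of the original file, of the buffer layout kv_list_init() operates on, plus a small dump helper built from the list macros above; the key/value strings are invented. GetPrivateProfileSection() returns a double-null-terminated list of "key=value" strings, and kv_list_init() turns every '=' into a terminator so that KV_LIST_KEY and KV_LIST_VALUE point into the same buffer.

/*
 *   before kv_list_init():  bench=pmem_memcpy \0  data-size=4096 \0  \0
 *   after  kv_list_init():  bench \0 pmem_memcpy \0  data-size \0 4096 \0  \0
 */
static void
kv_list_dump(KV_LIST list)
{
	/* `list' must already have been processed by kv_list_init() */
	for (KV_LIST it = list; !KV_LIST_EMPTY(it); KV_LIST_NEXT(it))
		_tprintf(TEXT("%s = %s\n"), KV_LIST_KEY(it),
				KV_LIST_VALUE(it));
}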
7,258
20.99697
80
cpp
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/obj_pmalloc.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_pmalloc.cpp -- pmalloc benchmarks definition */ #include <cassert> #include <cerrno> #include <cinttypes> #include <cstdio> #include <cstdlib> #include <cstring> #include <fcntl.h> #include <unistd.h> #include "benchmark.hpp" #include "file.h" #include "libpmemobj.h" #include "memops.h" #include "os.h" #include "pmalloc.h" #include "poolset_util.hpp" #include "valgrind_internal.h" /* * The factor used for PMEM pool size calculation, accounts for metadata, * fragmentation and etc. */ #define FACTOR 1.2f /* The minimum allocation size that pmalloc can perform */ #define ALLOC_MIN_SIZE 64 /* OOB and allocation header size */ #define OOB_HEADER_SIZE 64 /* * prog_args - command line parsed arguments */ struct prog_args { size_t minsize; /* minimum size for random allocation size */ bool use_random_size; /* if set, use random size allocations */ unsigned seed; /* PRNG seed */ }; POBJ_LAYOUT_BEGIN(pmalloc_layout); POBJ_LAYOUT_ROOT(pmalloc_layout, struct my_root); POBJ_LAYOUT_TOID(pmalloc_layout, uint64_t); POBJ_LAYOUT_END(pmalloc_layout); /* * my_root - root object */ struct my_root { TOID(uint64_t) offs; /* vector of the allocated object offsets */ }; /* * obj_bench - variables used in benchmark, passed within functions */ struct obj_bench { PMEMobjpool *pop; /* persistent pool handle */ struct prog_args *pa; /* prog_args structure */ size_t *sizes; /* sizes for allocations */ TOID(struct my_root) root; /* root object's OID */ uint64_t *offs; /* pointer to the vector of offsets */ }; /* * obj_init -- common part of the benchmark initialization for pmalloc and * pfree. It allocates the PMEM memory pool and the necessary offset vector. */ static int obj_init(struct benchmark *bench, struct benchmark_args *args) { struct my_root *root = nullptr; assert(bench != nullptr); assert(args != nullptr); assert(args->opts != nullptr); char path[PATH_MAX]; if (util_safe_strcpy(path, args->fname, sizeof(path)) != 0) return -1; enum file_type type = util_file_get_type(args->fname); if (type == OTHER_ERROR) { fprintf(stderr, "could not check type of file %s\n", args->fname); return -1; } if (((struct prog_args *)(args->opts))->minsize >= args->dsize) { fprintf(stderr, "Wrong params - allocation size\n"); return -1; } auto *ob = (struct obj_bench *)malloc(sizeof(struct obj_bench)); if (ob == nullptr) { perror("malloc"); return -1; } pmembench_set_priv(bench, ob); ob->pa = (struct prog_args *)args->opts; size_t n_ops_total = args->n_ops_per_thread * args->n_threads; assert(n_ops_total != 0); /* Create pmemobj pool. */ size_t alloc_size = args->dsize; if (alloc_size < ALLOC_MIN_SIZE) alloc_size = ALLOC_MIN_SIZE; /* For data objects */ size_t poolsize = PMEMOBJ_MIN_POOL + (n_ops_total * (alloc_size + OOB_HEADER_SIZE)) /* for offsets */ + n_ops_total * sizeof(uint64_t); /* multiply by FACTOR for metadata, fragmentation, etc. 
*/ poolsize = (size_t)(poolsize * FACTOR); if (args->is_poolset || type == TYPE_DEVDAX) { if (args->fsize < poolsize) { fprintf(stderr, "file size too large\n"); goto free_ob; } poolsize = 0; } else if (poolsize < PMEMOBJ_MIN_POOL) { poolsize = PMEMOBJ_MIN_POOL; } if (args->is_dynamic_poolset) { int ret = dynamic_poolset_create(args->fname, poolsize); if (ret == -1) goto free_ob; if (util_safe_strcpy(path, POOLSET_PATH, sizeof(path)) != 0) goto free_ob; poolsize = 0; } ob->pop = pmemobj_create(path, POBJ_LAYOUT_NAME(pmalloc_layout), poolsize, args->fmode); if (ob->pop == nullptr) { fprintf(stderr, "%s\n", pmemobj_errormsg()); goto free_ob; } ob->root = POBJ_ROOT(ob->pop, struct my_root); if (TOID_IS_NULL(ob->root)) { fprintf(stderr, "POBJ_ROOT: %s\n", pmemobj_errormsg()); goto free_pop; } root = D_RW(ob->root); assert(root != nullptr); POBJ_ZALLOC(ob->pop, &root->offs, uint64_t, n_ops_total * sizeof(PMEMoid)); if (TOID_IS_NULL(root->offs)) { fprintf(stderr, "POBJ_ZALLOC off_vect: %s\n", pmemobj_errormsg()); goto free_pop; } ob->offs = D_RW(root->offs); ob->sizes = (size_t *)malloc(n_ops_total * sizeof(size_t)); if (ob->sizes == nullptr) { fprintf(stderr, "malloc rand size vect err\n"); goto free_pop; } if (ob->pa->use_random_size) { size_t width = args->dsize - ob->pa->minsize; for (size_t i = 0; i < n_ops_total; i++) { auto hr = (uint32_t)os_rand_r(&ob->pa->seed); auto lr = (uint32_t)os_rand_r(&ob->pa->seed); uint64_t r64 = (uint64_t)hr << 32 | lr; ob->sizes[i] = r64 % width + ob->pa->minsize; } } else { for (size_t i = 0; i < n_ops_total; i++) ob->sizes[i] = args->dsize; } return 0; free_pop: pmemobj_close(ob->pop); free_ob: free(ob); return -1; } /* * obj_exit -- common part for the exit function for pmalloc and pfree * benchmarks. It frees the allocated offset vector and the memory pool. */ static int obj_exit(struct benchmark *bench, struct benchmark_args *args) { auto *ob = (struct obj_bench *)pmembench_get_priv(bench); free(ob->sizes); POBJ_FREE(&D_RW(ob->root)->offs); pmemobj_close(ob->pop); return 0; } /* * pmalloc_init -- initialization for the pmalloc benchmark. Performs only the * common initialization. */ static int pmalloc_init(struct benchmark *bench, struct benchmark_args *args) { return obj_init(bench, args); } /* * pmalloc_op -- actual benchmark operation. Performs the pmalloc allocations. 
*/ static int pmalloc_op(struct benchmark *bench, struct operation_info *info) { auto *ob = (struct obj_bench *)pmembench_get_priv(bench); uint64_t i = info->index + info->worker->index * info->args->n_ops_per_thread; int ret = pmalloc(ob->pop, &ob->offs[i], ob->sizes[i], 0, 0); if (ret) { fprintf(stderr, "pmalloc ret: %d\n", ret); return ret; } return 0; } struct pmix_worker { size_t nobjects; size_t shuffle_start; rng_t rng; }; /* * pmix_worker_init -- initialization of the worker structure */ static int pmix_worker_init(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { struct pmix_worker *w = (struct pmix_worker *)calloc(1, sizeof(*w)); auto *ob = (struct obj_bench *)pmembench_get_priv(bench); if (w == nullptr) return -1; randomize_r(&w->rng, ob->pa->seed); worker->priv = w; return 0; } /* * pmix_worker_fini -- destruction of the worker structure */ static void pmix_worker_fini(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { auto *w = (struct pmix_worker *)worker->priv; free(w); } /* * shuffle_objects -- randomly shuffle elements on a list * * Ideally, we wouldn't count the time this function takes, but for all * practical purposes this is fast enough and isn't visible on the results. * Just make sure the amount of objects to shuffle is not large. */ static void shuffle_objects(uint64_t *objects, size_t start, size_t nobjects, rng_t *rng) { uint64_t tmp; size_t dest; for (size_t n = start; n < nobjects; ++n) { dest = RRAND_R(rng, nobjects - 1, 0); tmp = objects[n]; objects[n] = objects[dest]; objects[dest] = tmp; } } #define FREE_PCT 10 #define FREE_OPS 10 /* * pmix_op -- mixed workload benchmark */ static int pmix_op(struct benchmark *bench, struct operation_info *info) { auto *ob = (struct obj_bench *)pmembench_get_priv(bench); auto *w = (struct pmix_worker *)info->worker->priv; uint64_t idx = info->worker->index * info->args->n_ops_per_thread; uint64_t *objects = &ob->offs[idx]; if (w->nobjects > FREE_OPS && FREE_PCT > RRAND_R(&w->rng, 100, 0)) { shuffle_objects(objects, w->shuffle_start, w->nobjects, &w->rng); for (int i = 0; i < FREE_OPS; ++i) { uint64_t off = objects[--w->nobjects]; pfree(ob->pop, &off); } w->shuffle_start = w->nobjects; } else { int ret = pmalloc(ob->pop, &objects[w->nobjects++], ob->sizes[idx + info->index], 0, 0); if (ret) { fprintf(stderr, "pmalloc ret: %d\n", ret); return ret; } } return 0; } /* * pmalloc_exit -- the end of the pmalloc benchmark. Frees the memory allocated * during pmalloc_op and performs the common exit operations. */ static int pmalloc_exit(struct benchmark *bench, struct benchmark_args *args) { auto *ob = (struct obj_bench *)pmembench_get_priv(bench); for (size_t i = 0; i < args->n_ops_per_thread * args->n_threads; i++) { if (ob->offs[i]) pfree(ob->pop, &ob->offs[i]); } return obj_exit(bench, args); } /* * pfree_init -- initialization for the pfree benchmark. Performs the common * initialization and allocates the memory to be freed during pfree_op. 
*/ static int pfree_init(struct benchmark *bench, struct benchmark_args *args) { int ret = obj_init(bench, args); if (ret) return ret; auto *ob = (struct obj_bench *)pmembench_get_priv(bench); for (size_t i = 0; i < args->n_ops_per_thread * args->n_threads; i++) { ret = pmalloc(ob->pop, &ob->offs[i], ob->sizes[i], 0, 0); if (ret) { fprintf(stderr, "pmalloc at idx %" PRIu64 " ret: %s\n", i, pmemobj_errormsg()); /* free the allocated memory */ while (i != 0) { pfree(ob->pop, &ob->offs[i - 1]); i--; } obj_exit(bench, args); return ret; } } return 0; } /* * pmalloc_op -- actual benchmark operation. Performs the pfree operation. */ static int pfree_op(struct benchmark *bench, struct operation_info *info) { auto *ob = (struct obj_bench *)pmembench_get_priv(bench); uint64_t i = info->index + info->worker->index * info->args->n_ops_per_thread; pfree(ob->pop, &ob->offs[i]); return 0; } /* command line options definition */ static struct benchmark_clo pmalloc_clo[3]; /* * Stores information about pmalloc benchmark. */ static struct benchmark_info pmalloc_info; /* * Stores information about pfree benchmark. */ static struct benchmark_info pfree_info; /* * Stores information about pmix benchmark. */ static struct benchmark_info pmix_info; CONSTRUCTOR(obj_pmalloc_constructor) void obj_pmalloc_constructor(void) { pmalloc_clo[0].opt_short = 'r'; pmalloc_clo[0].opt_long = "random"; pmalloc_clo[0].descr = "Use random size allocations - " "from min-size to data-size"; pmalloc_clo[0].off = clo_field_offset(struct prog_args, use_random_size); pmalloc_clo[0].type = CLO_TYPE_FLAG; pmalloc_clo[1].opt_short = 'm'; pmalloc_clo[1].opt_long = "min-size"; pmalloc_clo[1].descr = "Minimum size of allocation for " "random mode"; pmalloc_clo[1].type = CLO_TYPE_UINT; pmalloc_clo[1].off = clo_field_offset(struct prog_args, minsize); pmalloc_clo[1].def = "1"; pmalloc_clo[1].type_uint.size = clo_field_size(struct prog_args, minsize); pmalloc_clo[1].type_uint.base = CLO_INT_BASE_DEC; pmalloc_clo[1].type_uint.min = 1; pmalloc_clo[1].type_uint.max = UINT64_MAX; pmalloc_clo[2].opt_short = 'S'; pmalloc_clo[2].opt_long = "seed"; pmalloc_clo[2].descr = "Random mode seed value"; pmalloc_clo[2].off = clo_field_offset(struct prog_args, seed); pmalloc_clo[2].def = "1"; pmalloc_clo[2].type = CLO_TYPE_UINT; pmalloc_clo[2].type_uint.size = clo_field_size(struct prog_args, seed); pmalloc_clo[2].type_uint.base = CLO_INT_BASE_DEC; pmalloc_clo[2].type_uint.min = 1; pmalloc_clo[2].type_uint.max = UINT_MAX; pmalloc_info.name = "pmalloc", pmalloc_info.brief = "Benchmark for internal pmalloc() " "operation"; pmalloc_info.init = pmalloc_init; pmalloc_info.exit = pmalloc_exit; pmalloc_info.multithread = true; pmalloc_info.multiops = true; pmalloc_info.operation = pmalloc_op; pmalloc_info.measure_time = true; pmalloc_info.clos = pmalloc_clo; pmalloc_info.nclos = ARRAY_SIZE(pmalloc_clo); pmalloc_info.opts_size = sizeof(struct prog_args); pmalloc_info.rm_file = true; pmalloc_info.allow_poolset = true; REGISTER_BENCHMARK(pmalloc_info); pfree_info.name = "pfree"; pfree_info.brief = "Benchmark for internal pfree() " "operation"; pfree_info.init = pfree_init; pfree_info.exit = pmalloc_exit; /* same as for pmalloc */ pfree_info.multithread = true; pfree_info.multiops = true; pfree_info.operation = pfree_op; pfree_info.measure_time = true; pfree_info.clos = pmalloc_clo; pfree_info.nclos = ARRAY_SIZE(pmalloc_clo); pfree_info.opts_size = sizeof(struct prog_args); pfree_info.rm_file = true; pfree_info.allow_poolset = true; REGISTER_BENCHMARK(pfree_info); 
pmix_info.name = "pmix"; pmix_info.brief = "Benchmark for mixed alloc/free workload"; pmix_info.init = pmalloc_init; pmix_info.exit = pmalloc_exit; /* same as for pmalloc */ pmix_info.multithread = true; pmix_info.multiops = true; pmix_info.operation = pmix_op; pmix_info.init_worker = pmix_worker_init; pmix_info.free_worker = pmix_worker_fini; pmix_info.measure_time = true; pmix_info.clos = pmalloc_clo; pmix_info.nclos = ARRAY_SIZE(pmalloc_clo); pmix_info.opts_size = sizeof(struct prog_args); pmix_info.rm_file = true; pmix_info.allow_poolset = true; REGISTER_BENCHMARK(pmix_info); };
13,031
24.805941
79
cpp
null
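obj_pmalloc.cpp above drives the internal pmalloc()/pfree() entry points directly. For orientation, a rough public-API counterpart using libpmemobj's documented calls is sketched below; it is not taken from the repository, and the pool path "/pmem/alloc.pool" and layout name "alloc_sketch" are illustrative.

// Sketch of the allocate/free round trip through the public libpmemobj API,
// which the internal pmalloc/pfree paths benchmarked above sit underneath.
#include <libpmemobj.h>
#include <cstdio>

int main()
{
	PMEMobjpool *pop = pmemobj_create("/pmem/alloc.pool", "alloc_sketch",
					  PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL) {
		fprintf(stderr, "%s\n", pmemobj_errormsg());
		return 1;
	}

	PMEMoid oid;
	/* allocate 64 bytes of persistent memory with type number 0 */
	if (pmemobj_alloc(pop, &oid, 64, 0, NULL, NULL)) {
		fprintf(stderr, "%s\n", pmemobj_errormsg());
		pmemobj_close(pop);
		return 1;
	}

	pmemobj_free(&oid);	/* frees the object and zeroes the handle */
	pmemobj_close(pop);
	return 0;
}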
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/map_bench.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * map_bench.cpp -- benchmarks for: ctree, btree, rtree, rbtree, hashmap_atomic * and hashmap_tx from examples. */ #include <cassert> #include "benchmark.hpp" #include "file.h" #include "os.h" #include "os_thread.h" #include "poolset_util.hpp" #include "map.h" #include "map_btree.h" #include "map_ctree.h" #include "map_hashmap_atomic.h" #include "map_hashmap_rp.h" #include "map_hashmap_tx.h" #include "map_rbtree.h" #include "map_rtree.h" /* Values less than 3 is not suitable for current rtree implementation */ #define FACTOR 3 #define ALLOC_OVERHEAD 64 TOID_DECLARE_ROOT(struct root); struct root { TOID(struct map) map; }; #define OBJ_TYPE_NUM 1 #define swap(a, b) \ do { \ __typeof__(a) _tmp = (a); \ (a) = (b); \ (b) = _tmp; \ } while (0) /* Values less than 2048 is not suitable for current rtree implementation */ #define SIZE_PER_KEY 2048 static const struct { const char *str; const struct map_ops *ops; } map_types[] = { {"ctree", MAP_CTREE}, {"btree", MAP_BTREE}, {"rtree", MAP_RTREE}, {"rbtree", MAP_RBTREE}, {"hashmap_tx", MAP_HASHMAP_TX}, {"hashmap_atomic", MAP_HASHMAP_ATOMIC}, {"hashmap_rp", MAP_HASHMAP_RP}}; #define MAP_TYPES_NUM (sizeof(map_types) / sizeof(map_types[0])) struct map_bench_args { unsigned seed; uint64_t max_key; char *type; bool ext_tx; bool alloc; }; struct map_bench_worker { uint64_t *keys; size_t nkeys; }; struct map_bench { struct map_ctx *mapc; os_mutex_t lock; PMEMobjpool *pop; size_t pool_size; size_t nkeys; size_t init_nkeys; uint64_t *keys; struct benchmark_args *args; struct map_bench_args *margs; TOID(struct root) root; PMEMoid root_oid; TOID(struct map) map; int (*insert)(struct map_bench *, uint64_t); int (*remove)(struct map_bench *, uint64_t); int (*get)(struct map_bench *, uint64_t); }; /* * mutex_lock_nofail -- locks mutex and aborts if locking failed */ static void mutex_lock_nofail(os_mutex_t *lock) { errno = os_mutex_lock(lock); if (errno) { perror("os_mutex_lock"); abort(); } } /* * mutex_unlock_nofail -- unlocks mutex and aborts if unlocking failed */ static void mutex_unlock_nofail(os_mutex_t *lock) { errno = os_mutex_unlock(lock); if (errno) { perror("os_mutex_unlock"); abort(); } } /* * get_key -- return 64-bit random key */ static uint64_t get_key(unsigned *seed, uint64_t max_key) { unsigned key_lo = os_rand_r(seed); unsigned key_hi = os_rand_r(seed); uint64_t key = (((uint64_t)key_hi) << 32) | ((uint64_t)key_lo); if (max_key) key = key % max_key; return key; } /* * parse_map_type -- parse type of map */ static const struct map_ops * parse_map_type(const char *str) { for (unsigned i = 0; i < MAP_TYPES_NUM; i++) { if (strcmp(str, map_types[i].str) == 0) return map_types[i].ops; } return nullptr; } /* * map_remove_free_op -- remove and free object from map */ static int map_remove_free_op(struct map_bench *map_bench, uint64_t key) { volatile int ret = 0; TX_BEGIN(map_bench->pop) { PMEMoid val = map_remove(map_bench->mapc, map_bench->map, key); if (OID_IS_NULL(val)) ret = -1; else pmemobj_tx_free(val); } TX_ONABORT { ret = -1; } TX_END return ret; } /* * map_remove_root_op -- remove root object from map */ static int map_remove_root_op(struct map_bench *map_bench, uint64_t key) { PMEMoid val = map_remove(map_bench->mapc, map_bench->map, key); return !OID_EQUALS(val, map_bench->root_oid); } /* * map_remove_op -- main operation for map_remove benchmark */ static int map_remove_op(struct benchmark *bench, struct operation_info *info) { auto *map_bench = (struct 
map_bench *)pmembench_get_priv(bench); auto *tworker = (struct map_bench_worker *)info->worker->priv; uint64_t key = tworker->keys[info->index]; mutex_lock_nofail(&map_bench->lock); int ret = map_bench->remove(map_bench, key); mutex_unlock_nofail(&map_bench->lock); return ret; } /* * map_insert_alloc_op -- allocate an object and insert to map */ static int map_insert_alloc_op(struct map_bench *map_bench, uint64_t key) { int ret = 0; TX_BEGIN(map_bench->pop) { PMEMoid oid = pmemobj_tx_alloc(map_bench->args->dsize, OBJ_TYPE_NUM); ret = map_insert(map_bench->mapc, map_bench->map, key, oid); } TX_ONABORT { ret = -1; } TX_END return ret; } /* * map_insert_root_op -- insert root object to map */ static int map_insert_root_op(struct map_bench *map_bench, uint64_t key) { return map_insert(map_bench->mapc, map_bench->map, key, map_bench->root_oid); } /* * map_insert_op -- main operation for map_insert benchmark */ static int map_insert_op(struct benchmark *bench, struct operation_info *info) { auto *map_bench = (struct map_bench *)pmembench_get_priv(bench); auto *tworker = (struct map_bench_worker *)info->worker->priv; uint64_t key = tworker->keys[info->index]; mutex_lock_nofail(&map_bench->lock); int ret = map_bench->insert(map_bench, key); mutex_unlock_nofail(&map_bench->lock); return ret; } /* * map_get_obj_op -- get object from map at specified key */ static int map_get_obj_op(struct map_bench *map_bench, uint64_t key) { PMEMoid val = map_get(map_bench->mapc, map_bench->map, key); return OID_IS_NULL(val); } /* * map_get_root_op -- get root object from map at specified key */ static int map_get_root_op(struct map_bench *map_bench, uint64_t key) { PMEMoid val = map_get(map_bench->mapc, map_bench->map, key); return !OID_EQUALS(val, map_bench->root_oid); } /* * map_get_op -- main operation for map_get benchmark */ static int map_get_op(struct benchmark *bench, struct operation_info *info) { auto *map_bench = (struct map_bench *)pmembench_get_priv(bench); auto *tworker = (struct map_bench_worker *)info->worker->priv; uint64_t key = tworker->keys[info->index]; mutex_lock_nofail(&map_bench->lock); int ret = map_bench->get(map_bench, key); mutex_unlock_nofail(&map_bench->lock); return ret; } /* * map_common_init_worker -- common init worker function for map_* benchmarks */ static int map_common_init_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { struct map_bench_worker *tworker = (struct map_bench_worker *)calloc(1, sizeof(*tworker)); struct map_bench *tree; struct map_bench_args *targs; if (!tworker) { perror("calloc"); return -1; } tworker->nkeys = args->n_ops_per_thread; tworker->keys = (uint64_t *)malloc(tworker->nkeys * sizeof(*tworker->keys)); if (!tworker->keys) { perror("malloc"); goto err_free_worker; } tree = (struct map_bench *)pmembench_get_priv(bench); targs = (struct map_bench_args *)args->opts; if (targs->ext_tx) { int ret = pmemobj_tx_begin(tree->pop, nullptr); if (ret) { (void)pmemobj_tx_end(); goto err_free_keys; } } worker->priv = tworker; return 0; err_free_keys: free(tworker->keys); err_free_worker: free(tworker); return -1; } /* * map_common_free_worker -- common cleanup worker function for map_* * benchmarks */ static void map_common_free_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { auto *tworker = (struct map_bench_worker *)worker->priv; auto *targs = (struct map_bench_args *)args->opts; if (targs->ext_tx) { pmemobj_tx_commit(); (void)pmemobj_tx_end(); } free(tworker->keys); free(tworker); 
} /* * map_insert_init_worker -- init worker function for map_insert benchmark */ static int map_insert_init_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { int ret = map_common_init_worker(bench, args, worker); if (ret) return ret; auto *targs = (struct map_bench_args *)args->opts; assert(targs); auto *tworker = (struct map_bench_worker *)worker->priv; assert(tworker); for (size_t i = 0; i < tworker->nkeys; i++) tworker->keys[i] = get_key(&targs->seed, targs->max_key); return 0; } /* * map_global_rand_keys_init -- assign random keys from global keys array */ static int map_global_rand_keys_init(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { auto *tree = (struct map_bench *)pmembench_get_priv(bench); assert(tree); auto *targs = (struct map_bench_args *)args->opts; assert(targs); auto *tworker = (struct map_bench_worker *)worker->priv; assert(tworker); assert(tree->init_nkeys); /* * Assign random keys from global tree->keys array without repetitions. */ for (size_t i = 0; i < tworker->nkeys; i++) { uint64_t index = get_key(&targs->seed, tree->init_nkeys); tworker->keys[i] = tree->keys[index]; swap(tree->keys[index], tree->keys[tree->init_nkeys - 1]); tree->init_nkeys--; } return 0; } /* * map_remove_init_worker -- init worker function for map_remove benchmark */ static int map_remove_init_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { int ret = map_common_init_worker(bench, args, worker); if (ret) return ret; ret = map_global_rand_keys_init(bench, args, worker); if (ret) goto err_common_free_worker; return 0; err_common_free_worker: map_common_free_worker(bench, args, worker); return -1; } /* * map_bench_get_init_worker -- init worker function for map_get benchmark */ static int map_bench_get_init_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { int ret = map_common_init_worker(bench, args, worker); if (ret) return ret; ret = map_global_rand_keys_init(bench, args, worker); if (ret) goto err_common_free_worker; return 0; err_common_free_worker: map_common_free_worker(bench, args, worker); return -1; } /* * map_common_init -- common init function for map_* benchmarks */ static int map_common_init(struct benchmark *bench, struct benchmark_args *args) { assert(bench); assert(args); assert(args->opts); char path[PATH_MAX]; if (util_safe_strcpy(path, args->fname, sizeof(path)) != 0) return -1; enum file_type type = util_file_get_type(args->fname); if (type == OTHER_ERROR) { fprintf(stderr, "could not check type of file %s\n", args->fname); return -1; } size_t size_per_key; struct map_bench *map_bench = (struct map_bench *)calloc(1, sizeof(*map_bench)); if (!map_bench) { perror("calloc"); return -1; } map_bench->args = args; map_bench->margs = (struct map_bench_args *)args->opts; const struct map_ops *ops = parse_map_type(map_bench->margs->type); if (!ops) { fprintf(stderr, "invalid map type value specified -- '%s'\n", map_bench->margs->type); goto err_free_bench; } if (map_bench->margs->ext_tx && args->n_threads > 1) { fprintf(stderr, "external transaction requires single thread\n"); goto err_free_bench; } if (map_bench->margs->alloc) { map_bench->insert = map_insert_alloc_op; map_bench->remove = map_remove_free_op; map_bench->get = map_get_obj_op; } else { map_bench->insert = map_insert_root_op; map_bench->remove = map_remove_root_op; map_bench->get = map_get_root_op; } map_bench->nkeys = args->n_threads * args->n_ops_per_thread; 
map_bench->init_nkeys = map_bench->nkeys; size_per_key = map_bench->margs->alloc ? SIZE_PER_KEY + map_bench->args->dsize + ALLOC_OVERHEAD : SIZE_PER_KEY; map_bench->pool_size = map_bench->nkeys * size_per_key * FACTOR; if (args->is_poolset || type == TYPE_DEVDAX) { if (args->fsize < map_bench->pool_size) { fprintf(stderr, "file size too large\n"); goto err_free_bench; } map_bench->pool_size = 0; } else if (map_bench->pool_size < 2 * PMEMOBJ_MIN_POOL) { map_bench->pool_size = 2 * PMEMOBJ_MIN_POOL; } if (args->is_dynamic_poolset) { int ret = dynamic_poolset_create(args->fname, map_bench->pool_size); if (ret == -1) goto err_free_bench; if (util_safe_strcpy(path, POOLSET_PATH, sizeof(path)) != 0) goto err_free_bench; map_bench->pool_size = 0; } map_bench->pop = pmemobj_create(path, "map_bench", map_bench->pool_size, args->fmode); if (!map_bench->pop) { fprintf(stderr, "pmemobj_create: %s\n", pmemobj_errormsg()); goto err_free_bench; } errno = os_mutex_init(&map_bench->lock); if (errno) { perror("os_mutex_init"); goto err_close; } map_bench->mapc = map_ctx_init(ops, map_bench->pop); if (!map_bench->mapc) { perror("map_ctx_init"); goto err_destroy_lock; } map_bench->root = POBJ_ROOT(map_bench->pop, struct root); if (TOID_IS_NULL(map_bench->root)) { fprintf(stderr, "pmemobj_root: %s\n", pmemobj_errormsg()); goto err_free_map; } map_bench->root_oid = map_bench->root.oid; if (map_create(map_bench->mapc, &D_RW(map_bench->root)->map, nullptr)) { perror("map_new"); goto err_free_map; } map_bench->map = D_RO(map_bench->root)->map; pmembench_set_priv(bench, map_bench); return 0; err_free_map: map_ctx_free(map_bench->mapc); err_destroy_lock: os_mutex_destroy(&map_bench->lock); err_close: pmemobj_close(map_bench->pop); err_free_bench: free(map_bench); return -1; } /* * map_common_exit -- common cleanup function for map_* benchmarks */ static int map_common_exit(struct benchmark *bench, struct benchmark_args *args) { auto *tree = (struct map_bench *)pmembench_get_priv(bench); os_mutex_destroy(&tree->lock); map_ctx_free(tree->mapc); pmemobj_close(tree->pop); free(tree); return 0; } /* * map_keys_init -- initialize array with keys */ static int map_keys_init(struct benchmark *bench, struct benchmark_args *args) { auto *map_bench = (struct map_bench *)pmembench_get_priv(bench); assert(map_bench); auto *targs = (struct map_bench_args *)args->opts; assert(targs); assert(map_bench->nkeys != 0); map_bench->keys = (uint64_t *)malloc(map_bench->nkeys * sizeof(*map_bench->keys)); if (!map_bench->keys) { perror("malloc"); return -1; } int ret = 0; mutex_lock_nofail(&map_bench->lock); TX_BEGIN(map_bench->pop) { for (size_t i = 0; i < map_bench->nkeys; i++) { uint64_t key; PMEMoid oid; do { key = get_key(&targs->seed, targs->max_key); oid = map_get(map_bench->mapc, map_bench->map, key); } while (!OID_IS_NULL(oid)); if (targs->alloc) oid = pmemobj_tx_alloc(args->dsize, OBJ_TYPE_NUM); else oid = map_bench->root_oid; ret = map_insert(map_bench->mapc, map_bench->map, key, oid); if (ret) break; map_bench->keys[i] = key; } } TX_ONABORT { ret = -1; } TX_END mutex_unlock_nofail(&map_bench->lock); if (!ret) return 0; free(map_bench->keys); return ret; } /* * map_keys_exit -- cleanup of keys array */ static int map_keys_exit(struct benchmark *bench, struct benchmark_args *args) { auto *tree = (struct map_bench *)pmembench_get_priv(bench); free(tree->keys); return 0; } /* * map_remove_init -- init function for map_remove benchmark */ static int map_remove_init(struct benchmark *bench, struct benchmark_args *args) { int ret = 
map_common_init(bench, args); if (ret) return ret; ret = map_keys_init(bench, args); if (ret) goto err_exit_common; return 0; err_exit_common: map_common_exit(bench, args); return -1; } /* * map_remove_exit -- cleanup function for map_remove benchmark */ static int map_remove_exit(struct benchmark *bench, struct benchmark_args *args) { map_keys_exit(bench, args); return map_common_exit(bench, args); } /* * map_bench_get_init -- init function for map_get benchmark */ static int map_bench_get_init(struct benchmark *bench, struct benchmark_args *args) { int ret = map_common_init(bench, args); if (ret) return ret; ret = map_keys_init(bench, args); if (ret) goto err_exit_common; return 0; err_exit_common: map_common_exit(bench, args); return -1; } /* * map_get_exit -- exit function for map_get benchmark */ static int map_get_exit(struct benchmark *bench, struct benchmark_args *args) { map_keys_exit(bench, args); return map_common_exit(bench, args); } static struct benchmark_clo map_bench_clos[5]; static struct benchmark_info map_insert_info; static struct benchmark_info map_remove_info; static struct benchmark_info map_get_info; CONSTRUCTOR(map_bench_constructor) void map_bench_constructor(void) { map_bench_clos[0].opt_short = 'T'; map_bench_clos[0].opt_long = "type"; map_bench_clos[0].descr = "Type of container " "[ctree|btree|rtree|rbtree|hashmap_tx|hashmap_atomic]"; map_bench_clos[0].off = clo_field_offset(struct map_bench_args, type); map_bench_clos[0].type = CLO_TYPE_STR; map_bench_clos[0].def = "ctree"; map_bench_clos[1].opt_short = 's'; map_bench_clos[1].opt_long = "seed"; map_bench_clos[1].descr = "PRNG seed"; map_bench_clos[1].off = clo_field_offset(struct map_bench_args, seed); map_bench_clos[1].type = CLO_TYPE_UINT; map_bench_clos[1].def = "1"; map_bench_clos[1].type_uint.size = clo_field_size(struct map_bench_args, seed); map_bench_clos[1].type_uint.base = CLO_INT_BASE_DEC; map_bench_clos[1].type_uint.min = 1; map_bench_clos[1].type_uint.max = UINT_MAX; map_bench_clos[2].opt_short = 'M'; map_bench_clos[2].opt_long = "max-key"; map_bench_clos[2].descr = "maximum key (0 means no limit)"; map_bench_clos[2].off = clo_field_offset(struct map_bench_args, max_key); map_bench_clos[2].type = CLO_TYPE_UINT; map_bench_clos[2].def = "0"; map_bench_clos[2].type_uint.size = clo_field_size(struct map_bench_args, seed); map_bench_clos[2].type_uint.base = CLO_INT_BASE_DEC; map_bench_clos[2].type_uint.min = 0; map_bench_clos[2].type_uint.max = UINT64_MAX; map_bench_clos[3].opt_short = 'x'; map_bench_clos[3].opt_long = "external-tx"; map_bench_clos[3].descr = "Use external transaction for all " "operations (works with single " "thread only)"; map_bench_clos[3].off = clo_field_offset(struct map_bench_args, ext_tx); map_bench_clos[3].type = CLO_TYPE_FLAG; map_bench_clos[4].opt_short = 'A'; map_bench_clos[4].opt_long = "alloc"; map_bench_clos[4].descr = "Allocate object of specified size " "when inserting"; map_bench_clos[4].off = clo_field_offset(struct map_bench_args, alloc); map_bench_clos[4].type = CLO_TYPE_FLAG; map_insert_info.name = "map_insert"; map_insert_info.brief = "Inserting to tree map"; map_insert_info.init = map_common_init; map_insert_info.exit = map_common_exit; map_insert_info.multithread = true; map_insert_info.multiops = true; map_insert_info.init_worker = map_insert_init_worker; map_insert_info.free_worker = map_common_free_worker; map_insert_info.operation = map_insert_op; map_insert_info.measure_time = true; map_insert_info.clos = map_bench_clos; map_insert_info.nclos = 
ARRAY_SIZE(map_bench_clos); map_insert_info.opts_size = sizeof(struct map_bench_args); map_insert_info.rm_file = true; map_insert_info.allow_poolset = true; REGISTER_BENCHMARK(map_insert_info); map_remove_info.name = "map_remove"; map_remove_info.brief = "Removing from tree map"; map_remove_info.init = map_remove_init; map_remove_info.exit = map_remove_exit; map_remove_info.multithread = true; map_remove_info.multiops = true; map_remove_info.init_worker = map_remove_init_worker; map_remove_info.free_worker = map_common_free_worker; map_remove_info.operation = map_remove_op; map_remove_info.measure_time = true; map_remove_info.clos = map_bench_clos; map_remove_info.nclos = ARRAY_SIZE(map_bench_clos); map_remove_info.opts_size = sizeof(struct map_bench_args); map_remove_info.rm_file = true; map_remove_info.allow_poolset = true; REGISTER_BENCHMARK(map_remove_info); map_get_info.name = "map_get"; map_get_info.brief = "Tree lookup"; map_get_info.init = map_bench_get_init; map_get_info.exit = map_get_exit; map_get_info.multithread = true; map_get_info.multiops = true; map_get_info.init_worker = map_bench_get_init_worker; map_get_info.free_worker = map_common_free_worker; map_get_info.operation = map_get_op; map_get_info.measure_time = true; map_get_info.clos = map_bench_clos; map_get_info.nclos = ARRAY_SIZE(map_bench_clos); map_get_info.opts_size = sizeof(struct map_bench_args); map_get_info.rm_file = true; map_get_info.allow_poolset = true; REGISTER_BENCHMARK(map_get_info); }
20,418
23.107438
80
cpp
null
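map_global_rand_keys_init() in the map_bench.cpp record above hands each worker random keys without repetition by swapping every drawn key to the shrinking tail of the shared array. A self-contained sketch of that sample-without-replacement idiom (not from the repository; std::mt19937_64 stands in for the benchmark's seeded PRNG):

// Draw each element exactly once: pick a random index in the live range,
// swap the picked element with the last live slot, then shrink the range.
#include <cstdint>
#include <cstdio>
#include <random>
#include <utility>
#include <vector>

int main()
{
	std::vector<uint64_t> keys = {10, 20, 30, 40, 50};
	std::mt19937_64 rng(1);	/* stand-in for the seeded os_rand_r() */

	size_t live = keys.size();
	while (live > 0) {
		size_t idx = (size_t)(rng() % live);
		uint64_t picked = keys[idx];
		std::swap(keys[idx], keys[live - 1]);
		--live;	/* the picked key can no longer be drawn */
		printf("picked %llu\n", (unsigned long long)picked);
	}
	return 0;
}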
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/clo.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * clo.cpp -- command line options module definitions */ #include <cassert> #include <cerrno> #include <cinttypes> #include <cstring> #include <err.h> #include <getopt.h> #include "benchmark.hpp" #include "clo.hpp" #include "clo_vec.hpp" #include "queue.h" #include "scenario.hpp" #ifndef min #define min(a, b) ((a) < (b) ? (a) : (b)) #endif #ifndef max #define max(a, b) ((a) > (b) ? (a) : (b)) #endif typedef int (*clo_parse_fn)(struct benchmark_clo *clo, const char *arg, struct clo_vec *clovec); typedef int (*clo_parse_single_fn)(struct benchmark_clo *clo, const char *arg, void *ptr); typedef int (*clo_eval_range_fn)(struct benchmark_clo *clo, void *first, void *step, void *last, char type, struct clo_vec_vlist *vlist); typedef const char *(*clo_str_fn)(struct benchmark_clo *clo, void *addr, size_t size); #define STR_BUFF_SIZE 1024 static char str_buff[STR_BUFF_SIZE]; /* * clo_parse_flag -- (internal) parse flag */ static int clo_parse_flag(struct benchmark_clo *clo, const char *arg, struct clo_vec *clovec) { bool flag = true; if (arg != nullptr) { if (strcmp(arg, "true") == 0) flag = true; else if (strcmp(arg, "false") == 0) flag = false; else return -1; } return clo_vec_memcpy(clovec, clo->off, sizeof(flag), &flag); } /* * clo_parse_str -- (internal) parse string value */ static int clo_parse_str(struct benchmark_clo *clo, const char *arg, struct clo_vec *clovec) { struct clo_vec_vlist *vlist = clo_vec_vlist_alloc(); assert(vlist != nullptr); char *str = strdup(arg); assert(str != nullptr); clo_vec_add_alloc(clovec, str); char *next = strtok(str, ","); while (next) { clo_vec_vlist_add(vlist, &next, sizeof(next)); next = strtok(nullptr, ","); } int ret = clo_vec_memcpy_list(clovec, clo->off, sizeof(str), vlist); clo_vec_vlist_free(vlist); return ret; } /* * is_oct -- check if string may be octal number */ static int is_oct(const char *arg, size_t len) { return (arg[0] == '0' || (len > 1 && arg[0] == '-' && arg[1] == '0')); } /* * is_hex -- check if string may be hexadecimal number */ static int is_hex(const char *arg, size_t len) { if (arg[0] == '-') { arg++; len--; } return (len > 2 && arg[0] == '0' && (arg[1] == 'x' || arg[1] == 'X')); } /* * parse_number_base -- parse string as integer of given sign and base */ static int parse_number_base(const char *arg, void *value, int s, int base) { char *end; errno = 0; if (s) { auto *v = (int64_t *)value; *v = strtoll(arg, &end, base); } else { auto *v = (uint64_t *)value; *v = strtoull(arg, &end, base); } if (errno || *end != '\0') return -1; return 0; } /* * parse_number -- parse string as integer of given sign and allowed bases */ static int parse_number(const char *arg, size_t len, void *value, int s, int base) { if ((base & CLO_INT_BASE_HEX) && is_hex(arg, len)) { if (!parse_number_base(arg, value, s, 16)) return 0; } if ((base & CLO_INT_BASE_OCT) && is_oct(arg, len)) { if (!parse_number_base(arg, value, s, 8)) return 0; } if (base & CLO_INT_BASE_DEC) { if (!parse_number_base(arg, value, s, 10)) return 0; } return -1; } /* * clo_parse_single_int -- (internal) parse single int value */ static int clo_parse_single_int(struct benchmark_clo *clo, const char *arg, void *ptr) { int64_t value = 0; size_t len = strlen(arg); if (parse_number(arg, len, &value, 1, clo->type_int.base)) { errno = EINVAL; return -1; } int64_t tmax = ((int64_t)1 << (8 * clo->type_int.size - 1)) - 1; int64_t tmin = -((int64_t)1 << (8 * clo->type_int.size - 1)); tmax = min(tmax, 
clo->type_int.max); tmin = max(tmin, clo->type_int.min); if (value > tmax || value < tmin) { errno = ERANGE; return -1; } memcpy(ptr, &value, clo->type_int.size); return 0; } /* * clo_parse_single_uint -- (internal) parse single uint value */ static int clo_parse_single_uint(struct benchmark_clo *clo, const char *arg, void *ptr) { if (arg[0] == '-') { errno = EINVAL; return -1; } uint64_t value = 0; size_t len = strlen(arg); if (parse_number(arg, len, &value, 0, clo->type_uint.base)) { errno = EINVAL; return -1; } uint64_t tmax = ~0 >> (64 - 8 * clo->type_uint.size); uint64_t tmin = 0; tmax = min(tmax, clo->type_uint.max); tmin = max(tmin, clo->type_uint.min); if (value > tmax || value < tmin) { errno = ERANGE; return -1; } memcpy(ptr, &value, clo->type_uint.size); return 0; } /* * clo_eval_range_uint -- (internal) evaluate range for uint values */ static int clo_eval_range_uint(struct benchmark_clo *clo, void *first, void *step, void *last, char type, struct clo_vec_vlist *vlist) { uint64_t curr = *(uint64_t *)first; uint64_t l = *(uint64_t *)last; int64_t s = *(int64_t *)step; while (1) { clo_vec_vlist_add(vlist, &curr, clo->type_uint.size); switch (type) { case '+': curr += s; if (curr > l) return 0; break; case '-': if (curr < (uint64_t)s) return 0; curr -= s; if (curr < l) return 0; break; case '*': curr *= s; if (curr > l) return 0; break; case '/': curr /= s; if (curr < l) return 0; break; default: return -1; } } return -1; } /* * clo_eval_range_int -- (internal) evaluate range for int values */ static int clo_eval_range_int(struct benchmark_clo *clo, void *first, void *step, void *last, char type, struct clo_vec_vlist *vlist) { int64_t curr = *(int64_t *)first; int64_t l = *(int64_t *)last; uint64_t s = *(uint64_t *)step; while (1) { clo_vec_vlist_add(vlist, &curr, clo->type_int.size); switch (type) { case '+': curr += s; if (curr > l) return 0; break; case '-': curr -= s; if (curr < l) return 0; break; case '*': curr *= s; if (curr > l) return 0; break; case '/': curr /= s; if (curr < l) return 0; break; default: return -1; } } return -1; } /* * clo_check_range_params -- (internal) validate step and step type */ static int clo_check_range_params(uint64_t step, char step_type) { switch (step_type) { /* * Cannot construct range with step equal to 0 * for '+' or '-' range. */ case '+': case '-': if (step == 0) return -1; break; /* * Cannot construct range with step equal to 0 or 1 * for '*' or '/' range. */ case '*': case '/': if (step == 0 || step == 1) return -1; break; default: return -1; } return 0; } /* * clo_parse_range -- (internal) parse range or value * * The range may be in the following format: * <first>:<step type><step>:<last> * * Step type must be one of the following: +, -, *, /. 
*/ static int clo_parse_range(struct benchmark_clo *clo, const char *arg, clo_parse_single_fn parse_single, clo_eval_range_fn eval_range, struct clo_vec_vlist *vlist) { auto *str_first = (char *)malloc(strlen(arg) + 1); assert(str_first != nullptr); auto *str_step = (char *)malloc(strlen(arg) + 1); assert(str_step != nullptr); char step_type = '\0'; auto *str_last = (char *)malloc(strlen(arg) + 1); assert(str_last != nullptr); int ret = sscanf(arg, "%[^:]:%c%[^:]:%[^:]", str_first, &step_type, str_step, str_last); if (ret == 1) { /* single value */ uint64_t value; if (parse_single(clo, arg, &value)) { ret = -1; } else { if (clo->type == CLO_TYPE_UINT) clo_vec_vlist_add(vlist, &value, clo->type_uint.size); else clo_vec_vlist_add(vlist, &value, clo->type_int.size); ret = 0; } } else if (ret == 4) { /* range */ uint64_t first = 0; uint64_t last = 0; uint64_t step = 0; if (parse_single(clo, str_first, &first)) { ret = -1; goto out; } char *end; errno = 0; step = strtoull(str_step, &end, 10); if (errno || !end || *end != '\0') { ret = -1; goto out; } if (parse_single(clo, str_last, &last)) { ret = -1; goto out; } if (clo_check_range_params(step, step_type)) { ret = -1; goto out; } /* evaluate the range */ if (eval_range(clo, &first, &step, &last, step_type, vlist)) { ret = -1; goto out; } ret = 0; } else { ret = -1; } out: free(str_first); free(str_step); free(str_last); return ret; } /* * clo_parse_ranges -- (internal) parse ranges/values separated by commas */ static int clo_parse_ranges(struct benchmark_clo *clo, const char *arg, struct clo_vec *clovec, clo_parse_single_fn parse_single, clo_eval_range_fn eval_range) { struct clo_vec_vlist *vlist = clo_vec_vlist_alloc(); assert(vlist != nullptr); int ret = 0; char *args = strdup(arg); assert(args != nullptr); char *curr = args; char *next; /* iterate through all values separated by comma */ while ((next = strchr(curr, ',')) != nullptr) { *next = '\0'; next++; /* parse each comma separated value as range or single value */ if ((ret = clo_parse_range(clo, curr, parse_single, eval_range, vlist))) goto out; curr = next; } /* parse each comma separated value as range or single value */ if ((ret = clo_parse_range(clo, curr, parse_single, eval_range, vlist))) goto out; /* add list of values to CLO vector */ if (clo->type == CLO_TYPE_UINT) ret = clo_vec_memcpy_list(clovec, clo->off, clo->type_uint.size, vlist); else ret = clo_vec_memcpy_list(clovec, clo->off, clo->type_int.size, vlist); out: free(args); clo_vec_vlist_free(vlist); return ret; } /* * clo_parse_int -- (internal) parse int value */ static int clo_parse_int(struct benchmark_clo *clo, const char *arg, struct clo_vec *clovec) { return clo_parse_ranges(clo, arg, clovec, clo_parse_single_int, clo_eval_range_int); } /* * clo_parse_uint -- (internal) parse uint value */ static int clo_parse_uint(struct benchmark_clo *clo, const char *arg, struct clo_vec *clovec) { return clo_parse_ranges(clo, arg, clovec, clo_parse_single_uint, clo_eval_range_uint); } /* * clo_str_flag -- (internal) convert flag value to string */ static const char * clo_str_flag(struct benchmark_clo *clo, void *addr, size_t size) { if (clo->off + sizeof(bool) > size) return nullptr; bool flag = *(bool *)((char *)addr + clo->off); return flag ? 
"true" : "false"; } /* * clo_str_str -- (internal) convert str value to string */ static const char * clo_str_str(struct benchmark_clo *clo, void *addr, size_t size) { if (clo->off + sizeof(char *) > size) return nullptr; return *(char **)((char *)addr + clo->off); } /* * clo_str_int -- (internal) convert int value to string */ static const char * clo_str_int(struct benchmark_clo *clo, void *addr, size_t size) { if (clo->off + clo->type_int.size > size) return nullptr; void *val = (char *)addr + clo->off; int ret = 0; switch (clo->type_int.size) { case 1: ret = util_snprintf(str_buff, STR_BUFF_SIZE, "%" PRId8, *(int8_t *)val); break; case 2: ret = util_snprintf(str_buff, STR_BUFF_SIZE, "%" PRId16, *(int16_t *)val); break; case 4: ret = util_snprintf(str_buff, STR_BUFF_SIZE, "%" PRId32, *(int32_t *)val); break; case 8: ret = util_snprintf(str_buff, STR_BUFF_SIZE, "%" PRId64, *(int64_t *)val); break; default: return nullptr; } if (ret < 0) return nullptr; return str_buff; } /* * clo_str_uint -- (internal) convert uint value to string */ static const char * clo_str_uint(struct benchmark_clo *clo, void *addr, size_t size) { if (clo->off + clo->type_uint.size > size) return nullptr; void *val = (char *)addr + clo->off; int ret = 0; switch (clo->type_uint.size) { case 1: ret = util_snprintf(str_buff, STR_BUFF_SIZE, "%" PRIu8, *(uint8_t *)val); break; case 2: ret = util_snprintf(str_buff, STR_BUFF_SIZE, "%" PRIu16, *(uint16_t *)val); break; case 4: ret = util_snprintf(str_buff, STR_BUFF_SIZE, "%" PRIu32, *(uint32_t *)val); break; case 8: ret = util_snprintf(str_buff, STR_BUFF_SIZE, "%" PRIu64, *(uint64_t *)val); break; default: return nullptr; } if (ret < 0) return nullptr; return str_buff; } /* * clo_parse -- (internal) array with functions for parsing CLOs */ static clo_parse_fn clo_parse[CLO_TYPE_MAX] = { /* [CLO_TYPE_FLAG] = */ clo_parse_flag, /* [CLO_TYPE_STR] = */ clo_parse_str, /* [CLO_TYPE_INT] = */ clo_parse_int, /* [CLO_TYPE_UINT] = */ clo_parse_uint, }; /* * clo_str -- (internal) array with functions for converting to string */ static clo_str_fn clo_str[CLO_TYPE_MAX] = { /* [CLO_TYPE_FLAG] = */ clo_str_flag, /* [CLO_TYPE_STR] = */ clo_str_str, /* [CLO_TYPE_INT] = */ clo_str_int, /* [CLO_TYPE_UINT] = */ clo_str_uint, }; /* * clo_get_by_short -- (internal) return CLO with specified short opt */ static struct benchmark_clo * clo_get_by_short(struct benchmark_clo *clos, size_t nclo, char opt_short) { size_t i; for (i = 0; i < nclo; i++) { if (clos[i].opt_short == opt_short) return &clos[i]; } return nullptr; } /* * clo_get_by_long -- (internal) return CLO with specified long opt */ static struct benchmark_clo * clo_get_by_long(struct benchmark_clo *clos, size_t nclo, const char *opt_long) { size_t i; for (i = 0; i < nclo; i++) { if (strcmp(clos[i].opt_long, opt_long) == 0) return &clos[i]; } return nullptr; } /* * clo_get_optstr -- (internal) returns option string from CLOs * * This function returns option string which contains all short * options from CLO structure. * The returned value must be freed by caller. */ static char * clo_get_optstr(struct benchmark_clo *clos, size_t nclo) { size_t i; char *optstr; char *ptr; /* * In worst case every option requires an argument * so we need space for ':' character + terminating * NULL. 
*/ size_t optstrlen = nclo * 2 + 1; optstr = (char *)calloc(1, optstrlen); assert(optstr != nullptr); ptr = optstr; for (i = 0; i < nclo; i++) { if (clos[i].opt_short) { *(ptr++) = clos[i].opt_short; if (clos[i].type != CLO_TYPE_FLAG) *(ptr++) = ':'; } } return optstr; } /* * clo_get_long_options -- (internal) allocate long options structure * * This function allocates structure for long options and fills all * entries according to values from becnhmark_clo. This is essentially * conversion from struct benchmark_clo to struct option. * The returned value must be freed by caller. */ static struct option * clo_get_long_options(struct benchmark_clo *clos, size_t nclo) { size_t i; struct option *options; options = (struct option *)calloc(nclo + 1, sizeof(struct option)); assert(options != nullptr); for (i = 0; i < nclo; i++) { options[i].name = clos[i].opt_long; options[i].val = clos[i].opt_short; /* no optional arguments */ if (clos[i].type == CLO_TYPE_FLAG) { options[i].has_arg = no_argument; } else { options[i].has_arg = required_argument; } } return options; } /* * clo_set_defaults -- (internal) set default values * * Default values are stored as strings in CLO * structure so this function parses default values in * the same manner as values passed by user. Returns -1 * if argument was not passed by user and default value * is missing. */ static int clo_set_defaults(struct benchmark_clo *clos, size_t nclo, struct clo_vec *clovec) { size_t i; for (i = 0; i < nclo; i++) { if (clos[i].used) continue; /* * If option was not used and default value * is not specified, return error. Otherwise * parse the default value in the same way as * values passed by user. Except for the flag. * If the flag default value was not specified * assign "false" value to it. */ if (clos[i].def) { if (clo_parse[clos[i].type](&clos[i], clos[i].def, clovec)) return -1; } else if (clos[i].type == CLO_TYPE_FLAG) { if (clo_parse[clos[i].type](&clos[i], "false", clovec)) return -1; } else { printf("'%s' is required option\n", clos[i].opt_long); return -1; } } return 0; } /* * benchmark_clo_parse -- parse CLOs and store values in desired structure * * This function parses command line arguments according to information * from CLOs structure. The parsed values are stored in CLO vector * pointed by clovec. If any of command line options are not passed by user, * the default value is stored if exists. Otherwise it means the argument is * required and error is returned. 
* * - argc - number of command line options passed by user * - argv - command line options passed by user * - clos - description of available command line options * - nclos - number of available command line options * - clovec - vector of arguments */ int benchmark_clo_parse(int argc, char *argv[], struct benchmark_clo *clos, ssize_t nclos, struct clo_vec *clovec) { char *optstr; struct option *options; int ret = 0; int opt; int optindex; /* convert CLOs to option string and long options structure */ optstr = clo_get_optstr(clos, nclos); options = clo_get_long_options(clos, nclos); /* parse CLOs as long and/or short options */ while ((opt = getopt_long(argc, argv, optstr, options, &optindex)) != -1) { struct benchmark_clo *clo = nullptr; if (opt) { clo = clo_get_by_short(clos, nclos, opt); } else { assert(optindex < nclos); clo = &clos[optindex]; } if (!clo) { ret = -1; goto out; } /* invoke parser according to type of CLO */ assert(clo->type < CLO_TYPE_MAX); ret = clo_parse[clo->type](clo, optarg, clovec); if (ret) goto out; /* mark CLO as used */ clo->used = optarg != nullptr || clo->type == CLO_TYPE_FLAG; } if (optind < argc) { fprintf(stderr, "Unknown option: %s\n", argv[optind]); ret = -1; goto out; } /* parse unused CLOs with default values */ ret = clo_set_defaults(clos, nclos, clovec); out: free(options); free(optstr); if (ret) errno = EINVAL; return ret; } /* * benchmark_clo_parse_scenario -- parse CLOs from scenario * * This function parses command line arguments according to information * from CLOs structure. The parsed values are stored in CLO vector * pointed by clovec. If any of command line options are not passed by user, * the default value is stored if exists. Otherwise it means the argument is * required and error is returned. * * - scenario - scenario with key value arguments * - clos - description of available command line options * - nclos - number of available command line options * - clovec - vector of arguments */ int benchmark_clo_parse_scenario(struct scenario *scenario, struct benchmark_clo *clos, size_t nclos, struct clo_vec *clovec) { struct kv *kv; FOREACH_KV(kv, scenario) { struct benchmark_clo *clo = clo_get_by_long(clos, nclos, kv->key); if (!clo) { fprintf(stderr, "unrecognized option -- '%s'\n", kv->key); return -1; } assert(clo->type < CLO_TYPE_MAX); if (clo_parse[clo->type](clo, kv->value, clovec)) { fprintf(stderr, "parsing option -- '%s' failed\n", kv->value); return -1; } /* mark CLO as used */ clo->used = 1; } return clo_set_defaults(clos, nclos, clovec); } /* * benchmark_override_clos_in_scenario - parse the command line arguments and * override/add the parameters in/to the scenario by replacing/adding the kv * struct in/to the scenario. 
* * - scenario - scenario with key value arguments * - argc - command line arguments number * - argv - command line arguments vector * - clos - description of available command line options * - nclos - number of available command line options */ int benchmark_override_clos_in_scenario(struct scenario *scenario, int argc, char *argv[], struct benchmark_clo *clos, int nclos) { char *optstr; struct option *options; int ret = 0; int opt; int optindex; const char *true_str = "true"; /* convert CLOs to option string and long options structure */ optstr = clo_get_optstr(clos, nclos); options = clo_get_long_options(clos, nclos); /* parse CLOs as long and/or short options */ while ((opt = getopt_long(argc, argv, optstr, options, &optindex)) != -1) { struct benchmark_clo *clo = nullptr; if (opt) { clo = clo_get_by_short(clos, nclos, opt); } else { assert(optindex < nclos); clo = &clos[optindex]; } if (!clo) { ret = -1; goto out; } /* Check if the given clo is defined in the scenario */ struct kv *kv = find_kv_in_scenario(clo->opt_long, scenario); if (kv) { /* replace the value in the scenario */ if (optarg != nullptr && clo->type != CLO_TYPE_FLAG) { free(kv->value); kv->value = strdup(optarg); } else if (optarg == nullptr && clo->type == CLO_TYPE_FLAG) { free(kv->value); kv->value = strdup(true_str); } else { ret = -1; goto out; } } else { /* add a new param to the scenario */ if (optarg != nullptr && clo->type != CLO_TYPE_FLAG) { kv = kv_alloc(clo->opt_long, optarg); PMDK_TAILQ_INSERT_TAIL(&scenario->head, kv, next); } else if (optarg == nullptr && clo->type == CLO_TYPE_FLAG) { kv = kv_alloc(clo->opt_long, true_str); PMDK_TAILQ_INSERT_TAIL(&scenario->head, kv, next); } else { ret = -1; goto out; } } } if (optind < argc) { fprintf(stderr, "Unknown option: %s\n", argv[optind]); ret = -1; goto out; } out: free(options); free(optstr); if (ret) errno = EINVAL; return ret; } /* * benchmark_clo_str -- converts command line option to string * * According to command line option type and parameters, converts * the value from structure pointed by args of size size. */ const char * benchmark_clo_str(struct benchmark_clo *clo, void *args, size_t size) { assert(clo->type < CLO_TYPE_MAX); return clo_str[clo->type](clo, args, size); } /* * clo_get_scenarios - search the command line arguments for scenarios listed in * available_scenarios and put them in found_scenarios. Returns the number of * found scenarios in the cmd line or -1 on error. The passed cmd line * args should contain the scenario name(s) as the first argument(s) - starting * from index 0 */ int clo_get_scenarios(int argc, char *argv[], struct scenarios *available_scenarios, struct scenarios *found_scenarios) { assert(argv != nullptr); assert(available_scenarios != nullptr); assert(found_scenarios != nullptr); if (argc <= 0) { fprintf(stderr, "clo get scenarios, argc invalid value: %d\n", argc); return -1; } int tmp_argc = argc; char **tmp_argv = argv; do { struct scenario *scenario = scenarios_get_scenario(available_scenarios, *tmp_argv); if (!scenario) { fprintf(stderr, "unknown scenario: %s\n", *tmp_argv); return -1; } struct scenario *new_scenario = clone_scenario(scenario); assert(new_scenario != nullptr); PMDK_TAILQ_INSERT_TAIL(&found_scenarios->head, new_scenario, next); tmp_argc--; tmp_argv++; } while (tmp_argc && contains_scenarios(tmp_argc, tmp_argv, available_scenarios)); return argc - tmp_argc; }
23,174
21.609756
80
cpp
null
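clo.cpp above accepts range arguments in the grammar <first>:<step type><step>:<last> with step types +, -, * and /. As a worked example of how clo_eval_range_uint() walks such a range, the sketch below (not from the repository) expands a multiplicative range such as "8:*2:64" into 8, 16, 32, 64:

// Expand a '*' range the way clo_eval_range_uint does: emit the current
// value, multiply by the step, stop once the value passes <last>.
// Steps of 0 or 1 are rejected up front by clo_check_range_params, so the
// loop below assumes step > 1.
#include <cstdint>
#include <cstdio>
#include <vector>

static std::vector<uint64_t>
expand_mul_range(uint64_t first, uint64_t step, uint64_t last)
{
	std::vector<uint64_t> out;
	for (uint64_t v = first; v <= last; v *= step)
		out.push_back(v);	/* 8, 16, 32, 64 for "8:*2:64" */
	return out;
}

int main()
{
	for (uint64_t v : expand_mul_range(8, 2, 64))
		printf("%llu\n", (unsigned long long)v);
	return 0;
}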
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/poolset_util.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018, Intel Corporation */ #include <cassert> #include <fcntl.h> #include <file.h> #include "os.h" #include "poolset_util.hpp" #include "set.h" #define PART_TEMPLATE "part." #define POOL_PART_SIZE (1UL << 30) /* * dynamic_poolset_clear -- clears header in first part if it exists */ static int dynamic_poolset_clear(const char *dir) { char path[PATH_MAX]; int count = snprintf(path, sizeof(path), "%s" OS_DIR_SEP_STR PART_TEMPLATE "0", dir); assert(count > 0); if ((size_t)count >= sizeof(path)) { fprintf(stderr, "path to a poolset part too long\n"); return -1; } int exists = util_file_exists(path); if (exists < 0) return -1; if (!exists) return 0; return util_file_zero(path, 0, POOL_HDR_SIZE); } /* * dynamic_poolset_create -- clear pool's header and create new poolset */ int dynamic_poolset_create(const char *path, size_t size) { /* buffer for part's path and size */ char buff[PATH_MAX + 20]; int ret; int fd; int count; int curr_part = 0; ret = dynamic_poolset_clear(path); if (ret == -1) return -1; fd = os_open(POOLSET_PATH, O_RDWR | O_CREAT, 0644); if (fd == -1) { perror("open"); return -1; } char header[] = "PMEMPOOLSET\nOPTION SINGLEHDR\n"; ret = util_write_all(fd, header, sizeof(header) - 1); if (ret == -1) goto err; while (curr_part * POOL_PART_SIZE < size + POOL_HDR_SIZE) { count = snprintf(buff, sizeof(buff), "%lu %s" OS_DIR_SEP_STR PART_TEMPLATE "%d\n", POOL_PART_SIZE, path, curr_part); assert(count > 0); if ((size_t)count >= sizeof(buff)) { fprintf(stderr, "path to a poolset part too long\n"); goto err; } ret = util_write_all(fd, buff, count); if (ret == -1) goto err; curr_part++; } close(fd); return 0; err: close(fd); return -1; }
1,827
18.446809
71
cpp
null
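dynamic_poolset_create() above emits a poolset description file: the PMEMPOOLSET header, the SINGLEHDR option, and one 1 GiB part line per chunk needed to cover the requested size. Below is a simplified sketch of that output using standard streams; it is not from the repository, it ignores the extra POOL_HDR_SIZE slack the real code accounts for, and the directory "/pmem/bench" is illustrative.

// Write a poolset file of the same shape as the one generated above.
#include <cstddef>
#include <fstream>
#include <string>

static void
write_poolset(const std::string &out, const std::string &dir, size_t size)
{
	const size_t part_size = (size_t)1 << 30;	/* POOL_PART_SIZE */

	std::ofstream f(out);
	f << "PMEMPOOLSET\n";
	f << "OPTION SINGLEHDR\n";
	for (size_t i = 0; i * part_size < size; ++i)
		f << part_size << " " << dir << "/part." << i << "\n";
}

int main()
{
	write_poolset("bench.set", "/pmem/bench", (size_t)3 << 30);
	return 0;
}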
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/benchmark_time.hpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * benchmark_time.hpp -- declarations of benchmark_time module */ #include <ctime> typedef struct timespec benchmark_time_t; void benchmark_time_get(benchmark_time_t *time); void benchmark_time_diff(benchmark_time_t *d, benchmark_time_t *t1, benchmark_time_t *t2); double benchmark_time_get_secs(benchmark_time_t *t); unsigned long long benchmark_time_get_nsecs(benchmark_time_t *t); int benchmark_time_compare(const benchmark_time_t *t1, const benchmark_time_t *t2); void benchmark_time_set(benchmark_time_t *time, unsigned long long nsecs); unsigned long long benchmark_get_avg_get_time(void);
698
35.789474
74
hpp
null
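benchmark_time.hpp above only declares the timespec-based helpers; their definitions live elsewhere in the tree. The sketch below shows the conventional way such helpers are written (read CLOCK_MONOTONIC, subtract with a nanosecond borrow, flatten to nanoseconds); it is an assumption about the general technique, not a copy of the repository's benchmark_time.cpp, and it relies on a POSIX clock_gettime().

// Conventional timespec difference with nanosecond borrow, plus conversion
// to a single nanosecond count.
#include <cstdio>
#include <ctime>

static void
time_diff(timespec *d, const timespec *t1, const timespec *t2)
{
	/* t2 is assumed to be the later of the two measurements */
	long nsecs = t2->tv_nsec - t1->tv_nsec;
	d->tv_sec = t2->tv_sec - t1->tv_sec;
	if (nsecs < 0) {	/* borrow one second */
		nsecs += 1000000000L;
		d->tv_sec -= 1;
	}
	d->tv_nsec = nsecs;
}

int main()
{
	timespec a, b, d;
	clock_gettime(CLOCK_MONOTONIC, &a);
	clock_gettime(CLOCK_MONOTONIC, &b);
	time_diff(&d, &a, &b);
	unsigned long long ns =
		(unsigned long long)d.tv_sec * 1000000000ULL + d.tv_nsec;
	printf("elapsed: %llu ns\n", ns);
	return 0;
}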
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/blk.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * blk.cpp -- pmemblk benchmarks definitions */ #include "benchmark.hpp" #include "file.h" #include "libpmem.h" #include "libpmemblk.h" #include "libpmempool.h" #include "os.h" #include "poolset_util.hpp" #include "rand.h" #include <cassert> #include <cerrno> #include <cstdint> #include <cstdio> #include <cstdlib> #include <cstring> #include <fcntl.h> #include <unistd.h> struct blk_bench; struct blk_worker; /* * op_type -- type of operation */ enum op_type { OP_TYPE_UNKNOWN, OP_TYPE_BLK, OP_TYPE_FILE, OP_TYPE_MEMCPY, }; /* * op_mode -- mode of the copy process */ enum op_mode { OP_MODE_UNKNOWN, OP_MODE_STAT, /* read/write always the same chunk */ OP_MODE_SEQ, /* read/write chunk by chunk */ OP_MODE_RAND /* read/write to chunks selected randomly */ }; /* * typedef for the worker function */ typedef int (*worker_fn)(struct blk_bench *, struct benchmark_args *, struct blk_worker *, os_off_t); /* * blk_args -- benchmark specific arguments */ struct blk_args { size_t fsize; /* requested file size */ bool no_warmup; /* don't do warmup */ unsigned seed; /* seed for randomization */ char *type_str; /* type: blk, file, memcpy */ char *mode_str; /* mode: stat, seq, rand */ }; /* * blk_bench -- pmemblk benchmark context */ struct blk_bench { PMEMblkpool *pbp; /* pmemblk handle */ char *addr; /* address of user data (memcpy) */ int fd; /* file descr. for file io */ size_t nblocks; /* actual number of blocks */ size_t blocks_per_thread; /* number of blocks per thread */ worker_fn worker; /* worker function */ enum op_type type; enum op_mode mode; }; /* * struct blk_worker -- pmemblk worker context */ struct blk_worker { os_off_t *blocks; /* array with block numbers */ char *buff; /* buffer for read/write */ rng_t rng; /* worker RNG state */ }; /* * parse_op_type -- parse command line "--operation" argument * * Returns proper operation type. */ static enum op_type parse_op_type(const char *arg) { if (strcmp(arg, "blk") == 0) return OP_TYPE_BLK; else if (strcmp(arg, "file") == 0) return OP_TYPE_FILE; else if (strcmp(arg, "memcpy") == 0) return OP_TYPE_MEMCPY; else return OP_TYPE_UNKNOWN; } /* * parse_op_mode -- parse command line "--mode" argument * * Returns proper operation mode. 
*/ static enum op_mode parse_op_mode(const char *arg) { if (strcmp(arg, "stat") == 0) return OP_MODE_STAT; else if (strcmp(arg, "seq") == 0) return OP_MODE_SEQ; else if (strcmp(arg, "rand") == 0) return OP_MODE_RAND; else return OP_MODE_UNKNOWN; } /* * blk_do_warmup -- perform warm-up by writing to each block */ static int blk_do_warmup(struct blk_bench *bb, struct benchmark_args *args) { size_t lba; int ret = 0; auto *buff = (char *)calloc(1, args->dsize); if (!buff) { perror("calloc"); return -1; } for (lba = 0; lba < bb->nblocks; ++lba) { switch (bb->type) { case OP_TYPE_FILE: { size_t off = lba * args->dsize; if (pwrite(bb->fd, buff, args->dsize, off) != (ssize_t)args->dsize) { perror("pwrite"); ret = -1; goto out; } } break; case OP_TYPE_BLK: if (pmemblk_write(bb->pbp, buff, lba) < 0) { perror("pmemblk_write"); ret = -1; goto out; } break; case OP_TYPE_MEMCPY: { size_t off = lba * args->dsize; pmem_memcpy_persist((char *)bb->addr + off, buff, args->dsize); } break; default: perror("unknown type"); ret = -1; goto out; } } out: free(buff); return ret; } /* * blk_read -- read function for pmemblk */ static int blk_read(struct blk_bench *bb, struct benchmark_args *ba, struct blk_worker *bworker, os_off_t off) { if (pmemblk_read(bb->pbp, bworker->buff, off) < 0) { perror("pmemblk_read"); return -1; } return 0; } /* * fileio_read -- read function for file io */ static int fileio_read(struct blk_bench *bb, struct benchmark_args *ba, struct blk_worker *bworker, os_off_t off) { os_off_t file_off = off * ba->dsize; if (pread(bb->fd, bworker->buff, ba->dsize, file_off) != (ssize_t)ba->dsize) { perror("pread"); return -1; } return 0; } /* * memcpy_read -- read function for memcpy */ static int memcpy_read(struct blk_bench *bb, struct benchmark_args *ba, struct blk_worker *bworker, os_off_t off) { os_off_t file_off = off * ba->dsize; memcpy(bworker->buff, (char *)bb->addr + file_off, ba->dsize); return 0; } /* * blk_write -- write function for pmemblk */ static int blk_write(struct blk_bench *bb, struct benchmark_args *ba, struct blk_worker *bworker, os_off_t off) { if (pmemblk_write(bb->pbp, bworker->buff, off) < 0) { perror("pmemblk_write"); return -1; } return 0; } /* * memcpy_write -- write function for memcpy */ static int memcpy_write(struct blk_bench *bb, struct benchmark_args *ba, struct blk_worker *bworker, os_off_t off) { os_off_t file_off = off * ba->dsize; pmem_memcpy_persist((char *)bb->addr + file_off, bworker->buff, ba->dsize); return 0; } /* * fileio_write -- write function for file io */ static int fileio_write(struct blk_bench *bb, struct benchmark_args *ba, struct blk_worker *bworker, os_off_t off) { os_off_t file_off = off * ba->dsize; if (pwrite(bb->fd, bworker->buff, ba->dsize, file_off) != (ssize_t)ba->dsize) { perror("pwrite"); return -1; } return 0; } /* * blk_operation -- main operations for blk_read and blk_write benchmark */ static int blk_operation(struct benchmark *bench, struct operation_info *info) { auto *bb = (struct blk_bench *)pmembench_get_priv(bench); auto *bworker = (struct blk_worker *)info->worker->priv; os_off_t off = bworker->blocks[info->index]; return bb->worker(bb, info->args, bworker, off); } /* * blk_init_worker -- initialize worker */ static int blk_init_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { struct blk_worker *bworker = (struct blk_worker *)malloc(sizeof(*bworker)); if (!bworker) { perror("malloc"); return -1; } auto *bb = (struct blk_bench *)pmembench_get_priv(bench); auto *bargs = (struct 
blk_args *)args->opts; randomize_r(&bworker->rng, bargs->seed); bworker->buff = (char *)malloc(args->dsize); if (!bworker->buff) { perror("malloc"); goto err_buff; } /* fill buffer with some random data */ memset(bworker->buff, (char)rnd64_r(&bworker->rng), args->dsize); assert(args->n_ops_per_thread != 0); bworker->blocks = (os_off_t *)malloc(sizeof(*bworker->blocks) * args->n_ops_per_thread); if (!bworker->blocks) { perror("malloc"); goto err_blocks; } switch (bb->mode) { case OP_MODE_RAND: for (size_t i = 0; i < args->n_ops_per_thread; i++) { bworker->blocks[i] = worker->index * bb->blocks_per_thread + rnd64_r(&bworker->rng) % bb->blocks_per_thread; } break; case OP_MODE_SEQ: for (size_t i = 0; i < args->n_ops_per_thread; i++) bworker->blocks[i] = i % bb->blocks_per_thread; break; case OP_MODE_STAT: for (size_t i = 0; i < args->n_ops_per_thread; i++) bworker->blocks[i] = 0; break; default: perror("unknown mode"); goto err_mode; } worker->priv = bworker; return 0; err_mode: free(bworker->blocks); err_blocks: free(bworker->buff); err_buff: free(bworker); return -1; } /* * blk_free_worker -- cleanup worker */ static void blk_free_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { auto *bworker = (struct blk_worker *)worker->priv; free(bworker->blocks); free(bworker->buff); free(bworker); } /* * blk_init -- function for initialization benchmark */ static int blk_init(struct blk_bench *bb, struct benchmark_args *args) { auto *ba = (struct blk_args *)args->opts; assert(ba != nullptr); char path[PATH_MAX]; if (util_safe_strcpy(path, args->fname, sizeof(path)) != 0) return -1; bb->type = parse_op_type(ba->type_str); if (bb->type == OP_TYPE_UNKNOWN) { fprintf(stderr, "Invalid operation argument '%s'", ba->type_str); return -1; } enum file_type type = util_file_get_type(args->fname); if (type == OTHER_ERROR) { fprintf(stderr, "could not check type of file %s\n", args->fname); return -1; } if (bb->type == OP_TYPE_FILE && type == TYPE_DEVDAX) { fprintf(stderr, "fileio not supported on device dax\n"); return -1; } bb->mode = parse_op_mode(ba->mode_str); if (bb->mode == OP_MODE_UNKNOWN) { fprintf(stderr, "Invalid mode argument '%s'", ba->mode_str); return -1; } if (ba->fsize == 0) ba->fsize = PMEMBLK_MIN_POOL; size_t req_fsize = ba->fsize; if (ba->fsize / args->dsize < args->n_threads || ba->fsize < PMEMBLK_MIN_POOL) { fprintf(stderr, "too small file size\n"); return -1; } if (args->dsize >= ba->fsize) { fprintf(stderr, "block size bigger than file size\n"); return -1; } if (args->is_poolset || type == TYPE_DEVDAX) { if (args->fsize < ba->fsize) { fprintf(stderr, "file size too large\n"); return -1; } ba->fsize = 0; } else if (args->is_dynamic_poolset) { int ret = dynamic_poolset_create(args->fname, ba->fsize); if (ret == -1) return -1; if (util_safe_strcpy(path, POOLSET_PATH, sizeof(path)) != 0) return -1; ba->fsize = 0; } bb->fd = -1; /* * Create pmemblk in order to get the number of blocks * even for file-io mode. 
*/ bb->pbp = pmemblk_create(path, args->dsize, ba->fsize, args->fmode); if (bb->pbp == nullptr) { perror("pmemblk_create"); return -1; } bb->nblocks = pmemblk_nblock(bb->pbp); /* limit the number of used blocks */ if (bb->nblocks > req_fsize / args->dsize) bb->nblocks = req_fsize / args->dsize; if (bb->nblocks < args->n_threads) { fprintf(stderr, "too small file size"); goto out_close; } if (bb->type == OP_TYPE_FILE) { pmemblk_close(bb->pbp); bb->pbp = nullptr; int flags = O_RDWR | O_CREAT | O_SYNC; #ifdef _WIN32 flags |= O_BINARY; #endif bb->fd = os_open(args->fname, flags, args->fmode); if (bb->fd < 0) { perror("open"); return -1; } } else if (bb->type == OP_TYPE_MEMCPY) { /* skip pool header, so addr points to the first block */ bb->addr = (char *)bb->pbp + 8192; } bb->blocks_per_thread = bb->nblocks / args->n_threads; if (!ba->no_warmup) { if (blk_do_warmup(bb, args) != 0) goto out_close; } return 0; out_close: if (bb->type == OP_TYPE_FILE) os_close(bb->fd); else pmemblk_close(bb->pbp); return -1; } /* * blk_read_init - function for initializing blk_read benchmark */ static int blk_read_init(struct benchmark *bench, struct benchmark_args *args) { assert(bench != nullptr); assert(args != nullptr); int ret; auto *bb = (struct blk_bench *)malloc(sizeof(struct blk_bench)); if (bb == nullptr) { perror("malloc"); return -1; } pmembench_set_priv(bench, bb); ret = blk_init(bb, args); if (ret != 0) { free(bb); return ret; } switch (bb->type) { case OP_TYPE_FILE: bb->worker = fileio_read; break; case OP_TYPE_BLK: bb->worker = blk_read; break; case OP_TYPE_MEMCPY: bb->worker = memcpy_read; break; default: perror("unknown operation type"); return -1; } return ret; } /* * blk_write_init - function for initializing blk_write benchmark */ static int blk_write_init(struct benchmark *bench, struct benchmark_args *args) { assert(bench != nullptr); assert(args != nullptr); int ret; auto *bb = (struct blk_bench *)malloc(sizeof(struct blk_bench)); if (bb == nullptr) { perror("malloc"); return -1; } pmembench_set_priv(bench, bb); ret = blk_init(bb, args); if (ret != 0) { free(bb); return ret; } switch (bb->type) { case OP_TYPE_FILE: bb->worker = fileio_write; break; case OP_TYPE_BLK: bb->worker = blk_write; break; case OP_TYPE_MEMCPY: bb->worker = memcpy_write; break; default: perror("unknown operation type"); return -1; } return ret; } /* * blk_exit -- function for de-initialization benchmark */ static int blk_exit(struct benchmark *bench, struct benchmark_args *args) { auto *bb = (struct blk_bench *)pmembench_get_priv(bench); char path[PATH_MAX]; if (util_safe_strcpy(path, args->fname, sizeof(path)) != 0) return -1; if (args->is_dynamic_poolset) { if (util_safe_strcpy(path, POOLSET_PATH, sizeof(path)) != 0) return -1; } int result; switch (bb->type) { case OP_TYPE_FILE: os_close(bb->fd); break; case OP_TYPE_BLK: pmemblk_close(bb->pbp); result = pmemblk_check(path, args->dsize); if (result < 0) { perror("pmemblk_check error"); return -1; } else if (result == 0) { perror("pmemblk_check: not consistent"); return -1; } break; case OP_TYPE_MEMCPY: pmemblk_close(bb->pbp); break; default: perror("unknown operation type"); return -1; } free(bb); return 0; } static struct benchmark_clo blk_clo[5]; static struct benchmark_info blk_read_info; static struct benchmark_info blk_write_info; CONSTRUCTOR(blk_constructor) void blk_constructor(void) { blk_clo[0].opt_short = 'o'; blk_clo[0].opt_long = "operation"; blk_clo[0].descr = "Operation type - blk, file, memcpy"; blk_clo[0].type = CLO_TYPE_STR; blk_clo[0].off = 
clo_field_offset(struct blk_args, type_str); blk_clo[0].def = "blk"; blk_clo[1].opt_short = 'w'; blk_clo[1].opt_long = "no-warmup"; blk_clo[1].descr = "Don't do warmup"; blk_clo[1].type = CLO_TYPE_FLAG; blk_clo[1].off = clo_field_offset(struct blk_args, no_warmup); blk_clo[2].opt_short = 'm'; blk_clo[2].opt_long = "mode"; blk_clo[2].descr = "Reading/writing mode - stat, seq, rand"; blk_clo[2].type = CLO_TYPE_STR; blk_clo[2].off = clo_field_offset(struct blk_args, mode_str); blk_clo[2].def = "seq"; blk_clo[3].opt_short = 'S'; blk_clo[3].opt_long = "seed"; blk_clo[3].descr = "Random seed"; blk_clo[3].off = clo_field_offset(struct blk_args, seed); blk_clo[3].def = "1"; blk_clo[3].type = CLO_TYPE_UINT; blk_clo[3].type_uint.size = clo_field_size(struct blk_args, seed); blk_clo[3].type_uint.base = CLO_INT_BASE_DEC; blk_clo[3].type_uint.min = 1; blk_clo[3].type_uint.max = UINT_MAX; blk_clo[4].opt_short = 's'; blk_clo[4].opt_long = "file-size"; blk_clo[4].descr = "Requested file size in bytes - 0 means minimum"; blk_clo[4].type = CLO_TYPE_UINT; blk_clo[4].off = clo_field_offset(struct blk_args, fsize); blk_clo[4].def = "0"; blk_clo[4].type_uint.size = clo_field_size(struct blk_args, fsize); blk_clo[4].type_uint.base = CLO_INT_BASE_DEC; blk_clo[4].type_uint.min = 0; blk_clo[4].type_uint.max = ~0; blk_read_info.name = "blk_read"; blk_read_info.brief = "Benchmark for blk_read() operation"; blk_read_info.init = blk_read_init; blk_read_info.exit = blk_exit; blk_read_info.multithread = true; blk_read_info.multiops = true; blk_read_info.init_worker = blk_init_worker; blk_read_info.free_worker = blk_free_worker; blk_read_info.operation = blk_operation; blk_read_info.clos = blk_clo; blk_read_info.nclos = ARRAY_SIZE(blk_clo); blk_read_info.opts_size = sizeof(struct blk_args); blk_read_info.rm_file = true; blk_read_info.allow_poolset = true; REGISTER_BENCHMARK(blk_read_info); blk_write_info.name = "blk_write"; blk_write_info.brief = "Benchmark for blk_write() operation"; blk_write_info.init = blk_write_init; blk_write_info.exit = blk_exit; blk_write_info.multithread = true; blk_write_info.multiops = true; blk_write_info.init_worker = blk_init_worker; blk_write_info.free_worker = blk_free_worker; blk_write_info.operation = blk_operation; blk_write_info.clos = blk_clo; blk_write_info.nclos = ARRAY_SIZE(blk_clo); blk_write_info.opts_size = sizeof(struct blk_args); blk_write_info.rm_file = true; blk_write_info.allow_poolset = true; REGISTER_BENCHMARK(blk_write_info); }
15,825
21.673352
72
cpp
null
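A minimal standalone sketch of the libpmemblk calls that the blk.cpp benchmark above exercises: create a block pool, write one block atomically, read it back, and close the pool. The pool path, block size and file mode are hypothetical placeholders, and error handling is reduced to the essentials.

#include <libpmemblk.h>
#include <cstdio>
#include <cstring>

int
main(void)
{
	const char *path = "/mnt/pmem/blk-example.pool"; /* hypothetical path */
	const size_t bsize = 512;

	PMEMblkpool *pbp = pmemblk_create(path, bsize, PMEMBLK_MIN_POOL, 0666);
	if (pbp == NULL) {
		perror("pmemblk_create");
		return 1;
	}

	size_t nblocks = pmemblk_nblock(pbp);
	printf("pool holds %zu blocks of %zu bytes\n", nblocks, bsize);

	char buf[512];
	memset(buf, 0xab, sizeof(buf));

	if (pmemblk_write(pbp, buf, 0) < 0)	/* write block 0 atomically */
		perror("pmemblk_write");
	if (pmemblk_read(pbp, buf, 0) < 0)	/* read the same block back */
		perror("pmemblk_read");

	pmemblk_close(pbp);
	return 0;
}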
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/benchmark_worker.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * benchmark_worker.cpp -- benchmark_worker module definitions */ #include <cassert> #include <err.h> #include "benchmark_worker.hpp" #include "sys_util.h" /* * worker_state_wait_for_transition -- wait for transition from and to * specified states */ static void worker_state_wait_for_transition(struct benchmark_worker *worker, enum benchmark_worker_state state, enum benchmark_worker_state new_state) { while (worker->state == state) os_cond_wait(&worker->cond, &worker->lock); assert(worker->state == new_state); } /* * worker_state_transition -- change worker state from and to specified states */ static void worker_state_transition(struct benchmark_worker *worker, enum benchmark_worker_state old_state, enum benchmark_worker_state new_state) { assert(worker->state == old_state); worker->state = new_state; os_cond_signal(&worker->cond); } /* * thread_func -- (internal) callback for os_thread */ static void * thread_func(void *arg) { assert(arg != nullptr); auto *worker = (struct benchmark_worker *)arg; util_mutex_lock(&worker->lock); worker_state_wait_for_transition(worker, WORKER_STATE_IDLE, WORKER_STATE_INIT); if (worker->init) worker->ret_init = worker->init(worker->bench, worker->args, &worker->info); worker_state_transition(worker, WORKER_STATE_INIT, WORKER_STATE_INITIALIZED); if (worker->ret_init) { util_mutex_unlock(&worker->lock); return nullptr; } worker_state_wait_for_transition(worker, WORKER_STATE_INITIALIZED, WORKER_STATE_RUN); worker->ret = worker->func(worker->bench, &worker->info); worker_state_transition(worker, WORKER_STATE_RUN, WORKER_STATE_END); worker_state_wait_for_transition(worker, WORKER_STATE_END, WORKER_STATE_EXIT); if (worker->exit) worker->exit(worker->bench, worker->args, &worker->info); worker_state_transition(worker, WORKER_STATE_EXIT, WORKER_STATE_DONE); util_mutex_unlock(&worker->lock); return nullptr; } /* * benchmark_worker_alloc -- allocate benchmark worker */ struct benchmark_worker * benchmark_worker_alloc(void) { struct benchmark_worker *w = (struct benchmark_worker *)calloc(1, sizeof(*w)); if (!w) return nullptr; util_mutex_init(&w->lock); if (os_cond_init(&w->cond)) goto err_destroy_mutex; if (os_thread_create(&w->thread, nullptr, thread_func, w)) goto err_destroy_cond; return w; err_destroy_cond: os_cond_destroy(&w->cond); err_destroy_mutex: util_mutex_destroy(&w->lock); free(w); return nullptr; } /* * benchmark_worker_free -- release benchmark worker */ void benchmark_worker_free(struct benchmark_worker *w) { os_thread_join(&w->thread, nullptr); os_cond_destroy(&w->cond); util_mutex_destroy(&w->lock); free(w); } /* * benchmark_worker_init -- call init function for worker */ int benchmark_worker_init(struct benchmark_worker *worker) { util_mutex_lock(&worker->lock); worker_state_transition(worker, WORKER_STATE_IDLE, WORKER_STATE_INIT); worker_state_wait_for_transition(worker, WORKER_STATE_INIT, WORKER_STATE_INITIALIZED); int ret = worker->ret_init; util_mutex_unlock(&worker->lock); return ret; } /* * benchmark_worker_exit -- call exit function for worker */ void benchmark_worker_exit(struct benchmark_worker *worker) { util_mutex_lock(&worker->lock); worker_state_transition(worker, WORKER_STATE_END, WORKER_STATE_EXIT); worker_state_wait_for_transition(worker, WORKER_STATE_EXIT, WORKER_STATE_DONE); util_mutex_unlock(&worker->lock); } /* * benchmark_worker_run -- run benchmark worker */ int benchmark_worker_run(struct benchmark_worker *worker) { int ret = 0; 
util_mutex_lock(&worker->lock); worker_state_transition(worker, WORKER_STATE_INITIALIZED, WORKER_STATE_RUN); util_mutex_unlock(&worker->lock); return ret; } /* * benchmark_worker_join -- join benchmark worker */ int benchmark_worker_join(struct benchmark_worker *worker) { util_mutex_lock(&worker->lock); worker_state_wait_for_transition(worker, WORKER_STATE_RUN, WORKER_STATE_END); util_mutex_unlock(&worker->lock); return 0; }
4,177
20.316327
78
cpp
null
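The worker in benchmark_worker.cpp above is driven by a small state machine: one side advances the state under a mutex and signals a condition variable, the other side waits until the state has left a given value. A generic sketch of that wait-for-state/advance-state handshake, written with the standard library rather than the project's os_* wrappers, is shown below.

#include <condition_variable>
#include <mutex>
#include <thread>
#include <cstdio>

enum state { IDLE, RUN, DONE };

static std::mutex mtx;
static std::condition_variable cond;
static state cur = IDLE;

/* wait until the state is no longer 'from'
   (mirrors worker_state_wait_for_transition) */
static void
wait_leave(state from)
{
	std::unique_lock<std::mutex> guard(mtx);
	cond.wait(guard, [&] { return cur != from; });
}

/* advance the state and wake the other side
   (mirrors worker_state_transition) */
static void
advance(state next)
{
	std::lock_guard<std::mutex> guard(mtx);
	cur = next;
	cond.notify_one();
}

int
main(void)
{
	std::thread worker([] {
		wait_leave(IDLE);	/* wait for the RUN command */
		std::puts("worker running");
		advance(DONE);		/* report completion */
	});

	advance(RUN);		/* start the worker */
	wait_leave(RUN);	/* wait until it reports DONE */
	worker.join();
	return 0;
}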
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/benchmark_time.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * benchmark_time.cpp -- benchmark_time module definitions */ #include "benchmark_time.hpp" #include "os.h" #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #define NSECPSEC 1000000000 /* * benchmark_time_get -- get timestamp from clock source */ void benchmark_time_get(benchmark_time_t *time) { os_clock_gettime(CLOCK_MONOTONIC, time); } /* * benchmark_time_diff -- get time interval */ void benchmark_time_diff(benchmark_time_t *d, benchmark_time_t *t1, benchmark_time_t *t2) { long long nsecs = (t2->tv_sec - t1->tv_sec) * NSECPSEC + t2->tv_nsec - t1->tv_nsec; assert(nsecs >= 0); d->tv_sec = nsecs / NSECPSEC; d->tv_nsec = nsecs % NSECPSEC; } /* * benchmark_time_get_secs -- get total number of seconds */ double benchmark_time_get_secs(benchmark_time_t *t) { return (double)t->tv_sec + (double)t->tv_nsec / NSECPSEC; } /* * benchmark_time_get_nsecs -- get total number of nanoseconds */ unsigned long long benchmark_time_get_nsecs(benchmark_time_t *t) { unsigned long long ret = t->tv_nsec; ret += t->tv_sec * NSECPSEC; return ret; } /* * benchmark_time_compare -- compare two moments in time */ int benchmark_time_compare(const benchmark_time_t *t1, const benchmark_time_t *t2) { if (t1->tv_sec == t2->tv_sec) return (int)((long long)t1->tv_nsec - (long long)t2->tv_nsec); else return (int)((long long)t1->tv_sec - (long long)t2->tv_sec); } /* * benchmark_time_set -- set time using number of nanoseconds */ void benchmark_time_set(benchmark_time_t *time, unsigned long long nsecs) { time->tv_sec = nsecs / NSECPSEC; time->tv_nsec = nsecs % NSECPSEC; } /* * number of samples used to calculate average time required to get a current * time from the system */ #define N_PROBES_GET_TIME 10000000UL /* * benchmark_get_avg_get_time -- calculates average time required to get the * current time from the system in nanoseconds */ unsigned long long benchmark_get_avg_get_time(void) { benchmark_time_t time; benchmark_time_t start; benchmark_time_t stop; benchmark_time_get(&start); for (size_t i = 0; i < N_PROBES_GET_TIME; i++) { benchmark_time_get(&time); } benchmark_time_get(&stop); benchmark_time_diff(&time, &start, &stop); unsigned long long avg = benchmark_time_get_nsecs(&time) / N_PROBES_GET_TIME; return avg; }
2,411
20.535714
78
cpp
null
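benchmark_time.cpp above folds two timespec values into a nanosecond count and then splits the result back into a normalized seconds/nanoseconds pair. A small sketch of the same arithmetic around clock_gettime(CLOCK_MONOTONIC), assuming a POSIX system, follows.

#include <ctime>
#include <cstdio>

#define NSECPSEC 1000000000LL

int
main(void)
{
	struct timespec t1, t2;

	clock_gettime(CLOCK_MONOTONIC, &t1);
	/* ... measured work would go here ... */
	clock_gettime(CLOCK_MONOTONIC, &t2);

	/* same arithmetic as benchmark_time_diff(): fold both fields
	   into nanoseconds, then split back into a normalized timespec */
	long long nsecs = (t2.tv_sec - t1.tv_sec) * NSECPSEC +
		(t2.tv_nsec - t1.tv_nsec);

	struct timespec d;
	d.tv_sec = nsecs / NSECPSEC;
	d.tv_nsec = nsecs % NSECPSEC;

	printf("elapsed: %lld.%09ld s\n", (long long)d.tv_sec, d.tv_nsec);
	return 0;
}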
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/benchmark_empty.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ /* * benchmark_empty.cpp -- empty template for benchmarks */ #include <cassert> #include <cerrno> #include <cstddef> #include <cstdio> #include <cstdlib> #include <cstring> #include <fcntl.h> #include <sys/file.h> #include <sys/mman.h> #include <unistd.h> #include "benchmark.hpp" #include "file.h" #include "libpmemobj.h" /* * prog_args -- benchmark specific command line options */ struct prog_args { int my_value; }; /* * obj_bench -- benchmark context */ struct obj_bench { struct prog_args *pa; /* prog_args structure */ }; /* * benchmark_empty_op -- actual benchmark operation */ static int benchmark_empty_op(struct benchmark *bench, struct operation_info *info) { return 0; } /* * benchmark_empty_init -- initialization function */ static int benchmark_empty_init(struct benchmark *bench, struct benchmark_args *args) { assert(bench != nullptr); assert(args != nullptr); assert(args->opts != nullptr); return 0; } /* * benchmark_empty_exit -- benchmark cleanup function */ static int benchmark_empty_exit(struct benchmark *bench, struct benchmark_args *args) { return 0; } static struct benchmark_clo benchmark_empty_clo[0]; /* Stores information about benchmark. */ static struct benchmark_info benchmark_empty_info; CONSTRUCTOR(benchmark_empty_constructor) void benchmark_empty_constructor(void) { benchmark_empty_info.name = "benchmark_empty"; benchmark_empty_info.brief = "Benchmark for benchmark_empty() " "operation"; benchmark_empty_info.init = benchmark_empty_init; benchmark_empty_info.exit = benchmark_empty_exit; benchmark_empty_info.multithread = true; benchmark_empty_info.multiops = true; benchmark_empty_info.operation = benchmark_empty_op; benchmark_empty_info.measure_time = true; benchmark_empty_info.clos = benchmark_empty_clo; benchmark_empty_info.nclos = ARRAY_SIZE(benchmark_empty_clo); benchmark_empty_info.opts_size = sizeof(struct prog_args); benchmark_empty_info.rm_file = true; benchmark_empty_info.allow_poolset = true; REGISTER_BENCHMARK(benchmark_empty_info); };
2,136
22.228261
74
cpp
null
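benchmark_empty.cpp above leaves its option array empty. Filling one benchmark_clo entry for the template's prog_args::my_value field would look roughly like the sketch below, using the clo_field_offset/clo_field_size helpers from the framework's benchmark.hpp (included later in this dump). This is not a standalone program, and the option name and limits are illustrative only.

/* hypothetical option for prog_args::my_value, modelled on the other
 * benchmarks in this directory; requires the framework's benchmark.hpp */
static struct benchmark_clo benchmark_empty_clo[1];

static void
define_my_value_option(void)
{
	benchmark_empty_clo[0].opt_short = 'v';
	benchmark_empty_clo[0].opt_long = "my-value";
	benchmark_empty_clo[0].descr = "Example integer option";
	benchmark_empty_clo[0].def = "0";
	benchmark_empty_clo[0].type = CLO_TYPE_INT;
	benchmark_empty_clo[0].off =
		clo_field_offset(struct prog_args, my_value);
	benchmark_empty_clo[0].type_int.size =
		clo_field_size(struct prog_args, my_value);
	benchmark_empty_clo[0].type_int.base = CLO_INT_BASE_DEC;
	benchmark_empty_clo[0].type_int.min = INT_MIN;
	benchmark_empty_clo[0].type_int.max = INT_MAX;
	/* benchmark_empty_info.clos, .nclos and .opts_size would then point
	 * at this array, exactly as the constructor above does */
}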
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/obj_lanes.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * obj_lanes.cpp -- lane benchmark definition */ #include <cassert> #include <cerrno> #include <unistd.h> #include "benchmark.hpp" #include "file.h" #include "libpmemobj.h" /* an internal libpmemobj code */ #include "lane.h" /* * The number of times to repeat the operation, used to get more accurate * results, because the operation time was minimal compared to the framework * overhead. */ #define OPERATION_REPEAT_COUNT 10000 /* * obj_bench - variables used in benchmark, passed within functions */ struct obj_bench { PMEMobjpool *pop; /* persistent pool handle */ struct prog_args *pa; /* prog_args structure */ }; /* * lanes_init -- benchmark initialization */ static int lanes_init(struct benchmark *bench, struct benchmark_args *args) { assert(bench != nullptr); assert(args != nullptr); assert(args->opts != nullptr); enum file_type type = util_file_get_type(args->fname); if (type == OTHER_ERROR) { fprintf(stderr, "could not check type of file %s\n", args->fname); return -1; } auto *ob = (struct obj_bench *)malloc(sizeof(struct obj_bench)); if (ob == nullptr) { perror("malloc"); return -1; } pmembench_set_priv(bench, ob); ob->pa = (struct prog_args *)args->opts; size_t psize; if (args->is_poolset || type == TYPE_DEVDAX) psize = 0; else psize = PMEMOBJ_MIN_POOL; /* create pmemobj pool */ ob->pop = pmemobj_create(args->fname, "obj_lanes", psize, args->fmode); if (ob->pop == nullptr) { fprintf(stderr, "%s\n", pmemobj_errormsg()); goto err; } return 0; err: free(ob); return -1; } /* * lanes_exit -- benchmark clean up */ static int lanes_exit(struct benchmark *bench, struct benchmark_args *args) { auto *ob = (struct obj_bench *)pmembench_get_priv(bench); pmemobj_close(ob->pop); free(ob); return 0; } /* * lanes_op -- performs the lane hold and release operations */ static int lanes_op(struct benchmark *bench, struct operation_info *info) { auto *ob = (struct obj_bench *)pmembench_get_priv(bench); struct lane *lane; for (int i = 0; i < OPERATION_REPEAT_COUNT; i++) { lane_hold(ob->pop, &lane); lane_release(ob->pop); } return 0; } static struct benchmark_info lanes_info; CONSTRUCTOR(obj_lines_constructor) void obj_lines_constructor(void) { lanes_info.name = "obj_lanes"; lanes_info.brief = "Benchmark for internal lanes " "operation"; lanes_info.init = lanes_init; lanes_info.exit = lanes_exit; lanes_info.multithread = true; lanes_info.multiops = true; lanes_info.operation = lanes_op; lanes_info.measure_time = true; lanes_info.clos = NULL; lanes_info.nclos = 0; lanes_info.opts_size = 0; lanes_info.rm_file = true; lanes_info.allow_poolset = true; REGISTER_BENCHMARK(lanes_info); }
2,813
20.157895
76
cpp
null
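obj_lanes.cpp above repeats the lane hold/release pair OPERATION_REPEAT_COUNT times per operation because a single pair is too cheap to measure next to the framework overhead. The underlying technique is generic: repeat a tiny operation many times, then divide the elapsed time by the repeat count. A standard C++ sketch of that pattern, with a placeholder operation instead of the internal lane API, is shown below.

#include <chrono>
#include <cstdio>

int
main(void)
{
	const long repeats = 10000000;	/* large repeat count to amortize timer overhead */
	volatile long sink = 0;		/* placeholder for the tiny measured operation */

	auto start = std::chrono::steady_clock::now();
	for (long i = 0; i < repeats; i++)
		sink += i;		/* stand-in for lane_hold()/lane_release() */
	auto stop = std::chrono::steady_clock::now();

	auto total_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
		stop - start).count();
	printf("avg per operation: %.2f ns\n", (double)total_ns / repeats);
	return 0;
}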
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/pmemobj_tx_add_range.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * pmemobj_tx_add_range.cpp -- pmemobj_tx_add_range benchmarks definition */ #include <cassert> #include <cerrno> #include <cstddef> #include <cstdio> #include <cstdlib> #include <cstring> #include <fcntl.h> #include <sys/file.h> #include <sys/mman.h> #include <unistd.h> #include "benchmark.hpp" #include "file.h" #include "libpmemobj.h" #define LAYOUT_NAME "tx_add_range_benchmark" /* * POOL_SIZE_COEFFICIENT -- pool has to hold every allocated object with * its snapshot (1 + 1), plus 0.5 because of fragmentation */ #define POOL_SIZE_COEFFICIENT (1 + 1 + 0.5) /* * MAX_ALLOC_SIZE -- maximum size of one allocation (128 MiB) */ #define MAX_ALLOC_SIZE (1024 * 1024 * 128) /* * ranged_obj -- ranged object */ struct ranged_obj { void *ptr; /* address of allocated object */ size_t size; /* size of allocated object */ }; /* * obj_bench_args -- benchmark specific command line options */ struct obj_bench_args { uint64_t nranges; /* number of allocated objects */ bool shuffle_objs; /* shuffles the array of allocated objects */ }; /* * obj_bench -- benchmark context */ struct obj_bench { PMEMobjpool *pop; /* persistent pool handle */ struct ranged_obj *ranges; /* array of ranges */ size_t obj_size; /* size of a single range */ uint64_t nranges; /* number of ranges */ uint64_t nallocs; /* number of allocations */ bool shuffle_objs; /* shuffles array of ranges */ rng_t rng; /* PRNG */ }; /* * shuffle_ranges -- randomly shuffles elements in an array * to avoid sequential pattern in the transaction loop */ static void shuffle_ranges(struct ranged_obj *ranged, uint64_t nranges, rng_t *rng) { struct ranged_obj tmp; uint64_t dest; for (uint64_t n = 0; n < nranges; ++n) { dest = RRAND_R(rng, nranges - 1, 0); tmp = ranged[n]; ranged[n] = ranged[dest]; ranged[dest] = tmp; } } /* * init_ranges -- allocate persistent objects and carve ranges from them */ static int init_ranges(struct obj_bench *ob) { assert(ob->nranges != 0); ob->ranges = (struct ranged_obj *)malloc((ob->nranges) * sizeof(struct ranged_obj)); if (!ob->ranges) { perror("malloc"); return -1; } size_t nranges_per_object = MAX_ALLOC_SIZE / ob->obj_size; for (size_t i = 0, n = 0; n < ob->nranges && i < ob->nallocs; i++) { PMEMoid oid; if (pmemobj_alloc(ob->pop, &oid, MAX_ALLOC_SIZE, 0, nullptr, nullptr)) { perror("pmemobj_alloc"); goto err; } for (size_t j = 0; j < nranges_per_object; j++) { void *ptr = (char *)pmemobj_direct(oid) + (j * ob->obj_size); struct ranged_obj range = {ptr, ob->obj_size}; ob->ranges[n++] = range; if (n == ob->nranges) break; } } if (ob->shuffle_objs == true) shuffle_ranges(ob->ranges, ob->nranges, &ob->rng); return 0; err: free(ob->ranges); return -1; } /* * tx_add_range_init -- initialization function */ static int tx_add_range_init(struct benchmark *bench, struct benchmark_args *args) { assert(bench != nullptr); assert(args != nullptr); assert(args->opts != nullptr); struct obj_bench_args *bargs = (struct obj_bench_args *)args->opts; enum file_type type = util_file_get_type(args->fname); if (type == OTHER_ERROR) { fprintf(stderr, "could not check type of file %s\n", args->fname); return -1; } auto *ob = (struct obj_bench *)malloc(sizeof(struct obj_bench)); if (ob == nullptr) { perror("malloc"); return -1; } /* let's calculate number of allocations */ ob->nallocs = (args->dsize * bargs->nranges / MAX_ALLOC_SIZE) + 1; size_t pool_size; if (args->is_poolset || type == TYPE_DEVDAX) pool_size = 0; else { pool_size = ob->nallocs * MAX_ALLOC_SIZE 
* POOL_SIZE_COEFFICIENT; } /* create pmemobj pool */ ob->pop = pmemobj_create(args->fname, LAYOUT_NAME, pool_size, args->fmode); if (ob->pop == nullptr) { fprintf(stderr, "%s\n", pmemobj_errormsg()); goto err; } ob->nranges = bargs->nranges; ob->obj_size = args->dsize; ob->shuffle_objs = bargs->shuffle_objs; randomize_r(&ob->rng, args->seed); if (init_ranges(ob)) goto err_pop_close; pmembench_set_priv(bench, ob); return 0; err_pop_close: pmemobj_close(ob->pop); err: free(ob); return -1; } /* * tx_add_range_op -- actual benchmark operation */ static int tx_add_range_op(struct benchmark *bench, struct operation_info *info) { auto *ob = (struct obj_bench *)pmembench_get_priv(bench); int ret = 0; TX_BEGIN(ob->pop) { for (size_t i = 0; i < ob->nranges; i++) { struct ranged_obj *r = &ob->ranges[i]; pmemobj_tx_add_range_direct(r->ptr, r->size); } } TX_ONABORT { fprintf(stderr, "transaction failed\n"); ret = -1; } TX_END return ret; } /* * tx_add_range_exit -- benchmark cleanup function */ static int tx_add_range_exit(struct benchmark *bench, struct benchmark_args *args) { auto *ob = (struct obj_bench *)pmembench_get_priv(bench); pmemobj_close(ob->pop); free(ob->ranges); free(ob); return 0; } static struct benchmark_clo tx_add_range_clo[2]; /* Stores information about benchmark. */ static struct benchmark_info tx_add_range_info; CONSTRUCTOR(tx_add_range_constructor) void tx_add_range_constructor(void) { tx_add_range_clo[0].opt_short = 0; tx_add_range_clo[0].opt_long = "num-of-ranges"; tx_add_range_clo[0].descr = "Number of ranges"; tx_add_range_clo[0].def = "1000"; tx_add_range_clo[0].off = clo_field_offset(struct obj_bench_args, nranges); tx_add_range_clo[0].type = CLO_TYPE_UINT; tx_add_range_clo[0].type_uint.size = clo_field_size(struct obj_bench_args, nranges); tx_add_range_clo[0].type_uint.base = CLO_INT_BASE_DEC; tx_add_range_clo[0].type_uint.min = 1; tx_add_range_clo[0].type_uint.max = ULONG_MAX; tx_add_range_clo[1].opt_short = 's'; tx_add_range_clo[1].opt_long = "shuffle"; tx_add_range_clo[1].descr = "Use shuffle objects - " "randomly shuffles array of allocated objects"; tx_add_range_clo[1].def = "false"; tx_add_range_clo[1].off = clo_field_offset(struct obj_bench_args, shuffle_objs); tx_add_range_clo[1].type = CLO_TYPE_FLAG; tx_add_range_info.name = "pmemobj_tx_add_range"; tx_add_range_info.brief = "Benchmark for pmemobj_tx_add_range() " "operation"; tx_add_range_info.init = tx_add_range_init; tx_add_range_info.exit = tx_add_range_exit; tx_add_range_info.multithread = true; tx_add_range_info.multiops = true; tx_add_range_info.operation = tx_add_range_op; tx_add_range_info.measure_time = true; tx_add_range_info.clos = tx_add_range_clo; tx_add_range_info.nclos = ARRAY_SIZE(tx_add_range_clo); tx_add_range_info.opts_size = sizeof(struct obj_bench_args); tx_add_range_info.rm_file = true; tx_add_range_info.allow_poolset = true; REGISTER_BENCHMARK(tx_add_range_info); };
6,782
23.311828
73
cpp
null
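A minimal standalone sketch of the libpmemobj calls measured by pmemobj_tx_add_range.cpp above: allocate an object, then snapshot it with pmemobj_tx_add_range_direct() inside a transaction before modifying it, so the change can be rolled back on abort. The pool path and layout name are hypothetical.

#include <libpmemobj.h>
#include <cstdio>
#include <cstring>

int
main(void)
{
	/* hypothetical pool path and layout name */
	PMEMobjpool *pop = pmemobj_create("/mnt/pmem/tx-example.pool",
		"tx_example", PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL) {
		fprintf(stderr, "%s\n", pmemobj_errormsg());
		return 1;
	}

	PMEMoid oid;
	if (pmemobj_alloc(pop, &oid, 1024, 0, NULL, NULL)) {
		perror("pmemobj_alloc");
		pmemobj_close(pop);
		return 1;
	}

	void *ptr = pmemobj_direct(oid);

	TX_BEGIN(pop) {
		/* snapshot the range so it can be rolled back on abort */
		pmemobj_tx_add_range_direct(ptr, 1024);
		memset(ptr, 0x5a, 1024);
	} TX_ONABORT {
		fprintf(stderr, "transaction aborted\n");
	} TX_END

	pmemobj_free(&oid);
	pmemobj_close(pop);
	return 0;
}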
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/benchmark.hpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * benchmark.hpp -- This file contains interface for creating benchmarks to the * pmembench framework. The _most_ important data structure is * struct benchmark_info which should be properly filled and registered by the * benchmark. Some fields should be filled by meta-data and information about * the benchmark like: name, brief description, supported operation modes etc. * The other group of fields are function callbacks which may be implemented by * the benchmark. Some callbacks are required, others are optional. This is * indicated in the structure description. * * To register a benchmark you can use the special macro * REGISTER_BENCHMARK() which takes static benchmark_info data structure as an * argument. You can also use the pmembench_register() function. Please note * that registering a benchmark should be done at initialization time. You can * achieve this by specifying pmembench_init macro in function attributes: * * static void pmembench_init my_benchmark_init() * { * pmembench_register(&my_benchmark); * } * * However using the REGISTER_BENCHMARK() macro is recommended. */ #ifndef _BENCHMARK_H #define _BENCHMARK_H #include <climits> #include <cstdbool> #include <cstdint> #include <cstdio> #include <cstdlib> #include "benchmark_time.hpp" #include "os.h" #include "rand.h" #include "util.h" #ifndef ARRAY_SIZE #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0])) #endif #define RRAND(max, min) (rand() % ((max) - (min)) + (min)) #define RRAND_R(rng, max, min) (rnd64_r(rng) % ((max) - (min)) + (min)) struct benchmark; /* * benchmark_args - Arguments for benchmark. * * It contains set of common arguments and pointer to benchmark's specific * arguments which are automatically processed by framework according to * clos, nclos and opt_size in benchmark_info structure. */ struct benchmark_args { const char *fname; /* path to test file */ size_t fsize; /* size of test file */ bool is_poolset; /* test file is a poolset */ bool is_dynamic_poolset; /* test file is directory in which benchmark creates reusable files */ mode_t fmode; /* test file's permissions */ unsigned n_threads; /* number of working threads */ size_t n_ops_per_thread; /* number of operations per thread */ bool thread_affinity; /* set worker threads CPU affinity mask */ ssize_t main_affinity; /* main thread affinity */ char *affinity_list; /* set CPU affinity order */ size_t dsize; /* data size */ unsigned seed; /* PRNG seed */ unsigned repeats; /* number of repeats of one scenario */ unsigned min_exe_time; /* minimal execution time */ bool help; /* print help for benchmark */ void *opts; /* benchmark specific arguments */ }; /* * benchmark_results - Benchmark's execution results. 
*/ struct benchmark_results { uint64_t nbytes; /* number of bytes processed */ uint64_t nops; /* number of operations executed */ benchmark_time_t time; /* total execution time */ }; /* * struct results -- statistics for total measurements */ struct results { double min; double max; double avg; double std_dev; double med; }; /* * struct latency -- statistics for latency measurements */ struct latency { uint64_t max; uint64_t min; uint64_t avg; double std_dev; uint64_t pctl50_0p; uint64_t pctl99_0p; uint64_t pctl99_9p; }; /* * struct thread_results -- results of a single thread */ struct thread_results { benchmark_time_t beg; benchmark_time_t end; benchmark_time_t end_op[]; }; /* * struct bench_results -- results of the whole benchmark */ struct bench_results { struct thread_results **thres; }; /* * struct total_results -- results and statistics of the whole benchmark */ struct total_results { size_t nrepeats; size_t nthreads; size_t nops; double nopsps; struct results total; struct latency latency; struct bench_results *res; }; /* * Command Line Option integer value base. */ #define CLO_INT_BASE_NONE 0x0 #define CLO_INT_BASE_DEC 0x1 #define CLO_INT_BASE_HEX 0x2 #define CLO_INT_BASE_OCT 0x4 /* * Command Line Option type. */ enum clo_type { CLO_TYPE_FLAG, CLO_TYPE_STR, CLO_TYPE_INT, CLO_TYPE_UINT, CLO_TYPE_MAX, }; /* * Description of command line option. * * This structure is used to declare command line options by the benchmark * which will be automatically parsed by the framework. * * opt_short : Short option char. If there is no short option write 0. * opt_long : Long option string. * descr : Description of command line option. * off : Offset in data structure in which the value should be stored. * type : Type of command line option. * def : Default value. If set to NULL, this options is required. * ignore_in_res: Do not print in results. * check : Optional callback for checking the command line option value. * type_int : Parameters for signed integer. * type_uint : Parameters for unsigned integer. * type_str : Parameters for string. * * size : Size of integer value. Valid values: 1, 2, 4, 8. * base : Integer base system from which the parsing should be * performed. This field may be used as bit mask by logically * adding different base types. * limit_min : Indicates whether value should be limited by the minimum * value. * limit_max : Indicates whether value should be limited by the maximum * value. * min : Minimum value when limit_min is set. * max : Maximum value when limit_min is set. * * alloc : If set to true the framework should allocate memory for the * value. The memory will be freed by the framework at the end of * execution. Otherwise benchmark must provide valid pointer in * opt_var and max_size parameter must be set properly. * max_size : Maximum size of string. */ struct benchmark_clo { int opt_short; const char *opt_long; enum clo_type type; const char *descr; size_t off; const char *def; bool ignore_in_res; struct { size_t size; int base; int64_t min; int64_t max; } type_int; struct { size_t size; int base; uint64_t min; uint64_t max; } type_uint; int used; }; #define clo_field_offset(s, f) ((size_t) & ((s *)0)->f) #define clo_field_size(s, f) (sizeof(((s *)0)->f)) /* * worker_info - Worker thread's information structure. 
*/ struct worker_info { size_t index; /* index of worker thread */ struct operation_info *opinfo; /* operation info structure */ size_t nops; /* number of operations */ void *priv; /* worker's private data */ benchmark_time_t beg; /* start time */ benchmark_time_t end; /* end time */ }; /* * operation_info - Information about operation. */ struct operation_info { struct worker_info *worker; /* worker's info */ struct benchmark_args *args; /* benchmark arguments */ size_t index; /* operation's index */ benchmark_time_t end; /* operation's end time */ }; /* * struct benchmark_info -- benchmark descriptor * name : Name of benchmark. * brief : Brief description of benchmark. * clos : Command line options which will be automatically parsed by * framework. * nclos : Number of command line options. * opts_size : Size of data structure where the parsed values should be * stored in. * print_help : Callback for printing help message. * pre_init : Function for initialization of the benchmark before parsing * command line arguments. * init : Function for initialization of the benchmark after parsing * command line arguments. * exit : Function for de-initialization of the benchmark. * multithread : Indicates whether the benchmark operation function may be * run in many threads. * multiops : Indicates whether the benchmark operation function may be * run many time in a loop. * measure_time : Indicates whether the benchmark framework should measure the * execution time of operation function. If set to false, the * benchmark must report the execution time by itself. * init_worker : Callback for initialization thread specific data. Invoked in * the worker thread but globally serialized. * operation : Callback function which does the main job of benchmark. * rm_file : Indicates whether the test file should be removed by * framework before the init function will be called. * allow_poolset: Indicates whether benchmark may use poolset files. * If set to false and fname points to a poolset, an error * will be returned. 
* According to multithread and single_operation flags it may be * invoked in different ways: * +-------------+----------+-------------------------------------+ * | multithread | multiops | description | * +-------------+----------+-------------------------------------+ * | false | false | invoked once, in one thread | * +-------------+----------+-------------------------------------+ * | false | true | invoked many times, in one thread | * +-------------+----------+-------------------------------------+ * | true | false | invoked once, in many threads | * +-------------+----------+-------------------------------------+ * | true | true | invoked many times, in many threads | * +-------------+----------+-------------------------------------+ * */ struct benchmark_info { const char *name; const char *brief; struct benchmark_clo *clos; size_t nclos; size_t opts_size; void (*print_help)(struct benchmark *bench); int (*pre_init)(struct benchmark *bench); int (*init)(struct benchmark *bench, struct benchmark_args *args); int (*exit)(struct benchmark *bench, struct benchmark_args *args); int (*init_worker)(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker); void (*free_worker)(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker); int (*operation)(struct benchmark *bench, struct operation_info *info); void (*print_extra_headers)(); void (*print_extra_values)(struct benchmark *bench, struct benchmark_args *args, struct total_results *res); bool multithread; bool multiops; bool measure_time; bool rm_file; bool allow_poolset; bool print_bandwidth; }; void *pmembench_get_priv(struct benchmark *bench); void pmembench_set_priv(struct benchmark *bench, void *priv); struct benchmark_info *pmembench_get_info(struct benchmark *bench); int pmembench_register(struct benchmark_info *bench_info); #define REGISTER_BENCHMARK(bench) \ if (pmembench_register(&(bench))) { \ fprintf(stderr, "Unable to register benchmark '%s'\n", \ (bench).name); \ } #endif /* _BENCHMARK_H */
11,048
32.892638
80
hpp
null
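The operation callback described in benchmark.hpp above receives an operation_info whose worker, args and index fields identify the calling thread, the parsed arguments and the current iteration. A hedged sketch of a typical callback body follows; it compiles only inside the pmembench tree, and example_ctx, example_worker and do_one_chunk are purely illustrative names.

/* illustrative operation callback; requires benchmark.hpp */
static int
example_op(struct benchmark *bench, struct operation_info *info)
{
	/* benchmark-wide private data stored by init() */
	auto *ctx = (struct example_ctx *)pmembench_get_priv(bench);

	/* per-thread private data stored by init_worker() */
	auto *w = (struct example_worker *)info->worker->priv;

	/* pick the chunk for this iteration of this thread */
	size_t off = info->index * info->args->dsize;

	return do_one_chunk(ctx, w, off);	/* hypothetical helper */
}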
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/obj_locks.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_locks.cpp -- main source file for PMEM locks benchmark */ #include <cassert> #include <cerrno> #include "benchmark.hpp" #include "libpmemobj.h" #include "file.h" #include "lane.h" #include "list.h" #include "memops.h" #include "obj.h" #include "os_thread.h" #include "out.h" #include "pmalloc.h" #include "sync.h" struct prog_args { bool use_system_threads; /* use system locks instead of PMEM locks */ unsigned n_locks; /* number of mutex/rwlock objects */ bool run_id_increment; /* increment run_id after each lock/unlock */ uint64_t runid_initial_value; /* initial value of run_id */ char *lock_mode; /* "1by1" or "all-lock" */ char *lock_type; /* "mutex", "rwlock" or "ram-mutex" */ bool use_rdlock; /* use read lock, instead of write lock */ }; /* * mutex similar to PMEMmutex, but with os_mutex_t in RAM */ typedef union padded_volatile_pmemmutex { char padding[_POBJ_CL_SIZE]; struct { uint64_t runid; os_mutex_t *mutexp; /* pointer to os_thread mutex in RAM */ } volatile_pmemmutex; } PMEM_volatile_mutex; typedef union lock_union { PMEMmutex pm_mutex; PMEMrwlock pm_rwlock; PMEM_volatile_mutex pm_vmutex; os_mutex_t pt_mutex; os_rwlock_t pt_rwlock; } lock_t; POBJ_LAYOUT_BEGIN(pmembench_lock_layout); POBJ_LAYOUT_ROOT(pmembench_lock_layout, struct my_root); POBJ_LAYOUT_TOID(pmembench_lock_layout, lock_t); POBJ_LAYOUT_END(pmembench_lock_layout); /* * my_root -- root object structure */ struct my_root { TOID(lock_t) locks; /* an array of locks */ }; /* * lock usage */ enum operation_mode { OP_MODE_1BY1, /* lock and unlock one lock at a time */ OP_MODE_ALL_LOCK, /* grab all locks, then unlock them all */ OP_MODE_MAX, }; /* * lock type */ enum benchmark_mode { BENCH_MODE_MUTEX, /* PMEMmutex vs. os_mutex_t */ BENCH_MODE_RWLOCK, /* PMEMrwlock vs. 
os_rwlock_t */ BENCH_MODE_VOLATILE_MUTEX, /* PMEMmutex with os_thread mutex in RAM */ BENCH_MODE_MAX }; struct mutex_bench; struct bench_ops { int (*bench_init)(struct mutex_bench *); int (*bench_exit)(struct mutex_bench *); int (*bench_op)(struct mutex_bench *); }; /* * mutex_bench -- stores variables used in benchmark, passed within functions */ struct mutex_bench { PMEMobjpool *pop; /* pointer to the persistent pool */ TOID(struct my_root) root; /* OID of the root object */ struct prog_args *pa; /* prog_args structure */ enum operation_mode lock_mode; /* lock usage mode */ enum benchmark_mode lock_type; /* lock type */ lock_t *locks; /* pointer to the array of locks */ struct bench_ops *ops; }; #define GET_VOLATILE_MUTEX(pop, mutexp) \ (os_mutex_t *)get_lock( \ (pop)->run_id, &(mutexp)->volatile_pmemmutex.runid, \ (mutexp)->volatile_pmemmutex.mutexp, \ (int (*)(void **lock, void *arg))volatile_mutex_init) typedef int (*lock_fun_wrapper)(PMEMobjpool *pop, void *lock); /* * bench_operation_1by1 -- acquire lock and unlock release locks */ static void bench_operation_1by1(lock_fun_wrapper flock, lock_fun_wrapper funlock, struct mutex_bench *mb, PMEMobjpool *pop) { for (unsigned i = 0; i < (mb)->pa->n_locks; (i)++) { auto *o = (void *)(&(mb)->locks[i]); flock(pop, o); funlock(pop, o); } } /* * bench_operation_all_lock -- acquire all locks and release all locks */ static void bench_operation_all_lock(lock_fun_wrapper flock, lock_fun_wrapper funlock, struct mutex_bench *mb, PMEMobjpool *pop) { for (unsigned i = 0; i < (mb)->pa->n_locks; (i)++) { auto *o = (void *)(&(mb)->locks[i]); flock(pop, o); } for (unsigned i = 0; i < (mb)->pa->n_locks; i++) { auto *o = (void *)(&(mb)->locks[i]); funlock(pop, o); } } /* * get_lock -- atomically initialize and return a lock */ static void * get_lock(uint64_t pop_runid, volatile uint64_t *runid, void *lock, int (*init_lock)(void **lock, void *arg)) { uint64_t tmp_runid; while ((tmp_runid = *runid) != pop_runid) { if ((tmp_runid != (pop_runid - 1))) { if (util_bool_compare_and_swap64(runid, tmp_runid, (pop_runid - 1))) { if (init_lock(&lock, nullptr)) { util_fetch_and_and64(runid, 0); return nullptr; } if (util_bool_compare_and_swap64( runid, (pop_runid - 1), pop_runid) == 0) { return nullptr; } } } } return lock; } /* * volatile_mutex_init -- initialize the volatile mutex object * * Allocate memory for the os_thread mutex and initialize it. * Set the runid to the same value as in the memory pool. 
*/ static int volatile_mutex_init(os_mutex_t **mutexp, void *attr) { if (*mutexp == nullptr) { *mutexp = (os_mutex_t *)malloc(sizeof(os_mutex_t)); if (*mutexp == nullptr) { perror("volatile_mutex_init alloc"); return ENOMEM; } } return os_mutex_init(*mutexp); } /* * volatile_mutex_lock -- initialize the mutex object if needed and lock it */ static int volatile_mutex_lock(PMEMobjpool *pop, PMEM_volatile_mutex *mutexp) { auto *mutex = GET_VOLATILE_MUTEX(pop, mutexp); if (mutex == nullptr) return EINVAL; return os_mutex_lock(mutex); } /* * volatile_mutex_unlock -- unlock the mutex */ static int volatile_mutex_unlock(PMEMobjpool *pop, PMEM_volatile_mutex *mutexp) { auto *mutex = (os_mutex_t *)GET_VOLATILE_MUTEX(pop, mutexp); if (mutex == nullptr) return EINVAL; return os_mutex_unlock(mutex); } /* * volatile_mutex_destroy -- destroy os_thread mutex and release memory */ static int volatile_mutex_destroy(PMEMobjpool *pop, PMEM_volatile_mutex *mutexp) { auto *mutex = (os_mutex_t *)GET_VOLATILE_MUTEX(pop, mutexp); if (mutex == nullptr) return EINVAL; int ret = os_mutex_destroy(mutex); if (ret != 0) return ret; free(mutex); return 0; } /* * os_mutex_lock_wrapper -- wrapper for os_mutex_lock */ static int os_mutex_lock_wrapper(PMEMobjpool *pop, void *lock) { return os_mutex_lock((os_mutex_t *)lock); } /* * os_mutex_unlock_wrapper -- wrapper for os_mutex_unlock */ static int os_mutex_unlock_wrapper(PMEMobjpool *pop, void *lock) { return os_mutex_unlock((os_mutex_t *)lock); } /* * pmemobj_mutex_lock_wrapper -- wrapper for pmemobj_mutex_lock */ static int pmemobj_mutex_lock_wrapper(PMEMobjpool *pop, void *lock) { return pmemobj_mutex_lock(pop, (PMEMmutex *)lock); } /* * pmemobj_mutex_unlock_wrapper -- wrapper for pmemobj_mutex_unlock */ static int pmemobj_mutex_unlock_wrapper(PMEMobjpool *pop, void *lock) { return pmemobj_mutex_unlock(pop, (PMEMmutex *)lock); } /* * os_rwlock_wrlock_wrapper -- wrapper for os_rwlock_wrlock */ static int os_rwlock_wrlock_wrapper(PMEMobjpool *pop, void *lock) { return os_rwlock_wrlock((os_rwlock_t *)lock); } /* * os_rwlock_rdlock_wrapper -- wrapper for os_rwlock_rdlock */ static int os_rwlock_rdlock_wrapper(PMEMobjpool *pop, void *lock) { return os_rwlock_rdlock((os_rwlock_t *)lock); } /* * os_rwlock_unlock_wrapper -- wrapper for os_rwlock_unlock */ static int os_rwlock_unlock_wrapper(PMEMobjpool *pop, void *lock) { return os_rwlock_unlock((os_rwlock_t *)lock); } /* * pmemobj_rwlock_wrlock_wrapper -- wrapper for pmemobj_rwlock_wrlock */ static int pmemobj_rwlock_wrlock_wrapper(PMEMobjpool *pop, void *lock) { return pmemobj_rwlock_wrlock(pop, (PMEMrwlock *)lock); } /* * pmemobj_rwlock_rdlock_wrapper -- wrapper for pmemobj_rwlock_rdlock */ static int pmemobj_rwlock_rdlock_wrapper(PMEMobjpool *pop, void *lock) { return pmemobj_rwlock_rdlock(pop, (PMEMrwlock *)lock); } /* * pmemobj_rwlock_unlock_wrapper -- wrapper for pmemobj_rwlock_unlock */ static int pmemobj_rwlock_unlock_wrapper(PMEMobjpool *pop, void *lock) { return pmemobj_rwlock_unlock(pop, (PMEMrwlock *)lock); } /* * volatile_mutex_lock_wrapper -- wrapper for volatile_mutex_lock */ static int volatile_mutex_lock_wrapper(PMEMobjpool *pop, void *lock) { return volatile_mutex_lock(pop, (PMEM_volatile_mutex *)lock); } /* * volatile_mutex_unlock_wrapper -- wrapper for volatile_mutex_unlock */ static int volatile_mutex_unlock_wrapper(PMEMobjpool *pop, void *lock) { return volatile_mutex_unlock(pop, (PMEM_volatile_mutex *)lock); } /* * init_bench_mutex -- allocate and initialize mutex objects */ static int 
init_bench_mutex(struct mutex_bench *mb) { POBJ_ZALLOC(mb->pop, &D_RW(mb->root)->locks, lock_t, mb->pa->n_locks * sizeof(lock_t)); if (TOID_IS_NULL(D_RO(mb->root)->locks)) { perror("POBJ_ZALLOC"); return -1; } struct my_root *root = D_RW(mb->root); assert(root != nullptr); mb->locks = D_RW(root->locks); assert(mb->locks != nullptr); if (!mb->pa->use_system_threads) { /* initialize PMEM mutexes */ for (unsigned i = 0; i < mb->pa->n_locks; i++) { auto *p = (PMEMmutex_internal *)&mb->locks[i]; p->pmemmutex.runid = mb->pa->runid_initial_value; os_mutex_init(&p->PMEMmutex_lock); } } else { /* initialize os_thread mutexes */ for (unsigned i = 0; i < mb->pa->n_locks; i++) { auto *p = (os_mutex_t *)&mb->locks[i]; os_mutex_init(p); } } return 0; } /* * exit_bench_mutex -- destroy the mutex objects and release memory */ static int exit_bench_mutex(struct mutex_bench *mb) { if (mb->pa->use_system_threads) { /* deinitialize os_thread mutex objects */ for (unsigned i = 0; i < mb->pa->n_locks; i++) { auto *p = (os_mutex_t *)&mb->locks[i]; os_mutex_destroy(p); } } POBJ_FREE(&D_RW(mb->root)->locks); return 0; } /* * op_bench_mutex -- lock and unlock the mutex object * * If requested, increment the run_id of the memory pool. In case of PMEMmutex * this will force the rwlock object(s) reinitialization at the lock operation. */ static int op_bench_mutex(struct mutex_bench *mb) { if (!mb->pa->use_system_threads) { if (mb->lock_mode == OP_MODE_1BY1) { bench_operation_1by1(pmemobj_mutex_lock_wrapper, pmemobj_mutex_unlock_wrapper, mb, mb->pop); } else { bench_operation_all_lock(pmemobj_mutex_lock_wrapper, pmemobj_mutex_unlock_wrapper, mb, mb->pop); } if (mb->pa->run_id_increment) mb->pop->run_id += 2; /* must be a multiple of 2 */ } else { if (mb->lock_mode == OP_MODE_1BY1) { bench_operation_1by1(os_mutex_lock_wrapper, os_mutex_unlock_wrapper, mb, nullptr); } else { bench_operation_all_lock(os_mutex_lock_wrapper, os_mutex_unlock_wrapper, mb, nullptr); } } return 0; } /* * init_bench_rwlock -- allocate and initialize rwlock objects */ static int init_bench_rwlock(struct mutex_bench *mb) { struct my_root *root = D_RW(mb->root); assert(root != nullptr); POBJ_ZALLOC(mb->pop, &root->locks, lock_t, mb->pa->n_locks * sizeof(lock_t)); if (TOID_IS_NULL(root->locks)) { perror("POBJ_ZALLOC"); return -1; } mb->locks = D_RW(root->locks); assert(mb->locks != nullptr); if (!mb->pa->use_system_threads) { /* initialize PMEM rwlocks */ for (unsigned i = 0; i < mb->pa->n_locks; i++) { auto *p = (PMEMrwlock_internal *)&mb->locks[i]; p->pmemrwlock.runid = mb->pa->runid_initial_value; os_rwlock_init(&p->PMEMrwlock_lock); } } else { /* initialize os_thread rwlocks */ for (unsigned i = 0; i < mb->pa->n_locks; i++) { auto *p = (os_rwlock_t *)&mb->locks[i]; os_rwlock_init(p); } } return 0; } /* * exit_bench_rwlock -- destroy the rwlocks and release memory */ static int exit_bench_rwlock(struct mutex_bench *mb) { if (mb->pa->use_system_threads) { /* deinitialize os_thread mutex objects */ for (unsigned i = 0; i < mb->pa->n_locks; i++) { auto *p = (os_rwlock_t *)&mb->locks[i]; os_rwlock_destroy(p); } } POBJ_FREE(&D_RW(mb->root)->locks); return 0; } /* * op_bench_rwlock -- lock and unlock the rwlock object * * If requested, increment the run_id of the memory pool. In case of PMEMrwlock * this will force the rwlock object(s) reinitialization at the lock operation. */ static int op_bench_rwlock(struct mutex_bench *mb) { if (!mb->pa->use_system_threads) { if (mb->lock_mode == OP_MODE_1BY1) { bench_operation_1by1( !mb->pa->use_rdlock ? 
pmemobj_rwlock_wrlock_wrapper : pmemobj_rwlock_rdlock_wrapper, pmemobj_rwlock_unlock_wrapper, mb, mb->pop); } else { bench_operation_all_lock( !mb->pa->use_rdlock ? pmemobj_rwlock_wrlock_wrapper : pmemobj_rwlock_rdlock_wrapper, pmemobj_rwlock_unlock_wrapper, mb, mb->pop); } if (mb->pa->run_id_increment) mb->pop->run_id += 2; /* must be a multiple of 2 */ } else { if (mb->lock_mode == OP_MODE_1BY1) { bench_operation_1by1( !mb->pa->use_rdlock ? os_rwlock_wrlock_wrapper : os_rwlock_rdlock_wrapper, os_rwlock_unlock_wrapper, mb, nullptr); } else { bench_operation_all_lock( !mb->pa->use_rdlock ? os_rwlock_wrlock_wrapper : os_rwlock_rdlock_wrapper, os_rwlock_unlock_wrapper, mb, nullptr); } } return 0; } /* * init_bench_vmutex -- allocate and initialize mutexes */ static int init_bench_vmutex(struct mutex_bench *mb) { struct my_root *root = D_RW(mb->root); assert(root != nullptr); POBJ_ZALLOC(mb->pop, &root->locks, lock_t, mb->pa->n_locks * sizeof(lock_t)); if (TOID_IS_NULL(root->locks)) { perror("POBJ_ZALLOC"); return -1; } mb->locks = D_RW(root->locks); assert(mb->locks != nullptr); /* initialize PMEM volatile mutexes */ for (unsigned i = 0; i < mb->pa->n_locks; i++) { auto *p = (PMEM_volatile_mutex *)&mb->locks[i]; p->volatile_pmemmutex.runid = mb->pa->runid_initial_value; volatile_mutex_init(&p->volatile_pmemmutex.mutexp, nullptr); } return 0; } /* * exit_bench_vmutex -- destroy the mutex objects and release their * memory */ static int exit_bench_vmutex(struct mutex_bench *mb) { for (unsigned i = 0; i < mb->pa->n_locks; i++) { auto *p = (PMEM_volatile_mutex *)&mb->locks[i]; volatile_mutex_destroy(mb->pop, p); } POBJ_FREE(&D_RW(mb->root)->locks); return 0; } /* * op_bench_volatile_mutex -- lock and unlock the mutex object */ static int op_bench_vmutex(struct mutex_bench *mb) { if (mb->lock_mode == OP_MODE_1BY1) { bench_operation_1by1(volatile_mutex_lock_wrapper, volatile_mutex_unlock_wrapper, mb, mb->pop); } else { bench_operation_all_lock(volatile_mutex_lock_wrapper, volatile_mutex_unlock_wrapper, mb, mb->pop); } if (mb->pa->run_id_increment) mb->pop->run_id += 2; /* must be a multiple of 2 */ return 0; } struct bench_ops benchmark_ops[BENCH_MODE_MAX] = { {init_bench_mutex, exit_bench_mutex, op_bench_mutex}, {init_bench_rwlock, exit_bench_rwlock, op_bench_rwlock}, {init_bench_vmutex, exit_bench_vmutex, op_bench_vmutex}}; /* * operation_mode -- parses command line "--mode" and returns * proper operation mode */ static enum operation_mode parse_op_mode(const char *arg) { if (strcmp(arg, "1by1") == 0) return OP_MODE_1BY1; else if (strcmp(arg, "all-lock") == 0) return OP_MODE_ALL_LOCK; else return OP_MODE_MAX; } /* * benchmark_mode -- parses command line "--bench_type" and returns * proper benchmark ops */ static struct bench_ops * parse_benchmark_mode(const char *arg) { if (strcmp(arg, "mutex") == 0) return &benchmark_ops[BENCH_MODE_MUTEX]; else if (strcmp(arg, "rwlock") == 0) return &benchmark_ops[BENCH_MODE_RWLOCK]; else if (strcmp(arg, "volatile-mutex") == 0) return &benchmark_ops[BENCH_MODE_VOLATILE_MUTEX]; else return nullptr; } /* * locks_init -- allocates persistent memory, maps it, creates the appropriate * objects in the allocated memory and initializes them */ static int locks_init(struct benchmark *bench, struct benchmark_args *args) { assert(bench != nullptr); assert(args != nullptr); enum file_type type = util_file_get_type(args->fname); if (type == OTHER_ERROR) { fprintf(stderr, "could not check type of file %s\n", args->fname); return -1; } int ret = 0; size_t poolsize; struct 
mutex_bench *mb = (struct mutex_bench *)malloc(sizeof(*mb)); if (mb == nullptr) { perror("malloc"); return -1; } mb->pa = (struct prog_args *)args->opts; mb->lock_mode = parse_op_mode(mb->pa->lock_mode); if (mb->lock_mode >= OP_MODE_MAX) { fprintf(stderr, "Invalid mutex mode: %s\n", mb->pa->lock_mode); errno = EINVAL; goto err_free_mb; } mb->ops = parse_benchmark_mode(mb->pa->lock_type); if (mb->ops == nullptr) { fprintf(stderr, "Invalid benchmark type: %s\n", mb->pa->lock_type); errno = EINVAL; goto err_free_mb; } /* reserve some space for metadata */ poolsize = mb->pa->n_locks * sizeof(lock_t) + PMEMOBJ_MIN_POOL; if (args->is_poolset || type == TYPE_DEVDAX) { if (args->fsize < poolsize) { fprintf(stderr, "file size too large\n"); goto err_free_mb; } poolsize = 0; } mb->pop = pmemobj_create(args->fname, POBJ_LAYOUT_NAME(pmembench_lock_layout), poolsize, args->fmode); if (mb->pop == nullptr) { ret = -1; perror("pmemobj_create"); goto err_free_mb; } mb->root = POBJ_ROOT(mb->pop, struct my_root); assert(!TOID_IS_NULL(mb->root)); ret = mb->ops->bench_init(mb); if (ret != 0) goto err_free_pop; pmembench_set_priv(bench, mb); return 0; err_free_pop: pmemobj_close(mb->pop); err_free_mb: free(mb); return ret; } /* * locks_exit -- destroys allocated objects and release memory */ static int locks_exit(struct benchmark *bench, struct benchmark_args *args) { assert(bench != nullptr); assert(args != nullptr); auto *mb = (struct mutex_bench *)pmembench_get_priv(bench); assert(mb != nullptr); mb->ops->bench_exit(mb); pmemobj_close(mb->pop); free(mb); return 0; } /* * locks_op -- actual benchmark operation * * Performs lock and unlock as by the program arguments. */ static int locks_op(struct benchmark *bench, struct operation_info *info) { auto *mb = (struct mutex_bench *)pmembench_get_priv(bench); assert(mb != nullptr); assert(mb->pop != nullptr); assert(!TOID_IS_NULL(mb->root)); assert(mb->locks != nullptr); assert(mb->lock_mode < OP_MODE_MAX); mb->ops->bench_op(mb); return 0; } /* structure to define command line arguments */ static struct benchmark_clo locks_clo[7]; static struct benchmark_info locks_info; CONSTRUCTOR(pmem_locks_constructor) void pmem_locks_constructor(void) { locks_clo[0].opt_short = 'p'; locks_clo[0].opt_long = "use_system_threads"; locks_clo[0].descr = "Use os_thread locks instead of PMEM, " "does not matter for volatile mutex"; locks_clo[0].def = "false"; locks_clo[0].off = clo_field_offset(struct prog_args, use_system_threads); locks_clo[0].type = CLO_TYPE_FLAG; locks_clo[1].opt_short = 'm'; locks_clo[1].opt_long = "numlocks"; locks_clo[1].descr = "The number of lock objects used " "for benchmark"; locks_clo[1].def = "1"; locks_clo[1].off = clo_field_offset(struct prog_args, n_locks); locks_clo[1].type = CLO_TYPE_UINT; locks_clo[1].type_uint.size = clo_field_size(struct prog_args, n_locks); locks_clo[1].type_uint.base = CLO_INT_BASE_DEC; locks_clo[1].type_uint.min = 1; locks_clo[1].type_uint.max = UINT_MAX; locks_clo[2].opt_short = 0; locks_clo[2].opt_long = "mode"; locks_clo[2].descr = "Locking mode"; locks_clo[2].type = CLO_TYPE_STR; locks_clo[2].off = clo_field_offset(struct prog_args, lock_mode); locks_clo[2].def = "1by1"; locks_clo[3].opt_short = 'r'; locks_clo[3].opt_long = "run_id"; locks_clo[3].descr = "Increment the run_id of PMEM object " "pool after each operation"; locks_clo[3].def = "false"; locks_clo[3].off = clo_field_offset(struct prog_args, run_id_increment); locks_clo[3].type = CLO_TYPE_FLAG; locks_clo[4].opt_short = 'i'; locks_clo[4].opt_long = 
"run_id_init_val"; locks_clo[4].descr = "Use this value for initializing the " "run_id of each PMEMmutex object"; locks_clo[4].def = "2"; locks_clo[4].off = clo_field_offset(struct prog_args, runid_initial_value); locks_clo[4].type = CLO_TYPE_UINT; locks_clo[4].type_uint.size = clo_field_size(struct prog_args, runid_initial_value); locks_clo[4].type_uint.base = CLO_INT_BASE_DEC; locks_clo[4].type_uint.min = 0; locks_clo[4].type_uint.max = UINT64_MAX; locks_clo[5].opt_short = 'b'; locks_clo[5].opt_long = "bench_type"; locks_clo[5].descr = "The Benchmark type: mutex, " "rwlock or volatile-mutex"; locks_clo[5].type = CLO_TYPE_STR; locks_clo[5].off = clo_field_offset(struct prog_args, lock_type); locks_clo[5].def = "mutex"; locks_clo[6].opt_short = 'R'; locks_clo[6].opt_long = "rdlock"; locks_clo[6].descr = "Select read over write lock, only " "valid when lock_type is \"rwlock\""; locks_clo[6].type = CLO_TYPE_FLAG; locks_clo[6].off = clo_field_offset(struct prog_args, use_rdlock); locks_info.name = "obj_locks"; locks_info.brief = "Benchmark for pmem locks operations"; locks_info.init = locks_init; locks_info.exit = locks_exit; locks_info.multithread = false; locks_info.multiops = true; locks_info.operation = locks_op; locks_info.measure_time = true; locks_info.clos = locks_clo; locks_info.nclos = ARRAY_SIZE(locks_clo); locks_info.opts_size = sizeof(struct prog_args); locks_info.rm_file = true; locks_info.allow_poolset = true; REGISTER_BENCHMARK(locks_info); };
21,213
23.957647
80
cpp
null
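The obj_locks benchmark above times operations on libpmemobj's persistent locks (PMEMmutex/PMEMrwlock) versus os_thread locks. As a reading aid only, here is a minimal, hypothetical sketch of the kind of lock/unlock cycle a single benchmark operation exercises, written against the public libpmemobj API; the pool path, layout name, and root layout are placeholders, not taken from the benchmark source.

/* sketch: one mutex cycle and one rwlock read cycle on persistent locks */
#include <libpmemobj.h>
#include <stdint.h>
#include <stdio.h>

struct locks_root {
	PMEMmutex mtx;		/* persistent mutex (zeroed state is valid) */
	PMEMrwlock rwlock;	/* persistent read-write lock */
	uint64_t counter;	/* data protected by the locks */
};

int
main(void)
{
	/* path and layout name are placeholders */
	PMEMobjpool *pop = pmemobj_create("/mnt/pmem/locks_example",
			"locks_example", PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL) {
		perror("pmemobj_create");
		return 1;
	}

	PMEMoid root = pmemobj_root(pop, sizeof(struct locks_root));
	struct locks_root *rp = (struct locks_root *)pmemobj_direct(root);

	/* a single lock/unlock cycle, comparable to one benchmark operation */
	pmemobj_mutex_lock(pop, &rp->mtx);
	rp->counter++;
	pmemobj_persist(pop, &rp->counter, sizeof(rp->counter));
	pmemobj_mutex_unlock(pop, &rp->mtx);

	/* read side of the rwlock variant (the -R/--rdlock option above) */
	pmemobj_rwlock_rdlock(pop, &rp->rwlock);
	printf("counter = %lu\n", (unsigned long)rp->counter);
	pmemobj_rwlock_unlock(pop, &rp->rwlock);

	pmemobj_close(pop);
	return 0;
}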
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/pmem_memset.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * pmem_memset.cpp -- benchmark for pmem_memset function */ #include <cassert> #include <cerrno> #include <cstring> #include <fcntl.h> #include <libpmem.h> #include <sys/mman.h> #include <sys/stat.h> #include <unistd.h> #include "benchmark.hpp" #include "file.h" #include "os.h" #define MAX_OFFSET 63 #define CONST_B 0xFF struct memset_bench; typedef int (*operation_fn)(void *dest, int c, size_t len); /* * memset_args -- benchmark specific command line options */ struct memset_args { char *mode; /* operation mode: stat, seq, rand */ bool memset; /* use libc memset function */ bool persist; /* perform persist operation */ bool msync; /* perform msync operation */ bool no_warmup; /* do not do warmup */ size_t chunk_size; /* elementary chunk size */ size_t dest_off; /* destination address offset */ unsigned seed; /* seed for random numbers */ }; /* * memset_bench -- benchmark context */ struct memset_bench { struct memset_args *pargs; /* benchmark specific arguments */ uint64_t *offsets; /* random/sequential address offsets */ size_t n_offsets; /* number of random elements */ int const_b; /* memset() value */ size_t fsize; /* file size */ void *pmem_addr; /* mapped file address */ operation_fn func_op; /* operation function */ }; /* * operation_mode -- mode of operation of memset() */ enum operation_mode { OP_MODE_UNKNOWN, OP_MODE_STAT, /* always use the same chunk */ OP_MODE_SEQ, /* use consecutive chunks */ OP_MODE_RAND /* use random chunks */ }; /* * parse_op_mode -- parse operation mode from string */ static enum operation_mode parse_op_mode(const char *arg) { if (strcmp(arg, "stat") == 0) return OP_MODE_STAT; else if (strcmp(arg, "seq") == 0) return OP_MODE_SEQ; else if (strcmp(arg, "rand") == 0) return OP_MODE_RAND; else return OP_MODE_UNKNOWN; } /* * init_offsets -- initialize offsets[] array depending on the selected mode */ static int init_offsets(struct benchmark_args *args, struct memset_bench *mb, enum operation_mode op_mode) { unsigned n_threads = args->n_threads; size_t n_ops = args->n_ops_per_thread; mb->n_offsets = n_ops * n_threads; assert(mb->n_offsets != 0); mb->offsets = (uint64_t *)malloc(mb->n_offsets * sizeof(*mb->offsets)); if (!mb->offsets) { perror("malloc"); return -1; } rng_t rng; randomize_r(&rng, mb->pargs->seed); for (unsigned i = 0; i < n_threads; i++) { for (size_t j = 0; j < n_ops; j++) { size_t o; switch (op_mode) { case OP_MODE_STAT: o = i; break; case OP_MODE_SEQ: o = i * n_ops + j; break; case OP_MODE_RAND: o = i * n_ops + rnd64_r(&rng) % n_ops; break; default: assert(0); return -1; } mb->offsets[i * n_ops + j] = o * mb->pargs->chunk_size; } } return 0; } /* * libpmem_memset_persist -- perform operation using libpmem * pmem_memset_persist(). */ static int libpmem_memset_persist(void *dest, int c, size_t len) { pmem_memset_persist(dest, c, len); return 0; } /* * libpmem_memset_nodrain -- perform operation using libpmem * pmem_memset_nodrain(). */ static int libpmem_memset_nodrain(void *dest, int c, size_t len) { pmem_memset_nodrain(dest, c, len); return 0; } /* * libc_memset_persist -- perform operation using libc memset() function * followed by pmem_persist(). */ static int libc_memset_persist(void *dest, int c, size_t len) { memset(dest, c, len); pmem_persist(dest, len); return 0; } /* * libc_memset_msync -- perform operation using libc memset() function * followed by pmem_msync(). 
*/ static int libc_memset_msync(void *dest, int c, size_t len) { memset(dest, c, len); return pmem_msync(dest, len); } /* * libc_memset -- perform operation using libc memset() function * followed by pmem_flush(). */ static int libc_memset(void *dest, int c, size_t len) { memset(dest, c, len); pmem_flush(dest, len); return 0; } /* * warmup_persist -- does the warmup by writing the whole pool area */ static int warmup_persist(struct memset_bench *mb) { void *dest = mb->pmem_addr; int c = mb->const_b; size_t len = mb->fsize; pmem_memset_persist(dest, c, len); return 0; } /* * warmup_msync -- does the warmup by writing the whole pool area */ static int warmup_msync(struct memset_bench *mb) { void *dest = mb->pmem_addr; int c = mb->const_b; size_t len = mb->fsize; return libc_memset_msync(dest, c, len); } /* * memset_op -- actual benchmark operation. It can have one of the four * functions assigned: * libc_memset, * libc_memset_persist, * libpmem_memset_nodrain, * libpmem_memset_persist. */ static int memset_op(struct benchmark *bench, struct operation_info *info) { auto *mb = (struct memset_bench *)pmembench_get_priv(bench); assert(info->index < mb->n_offsets); size_t idx = info->worker->index * info->args->n_ops_per_thread + info->index; void *dest = (char *)mb->pmem_addr + mb->offsets[idx] + mb->pargs->dest_off; int c = mb->const_b; size_t len = mb->pargs->chunk_size; mb->func_op(dest, c, len); return 0; } /* * memset_init -- initialization function */ static int memset_init(struct benchmark *bench, struct benchmark_args *args) { assert(bench != nullptr); assert(args != nullptr); assert(args->opts != nullptr); int ret = 0; size_t size; size_t large; size_t little; size_t file_size = 0; int flags = 0; enum file_type type = util_file_get_type(args->fname); if (type == OTHER_ERROR) { fprintf(stderr, "could not check type of file %s\n", args->fname); return -1; } int (*warmup_func)(struct memset_bench *) = warmup_persist; auto *mb = (struct memset_bench *)malloc(sizeof(struct memset_bench)); if (!mb) { perror("malloc"); return -1; } mb->pargs = (struct memset_args *)args->opts; mb->pargs->chunk_size = args->dsize; enum operation_mode op_mode = parse_op_mode(mb->pargs->mode); if (op_mode == OP_MODE_UNKNOWN) { fprintf(stderr, "Invalid operation mode argument '%s'\n", mb->pargs->mode); ret = -1; goto err_free_mb; } size = MAX_OFFSET + mb->pargs->chunk_size; large = size * args->n_ops_per_thread * args->n_threads; little = size * args->n_threads; mb->fsize = (op_mode == OP_MODE_STAT) ? little : large; /* initialize offsets[] array depending on benchmark args */ if (init_offsets(args, mb, op_mode) < 0) { ret = -1; goto err_free_mb; } /* initialize memset() value */ mb->const_b = CONST_B; if (type != TYPE_DEVDAX) { file_size = mb->fsize; flags = PMEM_FILE_CREATE | PMEM_FILE_EXCL; } /* create a pmem file and memory map it */ if ((mb->pmem_addr = pmem_map_file(args->fname, file_size, flags, args->fmode, nullptr, nullptr)) == nullptr) { perror(args->fname); ret = -1; goto err_free_offsets; } if (mb->pargs->memset) { if (mb->pargs->persist && mb->pargs->msync) { fprintf(stderr, "Invalid benchmark parameters: persist and msync cannot be specified together\n"); ret = -1; goto err_free_offsets; } if (mb->pargs->persist) { mb->func_op = libc_memset_persist; } else if (mb->pargs->msync) { mb->func_op = libc_memset_msync; warmup_func = warmup_msync; } else { mb->func_op = libc_memset; } } else { mb->func_op = (mb->pargs->persist) ? 
libpmem_memset_persist : libpmem_memset_nodrain; } if (!mb->pargs->no_warmup && type != TYPE_DEVDAX) { ret = warmup_func(mb); if (ret) { perror("Pool warmup failed"); goto err_free_offsets; } } pmembench_set_priv(bench, mb); return ret; err_free_offsets: free(mb->offsets); err_free_mb: free(mb); return ret; } /* * memset_exit -- benchmark cleanup function */ static int memset_exit(struct benchmark *bench, struct benchmark_args *args) { auto *mb = (struct memset_bench *)pmembench_get_priv(bench); pmem_unmap(mb->pmem_addr, mb->fsize); free(mb->offsets); free(mb); return 0; } static struct benchmark_clo memset_clo[7]; /* Stores information about benchmark. */ static struct benchmark_info memset_info; CONSTRUCTOR(pmem_memset_constructor) void pmem_memset_constructor(void) { memset_clo[0].opt_short = 'M'; memset_clo[0].opt_long = "mem-mode"; memset_clo[0].descr = "Memory writing mode - " "stat, seq, rand"; memset_clo[0].def = "seq"; memset_clo[0].off = clo_field_offset(struct memset_args, mode); memset_clo[0].type = CLO_TYPE_STR; memset_clo[1].opt_short = 'm'; memset_clo[1].opt_long = "memset"; memset_clo[1].descr = "Use libc memset()"; memset_clo[1].def = "false"; memset_clo[1].off = clo_field_offset(struct memset_args, memset); memset_clo[1].type = CLO_TYPE_FLAG; memset_clo[2].opt_short = 'p'; memset_clo[2].opt_long = "persist"; memset_clo[2].descr = "Use pmem_persist()"; memset_clo[2].def = "true"; memset_clo[2].off = clo_field_offset(struct memset_args, persist); memset_clo[2].type = CLO_TYPE_FLAG; memset_clo[3].opt_short = 'D'; memset_clo[3].opt_long = "dest-offset"; memset_clo[3].descr = "Destination cache line alignment " "offset"; memset_clo[3].def = "0"; memset_clo[3].off = clo_field_offset(struct memset_args, dest_off); memset_clo[3].type = CLO_TYPE_UINT; memset_clo[3].type_uint.size = clo_field_size(struct memset_args, dest_off); memset_clo[3].type_uint.base = CLO_INT_BASE_DEC; memset_clo[3].type_uint.min = 0; memset_clo[3].type_uint.max = MAX_OFFSET; memset_clo[4].opt_short = 'w'; memset_clo[4].opt_long = "no-warmup"; memset_clo[4].descr = "Don't do warmup"; memset_clo[4].def = "false"; memset_clo[4].type = CLO_TYPE_FLAG; memset_clo[4].off = clo_field_offset(struct memset_args, no_warmup); memset_clo[5].opt_short = 'S'; memset_clo[5].opt_long = "seed"; memset_clo[5].descr = "seed for random numbers"; memset_clo[5].def = "1"; memset_clo[5].off = clo_field_offset(struct memset_args, seed); memset_clo[5].type = CLO_TYPE_UINT; memset_clo[5].type_uint.size = clo_field_size(struct memset_args, seed); memset_clo[5].type_uint.base = CLO_INT_BASE_DEC; memset_clo[5].type_uint.min = 1; memset_clo[5].type_uint.max = UINT_MAX; memset_clo[6].opt_short = 's'; memset_clo[6].opt_long = "msync"; memset_clo[6].descr = "Use pmem_msync()"; memset_clo[6].def = "false"; memset_clo[6].off = clo_field_offset(struct memset_args, msync); memset_clo[6].type = CLO_TYPE_FLAG; memset_info.name = "pmem_memset"; memset_info.brief = "Benchmark for pmem_memset_persist() " "and pmem_memset_nodrain() operations"; memset_info.init = memset_init; memset_info.exit = memset_exit; memset_info.multithread = true; memset_info.multiops = true; memset_info.operation = memset_op; memset_info.measure_time = true; memset_info.clos = memset_clo; memset_info.nclos = ARRAY_SIZE(memset_clo); memset_info.opts_size = sizeof(struct memset_args); memset_info.rm_file = true; memset_info.allow_poolset = false; memset_info.print_bandwidth = true; REGISTER_BENCHMARK(memset_info); };
11,041
23.375276
86
cpp
null
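The pmem_memset benchmark above compares libc memset() followed by an explicit flush against the single-call pmem_memset_persist()/pmem_memset_nodrain() path. A minimal sketch of those two write paths using the public libpmem API follows; the file path and sizes are placeholders, and it assumes the mapping is real persistent memory.

#include <libpmem.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	size_t mapped_len;
	int is_pmem;

	/* create and map a 4 MiB pmem file; the path is a placeholder */
	char *addr = (char *)pmem_map_file("/mnt/pmem/memset_example",
			4 << 20, PMEM_FILE_CREATE, 0666, &mapped_len, &is_pmem);
	if (addr == NULL) {
		perror("pmem_map_file");
		return 1;
	}

	/* path 1: libc memset() + explicit flush (the --memset --persist case);
	 * assumes is_pmem != 0, otherwise pmem_msync() would be required */
	memset(addr, 0xFF, 4096);
	pmem_persist(addr, 4096);

	/* path 2: single call that stores and flushes (the libpmem case) */
	pmem_memset_persist(addr + 4096, 0xFF, 4096);

	pmem_unmap(addr, mapped_len);
	return 0;
}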
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/scenario.hpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * scenario.hpp -- scenario module declaration */ #include "queue.h" #include <cstdbool> struct kv { PMDK_TAILQ_ENTRY(kv) next; char *key; char *value; }; struct scenario { PMDK_TAILQ_ENTRY(scenario) next; PMDK_TAILQ_HEAD(scenariohead, kv) head; char *name; char *benchmark; char *group; }; struct scenarios { PMDK_TAILQ_HEAD(scenarioshead, scenario) head; }; #define FOREACH_SCENARIO(s, ss) PMDK_TAILQ_FOREACH((s), &(ss)->head, next) #define FOREACH_KV(kv, s) PMDK_TAILQ_FOREACH((kv), &(s)->head, next) struct kv *kv_alloc(const char *key, const char *value); void kv_free(struct kv *kv); struct scenario *scenario_alloc(const char *name, const char *bench); void scenario_free(struct scenario *s); void scenario_set_group(struct scenario *s, const char *group); struct scenarios *scenarios_alloc(void); void scenarios_free(struct scenarios *scenarios); struct scenario *scenarios_get_scenario(struct scenarios *ss, const char *name); bool contains_scenarios(int argc, char **argv, struct scenarios *ss); struct scenario *clone_scenario(struct scenario *src_scenario); struct kv *find_kv_in_scenario(const char *key, const struct scenario *scenario);
1,271
26.06383
80
hpp
null
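scenario.hpp above only declares the containers and helpers; the following short sketch shows how a scenario and its key/value overrides might be built and walked with the declared functions and macros. The scenario name, benchmark name, and key/value strings are made-up examples, and it assumes the alloc/free helpers initialize and tear down the tail queues.

#include <cstdio>
#include "scenario.hpp"	/* assumes the benchmark include paths */

static void
scenario_example(void)
{
	struct scenarios *ss = scenarios_alloc();
	struct scenario *s = scenario_alloc("memset_seq", "pmem_memset");

	/* attach a key/value override to the scenario's tail queue */
	struct kv *pair = kv_alloc("chunk-size", "4096");
	PMDK_TAILQ_INSERT_TAIL(&s->head, pair, next);

	/* add the scenario to the set */
	PMDK_TAILQ_INSERT_TAIL(&ss->head, s, next);

	/* walk every scenario and its key/value pairs */
	struct scenario *it;
	FOREACH_SCENARIO(it, ss) {
		struct kv *kv;
		FOREACH_KV(kv, it)
			printf("%s: %s=%s\n", it->name, kv->key, kv->value);
	}

	scenarios_free(ss);	/* presumably frees scenarios and their kv lists */
}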
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/log.cpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * log.cpp -- pmemlog benchmarks definitions */ #include <cassert> #include <cerrno> #include <cstring> #include <fcntl.h> #include <sys/stat.h> #include <sys/uio.h> #include <unistd.h> #include "benchmark.hpp" #include "file.h" #include "libpmemlog.h" #include "os.h" #include "poolset_util.hpp" #include "rand.h" /* * Size of pool header, pool descriptor * and additional page alignment overhead */ #define POOL_HDR_SIZE (3 * 4096) #define MIN_VEC_SIZE 1 /* * prog_args - benchmark's specific command line arguments */ struct prog_args { unsigned seed; /* seed for pseudo-random generator */ bool rand; /* use random numbers */ int vec_size; /* vector size */ size_t el_size; /* size of single append */ size_t min_size; /* minimum size for random mode */ bool no_warmup; /* don't do warmup */ bool fileio; /* use file io instead of pmemlog */ }; /* * thread_info - thread specific data */ struct log_worker_info { rng_t rng; struct iovec *iov; /* io vector */ char *buf; /* buffer for write/read operations */ size_t buf_size; /* buffer size */ size_t buf_ptr; /* pointer for read operations */ size_t *rand_sizes; size_t *vec_sizes; /* sum of sizes in vector */ }; /* * log_bench - main context of benchmark */ struct log_bench { size_t psize; /* size of pool */ PMEMlogpool *plp; /* pmemlog handle */ struct prog_args *args; /* benchmark specific arguments */ int fd; /* file descriptor for file io mode */ rng_t rng; /* * Pointer to the main benchmark operation. The appropriate function * will be assigned depending on the benchmark specific arguments. */ int (*func_op)(struct benchmark *, struct operation_info *); }; /* * do_warmup -- do warmup by writing the whole pool area */ static int do_warmup(struct log_bench *lb, size_t nops) { int ret = 0; size_t bsize = lb->args->vec_size * lb->args->el_size; auto *buf = (char *)calloc(1, bsize); if (!buf) { perror("calloc"); return -1; } if (!lb->args->fileio) { for (size_t i = 0; i < nops; i++) { if (pmemlog_append(lb->plp, buf, lb->args->el_size) < 0) { ret = -1; perror("pmemlog_append"); goto out; } } pmemlog_rewind(lb->plp); } else { for (size_t i = 0; i < nops; i++) { if (write(lb->fd, buf, (unsigned)lb->args->el_size) != (ssize_t)lb->args->el_size) { ret = -1; perror("write"); os_close(lb->fd); goto out; } } if (os_lseek(lb->fd, 0, SEEK_SET) < 0) { ret = -1; perror("lseek"); os_close(lb->fd); } } out: free(buf); return ret; } /* * log_append -- performs pmemlog_append operation */ static int log_append(struct benchmark *bench, struct operation_info *info) { auto *lb = (struct log_bench *)pmembench_get_priv(bench); assert(lb); auto *worker_info = (struct log_worker_info *)info->worker->priv; assert(worker_info); size_t size = lb->args->rand ? 
worker_info->rand_sizes[info->index] : lb->args->el_size; if (pmemlog_append(lb->plp, worker_info->buf, size) < 0) { perror("pmemlog_append"); return -1; } return 0; } /* * log_appendv -- performs pmemlog_appendv operation */ static int log_appendv(struct benchmark *bench, struct operation_info *info) { auto *lb = (struct log_bench *)pmembench_get_priv(bench); assert(lb); auto *worker_info = (struct log_worker_info *)info->worker->priv; assert(worker_info); struct iovec *iov = &worker_info->iov[info->index * lb->args->vec_size]; if (pmemlog_appendv(lb->plp, iov, lb->args->vec_size) < 0) { perror("pmemlog_appendv"); return -1; } return 0; } /* * fileio_append -- performs fileio append operation */ static int fileio_append(struct benchmark *bench, struct operation_info *info) { auto *lb = (struct log_bench *)pmembench_get_priv(bench); assert(lb); auto *worker_info = (struct log_worker_info *)info->worker->priv; assert(worker_info); size_t size = lb->args->rand ? worker_info->rand_sizes[info->index] : lb->args->el_size; if (write(lb->fd, worker_info->buf, (unsigned)size) != (ssize_t)size) { perror("write"); return -1; } return 0; } /* * fileio_appendv -- performs fileio appendv operation */ static int fileio_appendv(struct benchmark *bench, struct operation_info *info) { auto *lb = (struct log_bench *)pmembench_get_priv(bench); assert(lb != nullptr); auto *worker_info = (struct log_worker_info *)info->worker->priv; assert(worker_info); struct iovec *iov = &worker_info->iov[info->index * lb->args->vec_size]; size_t vec_size = worker_info->vec_sizes[info->index]; if (os_writev(lb->fd, iov, lb->args->vec_size) != (ssize_t)vec_size) { perror("writev"); return -1; } return 0; } /* * log_process_data -- callback function for pmemlog_walk. */ static int log_process_data(const void *buf, size_t len, void *arg) { auto *worker_info = (struct log_worker_info *)arg; size_t left = worker_info->buf_size - worker_info->buf_ptr; if (len > left) { worker_info->buf_ptr = 0; left = worker_info->buf_size; } len = len < left ? len : left; assert(len <= left); void *buff = &worker_info->buf[worker_info->buf_ptr]; memcpy(buff, buf, len); worker_info->buf_ptr += len; return 1; } /* * fileio_read -- perform single fileio read */ static int fileio_read(int fd, ssize_t len, struct log_worker_info *worker_info) { ssize_t left = worker_info->buf_size - worker_info->buf_ptr; if (len > left) { worker_info->buf_ptr = 0; left = worker_info->buf_size; } len = len < left ? len : left; assert(len <= left); size_t off = worker_info->buf_ptr; void *buff = &worker_info->buf[off]; if ((len = pread(fd, buff, len, off)) < 0) return -1; worker_info->buf_ptr += len; return 1; } /* * log_read_op -- perform read operation */ static int log_read_op(struct benchmark *bench, struct operation_info *info) { auto *lb = (struct log_bench *)pmembench_get_priv(bench); assert(lb); auto *worker_info = (struct log_worker_info *)info->worker->priv; assert(worker_info); worker_info->buf_ptr = 0; size_t chunk_size = lb->args->rand ? 
worker_info->rand_sizes[info->index] : lb->args->el_size; if (!lb->args->fileio) { pmemlog_walk(lb->plp, chunk_size, log_process_data, worker_info); return 0; } int ret; while ((ret = fileio_read(lb->fd, chunk_size, worker_info)) == 1) ; return ret; } /* * log_init_worker -- init benchmark worker */ static int log_init_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { int ret = 0; auto *lb = (struct log_bench *)pmembench_get_priv(bench); size_t i_size, n_vectors; assert(lb); auto *worker_info = (struct log_worker_info *)malloc( sizeof(struct log_worker_info)); if (!worker_info) { perror("malloc"); return -1; } /* allocate buffer for append / read */ worker_info->buf_size = lb->args->el_size * lb->args->vec_size; worker_info->buf = (char *)malloc(worker_info->buf_size); if (!worker_info->buf) { perror("malloc"); ret = -1; goto err_free_worker_info; } /* * For random mode, each operation has its own vector with * random sizes. Otherwise there is only one vector with * equal sizes. */ n_vectors = args->n_ops_per_thread; worker_info->iov = (struct iovec *)malloc( n_vectors * lb->args->vec_size * sizeof(struct iovec)); if (!worker_info->iov) { perror("malloc"); ret = -1; goto err_free_buf; } if (lb->args->rand) { /* each thread has random seed */ randomize_r(&worker_info->rng, rnd64_r(&lb->rng)); /* each vector element has its own random size */ size_t n_sizes = args->n_ops_per_thread * lb->args->vec_size; worker_info->rand_sizes = (size_t *)malloc( n_sizes * sizeof(*worker_info->rand_sizes)); if (!worker_info->rand_sizes) { perror("malloc"); ret = -1; goto err_free_iov; } /* generate append sizes */ for (size_t i = 0; i < n_sizes; i++) { size_t width = lb->args->el_size - lb->args->min_size; worker_info->rand_sizes[i] = rnd64_r(&worker_info->rng) % width + lb->args->min_size; } } else { worker_info->rand_sizes = nullptr; } worker_info->vec_sizes = (size_t *)calloc( args->n_ops_per_thread, sizeof(*worker_info->vec_sizes)); if (!worker_info->vec_sizes) { perror("malloc\n"); ret = -1; goto err_free_rand_sizes; } /* fill up the io vectors */ i_size = 0; for (size_t n = 0; n < args->n_ops_per_thread; n++) { size_t buf_ptr = 0; size_t vec_off = n * lb->args->vec_size; for (int i = 0; i < lb->args->vec_size; ++i) { size_t el_size = lb->args->rand ? 
worker_info->rand_sizes[i_size++] : lb->args->el_size; worker_info->iov[vec_off + i].iov_base = &worker_info->buf[buf_ptr]; worker_info->iov[vec_off + i].iov_len = el_size; worker_info->vec_sizes[n] += el_size; buf_ptr += el_size; } } worker->priv = worker_info; return 0; err_free_rand_sizes: free(worker_info->rand_sizes); err_free_iov: free(worker_info->iov); err_free_buf: free(worker_info->buf); err_free_worker_info: free(worker_info); return ret; } /* * log_free_worker -- cleanup benchmark worker */ static void log_free_worker(struct benchmark *bench, struct benchmark_args *args, struct worker_info *worker) { auto *worker_info = (struct log_worker_info *)worker->priv; assert(worker_info); free(worker_info->buf); free(worker_info->iov); free(worker_info->rand_sizes); free(worker_info->vec_sizes); free(worker_info); } /* * log_init -- benchmark initialization function */ static int log_init(struct benchmark *bench, struct benchmark_args *args) { int ret = 0; assert(bench); assert(args != nullptr); assert(args->opts != nullptr); struct benchmark_info *bench_info; char path[PATH_MAX]; if (util_safe_strcpy(path, args->fname, sizeof(path)) != 0) return -1; enum file_type type = util_file_get_type(args->fname); if (type == OTHER_ERROR) { fprintf(stderr, "could not check type of file %s\n", args->fname); return -1; } auto *lb = (struct log_bench *)malloc(sizeof(struct log_bench)); if (!lb) { perror("malloc"); return -1; } lb->args = (struct prog_args *)args->opts; lb->args->el_size = args->dsize; if (lb->args->vec_size == 0) lb->args->vec_size = 1; if (lb->args->rand && lb->args->min_size > lb->args->el_size) { errno = EINVAL; ret = -1; goto err_free_lb; } if (lb->args->rand && lb->args->min_size == lb->args->el_size) lb->args->rand = false; randomize_r(&lb->rng, lb->args->seed); /* align pool size to ensure that we have enough usable space */ lb->psize = ALIGN_UP(POOL_HDR_SIZE + args->n_ops_per_thread * args->n_threads * lb->args->vec_size * lb->args->el_size, Mmap_align); /* calculate a required pool size */ if (lb->psize < PMEMLOG_MIN_POOL) lb->psize = PMEMLOG_MIN_POOL; if (args->is_poolset || type == TYPE_DEVDAX) { if (lb->args->fileio) { fprintf(stderr, "fileio not supported on device dax nor poolset\n"); ret = -1; goto err_free_lb; } if (args->fsize < lb->psize) { fprintf(stderr, "file size too large\n"); ret = -1; goto err_free_lb; } lb->psize = 0; } else if (args->is_dynamic_poolset) { if (lb->args->fileio) { fprintf(stderr, "fileio not supported with dynamic poolset\n"); ret = -1; goto err_free_lb; } ret = dynamic_poolset_create(args->fname, lb->psize); if (ret == -1) goto err_free_lb; if (util_safe_strcpy(path, POOLSET_PATH, sizeof(path)) != 0) goto err_free_lb; lb->psize = 0; } bench_info = pmembench_get_info(bench); if (!lb->args->fileio) { if ((lb->plp = pmemlog_create(path, lb->psize, args->fmode)) == nullptr) { perror("pmemlog_create"); ret = -1; goto err_free_lb; } bench_info->operation = (lb->args->vec_size > 1) ? log_appendv : log_append; } else { int flags = O_CREAT | O_RDWR | O_SYNC; /* Create a file if it does not exist. */ if ((lb->fd = os_open(args->fname, flags, args->fmode)) < 0) { perror(args->fname); ret = -1; goto err_free_lb; } /* allocate the pmem */ if ((errno = os_posix_fallocate(lb->fd, 0, lb->psize)) != 0) { perror("posix_fallocate"); ret = -1; goto err_close; } bench_info->operation = (lb->args->vec_size > 1) ? 
fileio_appendv : fileio_append; } if (!lb->args->no_warmup && type != TYPE_DEVDAX) { size_t warmup_nops = args->n_threads * args->n_ops_per_thread; if (do_warmup(lb, warmup_nops)) { fprintf(stderr, "warmup failed\n"); ret = -1; goto err_close; } } pmembench_set_priv(bench, lb); return 0; err_close: if (lb->args->fileio) os_close(lb->fd); else pmemlog_close(lb->plp); err_free_lb: free(lb); return ret; } /* * log_exit -- cleanup benchmark */ static int log_exit(struct benchmark *bench, struct benchmark_args *args) { auto *lb = (struct log_bench *)pmembench_get_priv(bench); if (!lb->args->fileio) pmemlog_close(lb->plp); else os_close(lb->fd); free(lb); return 0; } /* command line options definition */ static struct benchmark_clo log_clo[6]; /* log_append benchmark info */ static struct benchmark_info log_append_info; /* log_read benchmark info */ static struct benchmark_info log_read_info; CONSTRUCTOR(log_constructor) void log_constructor(void) { log_clo[0].opt_short = 'r'; log_clo[0].opt_long = "random"; log_clo[0].descr = "Use random sizes for append/read"; log_clo[0].off = clo_field_offset(struct prog_args, rand); log_clo[0].type = CLO_TYPE_FLAG; log_clo[1].opt_short = 'S'; log_clo[1].opt_long = "seed"; log_clo[1].descr = "Random mode"; log_clo[1].off = clo_field_offset(struct prog_args, seed); log_clo[1].def = "1"; log_clo[1].type = CLO_TYPE_UINT; log_clo[1].type_uint.size = clo_field_size(struct prog_args, seed); log_clo[1].type_uint.base = CLO_INT_BASE_DEC; log_clo[1].type_uint.min = 1; log_clo[1].type_uint.max = UINT_MAX; log_clo[2].opt_short = 'i'; log_clo[2].opt_long = "file-io"; log_clo[2].descr = "File I/O mode"; log_clo[2].off = clo_field_offset(struct prog_args, fileio); log_clo[2].type = CLO_TYPE_FLAG; log_clo[3].opt_short = 'w'; log_clo[3].opt_long = "no-warmup"; log_clo[3].descr = "Don't do warmup", log_clo[3].type = CLO_TYPE_FLAG; log_clo[3].off = clo_field_offset(struct prog_args, no_warmup); log_clo[4].opt_short = 'm'; log_clo[4].opt_long = "min-size"; log_clo[4].descr = "Minimum size of append/read for " "random mode"; log_clo[4].type = CLO_TYPE_UINT; log_clo[4].off = clo_field_offset(struct prog_args, min_size); log_clo[4].def = "1"; log_clo[4].type_uint.size = clo_field_size(struct prog_args, min_size); log_clo[4].type_uint.base = CLO_INT_BASE_DEC; log_clo[4].type_uint.min = 1; log_clo[4].type_uint.max = UINT64_MAX; /* this one is only for log_append */ log_clo[5].opt_short = 'v'; log_clo[5].opt_long = "vector"; log_clo[5].descr = "Vector size"; log_clo[5].off = clo_field_offset(struct prog_args, vec_size); log_clo[5].def = "1"; log_clo[5].type = CLO_TYPE_INT; log_clo[5].type_int.size = clo_field_size(struct prog_args, vec_size); log_clo[5].type_int.base = CLO_INT_BASE_DEC; log_clo[5].type_int.min = MIN_VEC_SIZE; log_clo[5].type_int.max = INT_MAX; log_append_info.name = "log_append"; log_append_info.brief = "Benchmark for pmemlog_append() " "operation"; log_append_info.init = log_init; log_append_info.exit = log_exit; log_append_info.multithread = true; log_append_info.multiops = true; log_append_info.init_worker = log_init_worker; log_append_info.free_worker = log_free_worker; /* this will be assigned in log_init */ log_append_info.operation = nullptr; log_append_info.measure_time = true; log_append_info.clos = log_clo; log_append_info.nclos = ARRAY_SIZE(log_clo); log_append_info.opts_size = sizeof(struct prog_args); log_append_info.rm_file = true; log_append_info.allow_poolset = true; REGISTER_BENCHMARK(log_append_info); log_read_info.name = "log_read"; 
log_read_info.brief = "Benchmark for pmemlog_walk() " "operation"; log_read_info.init = log_init; log_read_info.exit = log_exit; log_read_info.multithread = true; log_read_info.multiops = true; log_read_info.init_worker = log_init_worker; log_read_info.free_worker = log_free_worker; log_read_info.operation = log_read_op; log_read_info.measure_time = true; log_read_info.clos = log_clo; /* without vector */ log_read_info.nclos = ARRAY_SIZE(log_clo) - 1; log_read_info.opts_size = sizeof(struct prog_args); log_read_info.rm_file = true; log_read_info.allow_poolset = true; REGISTER_BENCHMARK(log_read_info); };
16,617
22.979798
73
cpp
null
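The log_append and log_read benchmarks above measure pmemlog_append()/pmemlog_appendv() and pmemlog_walk(). For reference, here is a minimal libpmemlog round trip; the pool path is a placeholder and error handling is kept to the essentials.

#include <libpmemlog.h>
#include <stdio.h>

/* pmemlog_walk() callback: print each chunk; returning non-zero continues */
static int
print_chunk(const void *buf, size_t len, void *arg)
{
	(void) arg;
	printf("chunk of %zu bytes: %.*s\n", len, (int)len, (const char *)buf);
	return 1;
}

int
main(void)
{
	PMEMlogpool *plp = pmemlog_create("/mnt/pmem/log_example",
			PMEMLOG_MIN_POOL, 0666);
	if (plp == NULL) {
		perror("pmemlog_create");
		return 1;
	}

	const char msg[] = "hello, pmemlog";
	if (pmemlog_append(plp, msg, sizeof(msg)) < 0)
		perror("pmemlog_append");

	/* walk the log in chunks of sizeof(msg) bytes */
	pmemlog_walk(plp, sizeof(msg), print_chunk, NULL);

	pmemlog_close(plp);
	return 0;
}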
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/benchmarks/clo.hpp
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * clo.hpp -- command line options module declarations */ int benchmark_clo_parse(int argc, char *argv[], struct benchmark_clo *clos, ssize_t nclo, struct clo_vec *clovec); int benchmark_clo_parse_scenario(struct scenario *scenario, struct benchmark_clo *clos, size_t nclo, struct clo_vec *clovec); const char *benchmark_clo_str(struct benchmark_clo *clo, void *args, size_t size); int clo_get_scenarios(int argc, char *argv[], struct scenarios *available_scenarios, struct scenarios *found_scenarios); int benchmark_override_clos_in_scenario(struct scenario *scenario, int argc, char *argv[], struct benchmark_clo *clos, int nclos);
772
39.684211
76
hpp
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/page_size.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, IBM Corporation */ #ifndef PMDK_PAGE_SIZE_H #define PMDK_PAGE_SIZE_H #if defined(__x86_64) || defined(_M_X64) || defined(__aarch64__) #define PMEM_PAGESIZE 4096 #elif defined(__PPC64__) #define PMEM_PAGESIZE 65536 #else #error unable to recognize ISA at compile time #endif #endif /* PMDK_PAGE_SIZE_H */
374
16.045455
64
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/ctl.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * ctl.h -- internal declaration of statistics and control related structures */ #ifndef PMDK_CTL_H #define PMDK_CTL_H 1 #include "queue.h" #include "errno.h" #include "out.h" #ifdef __cplusplus extern "C" { #endif struct ctl; struct ctl_index { const char *name; long value; PMDK_SLIST_ENTRY(ctl_index) entry; }; PMDK_SLIST_HEAD(ctl_indexes, ctl_index); enum ctl_query_source { CTL_UNKNOWN_QUERY_SOURCE, /* query executed directly from the program */ CTL_QUERY_PROGRAMMATIC, /* query executed from the config file */ CTL_QUERY_CONFIG_INPUT, MAX_CTL_QUERY_SOURCE }; enum ctl_query_type { CTL_QUERY_READ, CTL_QUERY_WRITE, CTL_QUERY_RUNNABLE, MAX_CTL_QUERY_TYPE }; typedef int (*node_callback)(void *ctx, enum ctl_query_source type, void *arg, struct ctl_indexes *indexes); enum ctl_node_type { CTL_NODE_UNKNOWN, CTL_NODE_NAMED, CTL_NODE_LEAF, CTL_NODE_INDEXED, MAX_CTL_NODE }; typedef int (*ctl_arg_parser)(const void *arg, void *dest, size_t dest_size); struct ctl_argument_parser { size_t dest_offset; /* offset of the field inside of the argument */ size_t dest_size; /* size of the field inside of the argument */ ctl_arg_parser parser; }; struct ctl_argument { size_t dest_size; /* sizeof the entire argument */ struct ctl_argument_parser parsers[]; /* array of 'fields' in arg */ }; #define sizeof_member(t, m) sizeof(((t *)0)->m) #define CTL_ARG_PARSER(t, p)\ {0, sizeof(t), p} #define CTL_ARG_PARSER_STRUCT(t, m, p)\ {offsetof(t, m), sizeof_member(t, m), p} #define CTL_ARG_PARSER_END {0, 0, NULL} /* * CTL Tree node structure, do not use directly. All the necessary functionality * is provided by the included macros. */ struct ctl_node { const char *name; enum ctl_node_type type; node_callback cb[MAX_CTL_QUERY_TYPE]; const struct ctl_argument *arg; const struct ctl_node *children; }; struct ctl *ctl_new(void); void ctl_delete(struct ctl *stats); int ctl_load_config_from_string(struct ctl *ctl, void *ctx, const char *cfg_string); int ctl_load_config_from_file(struct ctl *ctl, void *ctx, const char *cfg_file); /* Use through CTL_REGISTER_MODULE, never directly */ void ctl_register_module_node(struct ctl *c, const char *name, struct ctl_node *n); int ctl_arg_boolean(const void *arg, void *dest, size_t dest_size); #define CTL_ARG_BOOLEAN {sizeof(int),\ {{0, sizeof(int), ctl_arg_boolean},\ CTL_ARG_PARSER_END}}; int ctl_arg_integer(const void *arg, void *dest, size_t dest_size); #define CTL_ARG_INT {sizeof(int),\ {{0, sizeof(int), ctl_arg_integer},\ CTL_ARG_PARSER_END}}; #define CTL_ARG_LONG_LONG {sizeof(long long),\ {{0, sizeof(long long), ctl_arg_integer},\ CTL_ARG_PARSER_END}}; int ctl_arg_string(const void *arg, void *dest, size_t dest_size); #define CTL_ARG_STRING(len) {len,\ {{0, len, ctl_arg_string},\ CTL_ARG_PARSER_END}}; #define CTL_STR(name) #name #define CTL_NODE_END {NULL, CTL_NODE_UNKNOWN, {NULL, NULL, NULL}, NULL, NULL} #define CTL_NODE(name, ...)\ ctl_node_##__VA_ARGS__##_##name int ctl_query(struct ctl *ctl, void *ctx, enum ctl_query_source source, const char *name, enum ctl_query_type type, void *arg); /* Declaration of a new child node */ #define CTL_CHILD(name, ...)\ {CTL_STR(name), CTL_NODE_NAMED, {NULL, NULL, NULL}, NULL,\ (struct ctl_node *)CTL_NODE(name, __VA_ARGS__)} /* Declaration of a new indexed node */ #define CTL_INDEXED(name, ...)\ {CTL_STR(name), CTL_NODE_INDEXED, {NULL, NULL, NULL}, NULL,\ (struct ctl_node *)CTL_NODE(name, __VA_ARGS__)} #define CTL_READ_HANDLER(name, ...)\ 
ctl_##__VA_ARGS__##_##name##_read #define CTL_WRITE_HANDLER(name, ...)\ ctl_##__VA_ARGS__##_##name##_write #define CTL_RUNNABLE_HANDLER(name, ...)\ ctl_##__VA_ARGS__##_##name##_runnable #define CTL_ARG(name)\ ctl_arg_##name /* * Declaration of a new read-only leaf. If used the corresponding read function * must be declared by CTL_READ_HANDLER macro. */ #define CTL_LEAF_RO(name, ...)\ {CTL_STR(name), CTL_NODE_LEAF, \ {CTL_READ_HANDLER(name, __VA_ARGS__), NULL, NULL}, NULL, NULL} /* * Declaration of a new write-only leaf. If used the corresponding write * function must be declared by CTL_WRITE_HANDLER macro. */ #define CTL_LEAF_WO(name, ...)\ {CTL_STR(name), CTL_NODE_LEAF, \ {NULL, CTL_WRITE_HANDLER(name, __VA_ARGS__), NULL},\ &CTL_ARG(name), NULL} /* * Declaration of a new runnable leaf. If used the corresponding run * function must be declared by CTL_RUNNABLE_HANDLER macro. */ #define CTL_LEAF_RUNNABLE(name, ...)\ {CTL_STR(name), CTL_NODE_LEAF, \ {NULL, NULL, CTL_RUNNABLE_HANDLER(name, __VA_ARGS__)},\ NULL, NULL} /* * Declaration of a new read-write leaf. If used both read and write function * must be declared by CTL_READ_HANDLER and CTL_WRITE_HANDLER macros. */ #define CTL_LEAF_RW(name)\ {CTL_STR(name), CTL_NODE_LEAF,\ {CTL_READ_HANDLER(name), CTL_WRITE_HANDLER(name), NULL},\ &CTL_ARG(name), NULL} #define CTL_REGISTER_MODULE(_ctl, name)\ ctl_register_module_node((_ctl), CTL_STR(name),\ (struct ctl_node *)CTL_NODE(name)) #ifdef __cplusplus } #endif #endif
5,127
24.261084
80
h
null
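ctl.h above defines the node and leaf macros but contains no example of a module. The sketch below shows, under the macro conventions declared above, how a read-only leaf could be wired up and registered; the module name "example", the leaf name "count", and the returned value are all made up.

#include "ctl.h"

/* read handler; the generated name must match CTL_LEAF_RO(count) below */
static int
CTL_READ_HANDLER(count)(void *ctx, enum ctl_query_source source,
	void *arg, struct ctl_indexes *indexes)
{
	(void) ctx; (void) source; (void) indexes;

	int *out = (int *)arg;
	*out = 42;	/* placeholder value */
	return 0;
}

/* subtree with a single read-only leaf */
static const struct ctl_node CTL_NODE(example)[] = {
	CTL_LEAF_RO(count),
	CTL_NODE_END
};

/* attach the subtree to a ctl instance, e.g. once at startup */
static void
example_ctl_register(struct ctl *c)
{
	CTL_REGISTER_MODULE(c, example);
}

/*
 * A caller could then read the leaf with something like:
 *   int value;
 *   ctl_query(c, ctx, CTL_QUERY_PROGRAMMATIC, "example.count",
 *       CTL_QUERY_READ, &value);
 */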
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/set_badblocks.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2020, Intel Corporation */ /* * set_badblocks.h - poolset part of bad blocks API */ #ifndef PMDK_SET_BADBLOCKS_H #define PMDK_SET_BADBLOCKS_H 1 #include "set.h" #ifdef __cplusplus extern "C" { #endif int badblocks_check_poolset(struct pool_set *set, int create); int badblocks_clear_poolset(struct pool_set *set, int create); char *badblocks_recovery_file_alloc(const char *file, unsigned rep, unsigned part); int badblocks_recovery_file_exists(struct pool_set *set); #ifdef __cplusplus } #endif #endif /* PMDK_SET_BADBLOCKS_H */
604
19.862069
62
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/os_deep.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2018, Intel Corporation */ /* * os_deep.h -- abstraction layer for common usage of deep_* functions */ #ifndef PMDK_OS_DEEP_PERSIST_H #define PMDK_OS_DEEP_PERSIST_H 1 #include <stdint.h> #include <stddef.h> #include "set.h" #ifdef __cplusplus extern "C" { #endif int os_range_deep_common(uintptr_t addr, size_t len); int os_part_deep_common(struct pool_replica *rep, unsigned partidx, void *addr, size_t len, int flush); #ifdef __cplusplus } #endif #endif
527
17.857143
79
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/ctl_global.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * ctl_global.h -- definitions for the global CTL namespace */ #ifndef PMDK_CTL_GLOBAL_H #define PMDK_CTL_GLOBAL_H 1 #ifdef __cplusplus extern "C" { #endif extern void ctl_prefault_register(void); extern void ctl_sds_register(void); extern void ctl_fallocate_register(void); extern void ctl_cow_register(void); static inline void ctl_global_register(void) { ctl_prefault_register(); ctl_sds_register(); ctl_fallocate_register(); ctl_cow_register(); } #ifdef __cplusplus } #endif #endif
587
16.294118
59
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/file.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * file.h -- internal definitions for file module */ #ifndef PMDK_FILE_H #define PMDK_FILE_H 1 #include <stddef.h> #include <sys/stat.h> #include <sys/types.h> #include <dirent.h> #include <limits.h> #include "os.h" #ifdef __cplusplus extern "C" { #endif #ifdef _WIN32 #define NAME_MAX _MAX_FNAME #endif struct file_info { char filename[NAME_MAX + 1]; int is_dir; }; struct dir_handle { const char *path; #ifdef _WIN32 HANDLE handle; char *_file; #else DIR *dirp; #endif }; enum file_type { OTHER_ERROR = -2, NOT_EXISTS = -1, TYPE_NORMAL = 1, TYPE_DEVDAX = 2 }; int util_file_dir_open(struct dir_handle *a, const char *path); int util_file_dir_next(struct dir_handle *a, struct file_info *info); int util_file_dir_close(struct dir_handle *a); int util_file_dir_remove(const char *path); int util_file_exists(const char *path); enum file_type util_stat_get_type(const os_stat_t *st); enum file_type util_fd_get_type(int fd); enum file_type util_file_get_type(const char *path); int util_ddax_region_find(const char *path, unsigned *region_id); ssize_t util_file_get_size(const char *path); ssize_t util_fd_get_size(int fd); size_t util_file_device_dax_alignment(const char *path); void *util_file_map_whole(const char *path); int util_file_zero(const char *path, os_off_t off, size_t len); ssize_t util_file_pread(const char *path, void *buffer, size_t size, os_off_t offset); ssize_t util_file_pwrite(const char *path, const void *buffer, size_t size, os_off_t offset); int util_tmpfile(const char *dir, const char *templ, int flags); int util_is_absolute_path(const char *path); int util_file_create(const char *path, size_t size, size_t minsize); int util_file_open(const char *path, size_t *size, size_t minsize, int flags); int util_unlink(const char *path); int util_unlink_flock(const char *path); int util_file_mkdir(const char *path, mode_t mode); int util_write_all(int fd, const char *buf, size_t count); #ifndef _WIN32 #define util_read read #define util_write write #else static inline ssize_t util_read(int fd, void *buf, size_t count) { /* * Simulate short read, because Windows' _read uses "unsigned" as * a type of the last argument and "int" as a return type. * We have to limit "count" to what _read can return as a success, * not what it can accept. */ if (count > INT_MAX) count = INT_MAX; return _read(fd, buf, (unsigned)count); } static inline ssize_t util_write(int fd, const void *buf, size_t count) { /* * Simulate short write, because Windows' _write uses "unsigned" as * a type of the last argument and "int" as a return type. * We have to limit "count" to what _write can return as a success, * not what it can accept. */ if (count > INT_MAX) count = INT_MAX; return _write(fd, buf, (unsigned)count); } #define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR) #define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) #endif #ifdef __cplusplus } #endif #endif
3,013
24.982759
78
h
null
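A small sketch using a few of the util_file_* helpers declared in file.h above to probe a path before mapping it; the function name and the decision logic are illustrative only.

#include <stdio.h>
#include "file.h"

static int
probe(const char *path)
{
	enum file_type type = util_file_get_type(path);
	if (type == OTHER_ERROR || type == NOT_EXISTS) {
		fprintf(stderr, "cannot use %s\n", path);
		return -1;
	}

	ssize_t size = util_file_get_size(path);
	if (size < 0)
		return -1;

	if (type == TYPE_DEVDAX) {
		/* device DAX: respect its internal alignment */
		size_t align = util_file_device_dax_alignment(path);
		printf("devdax, size %zd, alignment %zu\n", size, align);
	} else {
		printf("regular file, size %zd\n", size);
	}
	return 0;
}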
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/badblocks.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * badblocks.h -- bad blocks API based on the libpmem2 library */ #ifndef PMDK_BADBLOCKS_H #define PMDK_BADBLOCKS_H 1 #include <string.h> #include <stdint.h> #include <sys/types.h> #ifdef __cplusplus extern "C" { #endif #define B2SEC(n) ((n) >> 9) /* convert bytes to sectors */ #define SEC2B(n) ((n) << 9) /* convert sectors to bytes */ #define NO_HEALTHY_REPLICA ((int)(-1)) #define BB_NOT_SUPP \ "checking bad blocks is not supported on this OS, please switch off the CHECK_BAD_BLOCKS compat feature using 'pmempool-feature'" /* * 'struct badblock' is already defined in ndctl/libndctl.h, * so we cannot use this name. * * libndctl returns offset relative to the beginning of the region, * but in this structure we save offset relative to the beginning of: * - namespace (before badblocks_get()) * and * - file (before sync_recalc_badblocks()) * and * - pool (after sync_recalc_badblocks()) */ struct bad_block { /* * offset in bytes relative to the beginning of * - namespace (before badblocks_get()) * and * - file (before sync_recalc_badblocks()) * and * - pool (after sync_recalc_badblocks()) */ size_t offset; /* length in bytes */ size_t length; /* number of healthy replica to fix this bad block */ int nhealthy; }; struct badblocks { unsigned bb_cnt; /* number of bad blocks */ struct bad_block *bbv; /* array of bad blocks */ }; struct badblocks *badblocks_new(void); void badblocks_delete(struct badblocks *bbs); long badblocks_count(const char *path); int badblocks_get(const char *file, struct badblocks *bbs); int badblocks_clear(const char *path, struct badblocks *bbs); int badblocks_clear_all(const char *file); int badblocks_check_file(const char *path); #ifdef __cplusplus } #endif #endif /* PMDK_BADBLOCKS_H */
1,878
23.089744
130
h
null
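A minimal use of the bad-block API declared in badblocks.h above: query a file for bad blocks and print them. The path is supplied by the caller and error handling is trimmed.

#include <stdio.h>
#include "badblocks.h"

static void
report_badblocks(const char *path)
{
	struct badblocks *bbs = badblocks_new();
	if (bbs == NULL)
		return;

	if (badblocks_get(path, bbs) == 0) {
		for (unsigned i = 0; i < bbs->bb_cnt; i++)
			printf("bad block at offset %zu, length %zu\n",
				bbs->bbv[i].offset, bbs->bbv[i].length);
	}

	badblocks_delete(bbs);
}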
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/util_pmem.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2018, Intel Corporation */ /* * util_pmem.h -- internal definitions for pmem utils */ #ifndef PMDK_UTIL_PMEM_H #define PMDK_UTIL_PMEM_H 1 #include "libpmem.h" #include "out.h" #ifdef __cplusplus extern "C" { #endif /* * util_persist -- flush to persistence */ static inline void util_persist(int is_pmem, const void *addr, size_t len) { LOG(3, "is_pmem %d, addr %p, len %zu", is_pmem, addr, len); if (is_pmem) pmem_persist(addr, len); else if (pmem_msync(addr, len)) FATAL("!pmem_msync"); } /* * util_persist_auto -- flush to persistence */ static inline void util_persist_auto(int is_pmem, const void *addr, size_t len) { LOG(3, "is_pmem %d, addr %p, len %zu", is_pmem, addr, len); util_persist(is_pmem || pmem_is_pmem(addr, len), addr, len); } #ifdef __cplusplus } #endif #endif /* util_pmem.h */
883
17.416667
61
h
null
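The inline helpers above choose between pmem_persist() and pmem_msync(). A one-function sketch of how a caller that has already memory-mapped a range might use them; is_pmem would typically come from pmem_map_file() or pmem_is_pmem().

#include <string.h>
#include "util_pmem.h"

/* store data into a mapped range and flush it to persistence */
static void
store_and_flush(void *dst, const void *src, size_t len, int is_pmem)
{
	memcpy(dst, src, len);
	util_persist(is_pmem, dst, len);

	/* util_persist_auto(is_pmem, dst, len) could be used instead when
	 * the caller is unsure whether the whole range really is pmem */
}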
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/pmemcommon.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * pmemcommon.h -- definitions for "common" module */ #ifndef PMEMCOMMON_H #define PMEMCOMMON_H 1 #include "mmap.h" #include "pmemcore.h" #ifdef __cplusplus extern "C" { #endif static inline void common_init(const char *log_prefix, const char *log_level_var, const char *log_file_var, int major_version, int minor_version) { core_init(log_prefix, log_level_var, log_file_var, major_version, minor_version); util_mmap_init(); } static inline void common_fini(void) { util_mmap_fini(); core_fini(); } #ifdef __cplusplus } #endif #endif
642
15.075
66
h
null
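common_init()/common_fini() above are thin wrappers around core and mmap setup. A hypothetical sketch of how a library built on this module might call them from its load/unload hooks; the prefix, environment-variable names, and version numbers are made up, and the constructor/destructor attributes stand in for whatever init mechanism the library actually uses.

#include "pmemcommon.h"

#define EXAMPLE_LOG_PREFIX "example"
#define EXAMPLE_LOG_LEVEL_VAR "EXAMPLE_LOG_LEVEL"
#define EXAMPLE_LOG_FILE_VAR "EXAMPLE_LOG_FILE"

__attribute__((constructor))
static void
example_init(void)
{
	/* set up logging and the mmap tracking infrastructure */
	common_init(EXAMPLE_LOG_PREFIX, EXAMPLE_LOG_LEVEL_VAR,
		EXAMPLE_LOG_FILE_VAR, 1, 0);
}

__attribute__((destructor))
static void
example_fini(void)
{
	common_fini();
}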
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/shutdown_state.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * shutdown_state.h -- unsafe shutdown detection */ #ifndef PMDK_SHUTDOWN_STATE_H #define PMDK_SHUTDOWN_STATE_H 1 #include <stdint.h> #ifdef __cplusplus extern "C" { #endif struct pool_replica; struct shutdown_state { uint64_t usc; uint64_t uuid; /* UID checksum */ uint8_t dirty; uint8_t reserved[39]; uint64_t checksum; }; int shutdown_state_init(struct shutdown_state *sds, struct pool_replica *rep); int shutdown_state_add_part(struct shutdown_state *sds, int fd, struct pool_replica *rep); void shutdown_state_set_dirty(struct shutdown_state *sds, struct pool_replica *rep); void shutdown_state_clear_dirty(struct shutdown_state *sds, struct pool_replica *rep); int shutdown_state_check(struct shutdown_state *curr_sds, struct shutdown_state *pool_sds, struct pool_replica *rep); #ifdef __cplusplus } #endif #endif /* shutdown_state.h */
950
21.642857
78
h
null
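A heavily simplified outline of how the declarations in shutdown_state.h above fit together for unsafe-shutdown detection. The real sequence lives in the pool-set open/close path, so treat the ordering and the meaning of the return value below as assumptions, not as the actual implementation.

#include "shutdown_state.h"

/*
 * Assumed flow (reading aid only):
 *  - build a fresh state from the current device counters:
 *      shutdown_state_init(&curr, rep);
 *      for each part fd: shutdown_state_add_part(&curr, fd, rep);
 *  - compare it with the state stored in the pool header:
 *      shutdown_state_check(&curr, pool_sds, rep);  // non-zero presumably
 *                                                   // signals a problem
 *  - mark the pool dirty while it is open, clean it on close:
 *      shutdown_state_set_dirty(pool_sds, rep);
 *      ...
 *      shutdown_state_clear_dirty(pool_sds, rep);
 */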

NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/uuid.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * uuid.h -- internal definitions for uuid module */ #ifndef PMDK_UUID_H #define PMDK_UUID_H 1 #include <stdint.h> #include <string.h> #ifdef __cplusplus extern "C" { #endif /* * Structure for binary version of uuid. From RFC4122, * https://tools.ietf.org/html/rfc4122 */ struct uuid { uint32_t time_low; uint16_t time_mid; uint16_t time_hi_and_ver; uint8_t clock_seq_hi; uint8_t clock_seq_low; uint8_t node[6]; }; #define POOL_HDR_UUID_LEN 16 /* uuid byte length */ #define POOL_HDR_UUID_STR_LEN 37 /* uuid string length */ #define POOL_HDR_UUID_GEN_FILE "/proc/sys/kernel/random/uuid" typedef unsigned char uuid_t[POOL_HDR_UUID_LEN]; /* 16 byte binary uuid value */ int util_uuid_generate(uuid_t uuid); int util_uuid_to_string(const uuid_t u, char *buf); int util_uuid_from_string(const char uuid[POOL_HDR_UUID_STR_LEN], struct uuid *ud); /* * uuidcmp -- compare two uuids */ static inline int uuidcmp(const uuid_t uuid1, const uuid_t uuid2) { return memcmp(uuid1, uuid2, POOL_HDR_UUID_LEN); } #ifdef __cplusplus } #endif #endif
1,145
19.464286
80
h
null
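A short sketch of the uuid helpers declared in uuid.h above: generate two uuids, render one as text, and compare them. The string buffer size comes from POOL_HDR_UUID_STR_LEN; return-value checks are omitted for brevity.

#include <stdio.h>
#include "uuid.h"

static void
uuid_example(void)
{
	uuid_t a, b;
	char str[POOL_HDR_UUID_STR_LEN];

	/* fill both uuids with fresh random values */
	util_uuid_generate(a);
	util_uuid_generate(b);

	/* convert the binary uuid to its textual form */
	util_uuid_to_string(a, str);
	printf("generated uuid: %s\n", str);

	/* byte-wise comparison of the two values */
	printf("a %s b\n", uuidcmp(a, b) == 0 ? "==" : "!=");
}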
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/queue.h
/* * Source: glibc 2.24 (git://sourceware.org/glibc.git /misc/sys/queue.h) * * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)queue.h 8.5 (Berkeley) 8/20/94 */ #ifndef _PMDK_QUEUE_H_ #define _PMDK_QUEUE_H_ /* * This file defines five types of data structures: singly-linked lists, * lists, simple queues, tail queues, and circular queues. * * A singly-linked list is headed by a single forward pointer. The * elements are singly linked for minimum space and pointer manipulation * overhead at the expense of O(n) removal for arbitrary elements. New * elements can be added to the list after an existing element or at the * head of the list. Elements being removed from the head of the list * should use the explicit macro for this purpose for optimum * efficiency. A singly-linked list may only be traversed in the forward * direction. Singly-linked lists are ideal for applications with large * datasets and few or no removals or for implementing a LIFO queue. * * A list is headed by a single forward pointer (or an array of forward * pointers for a hash table header). The elements are doubly linked * so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before * or after an existing element or at the head of the list. A list * may only be traversed in the forward direction. * * A simple queue is headed by a pair of pointers, one the head of the * list and the other to the tail of the list. The elements are singly * linked to save space, so elements can only be removed from the * head of the list. New elements can be added to the list after * an existing element, at the head of the list, or at the end of the * list. A simple queue may only be traversed in the forward direction. * * A tail queue is headed by a pair of pointers, one to the head of the * list and the other to the tail of the list. 
The elements are doubly * linked so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before or * after an existing element, at the head of the list, or at the end of * the list. A tail queue may be traversed in either direction. * * A circle queue is headed by a pair of pointers, one to the head of the * list and the other to the tail of the list. The elements are doubly * linked so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before or after * an existing element, at the head of the list, or at the end of the list. * A circle queue may be traversed in either direction, but has a more * complex end of list detection. * * For details on the use of these macros, see the queue(3) manual page. */ /* * XXX This is a workaround for a bug in the llvm's static analyzer. For more * info see https://github.com/pmem/issues/issues/309. */ #ifdef __clang_analyzer__ static void custom_assert(void) { abort(); } #define ANALYZER_ASSERT(x) (__builtin_expect(!(x), 0) ? (void)0 : custom_assert()) #else #define ANALYZER_ASSERT(x) do {} while (0) #endif /* * List definitions. */ #define PMDK_LIST_HEAD(name, type) \ struct name { \ struct type *lh_first; /* first element */ \ } #define PMDK_LIST_HEAD_INITIALIZER(head) \ { NULL } #ifdef __cplusplus #define PMDK__CAST_AND_ASSIGN(x, y) x = (__typeof__(x))y; #else #define PMDK__CAST_AND_ASSIGN(x, y) x = (void *)(y); #endif #define PMDK_LIST_ENTRY(type) \ struct { \ struct type *le_next; /* next element */ \ struct type **le_prev; /* address of previous next element */ \ } /* * List functions. */ #define PMDK_LIST_INIT(head) do { \ (head)->lh_first = NULL; \ } while (/*CONSTCOND*/0) #define PMDK_LIST_INSERT_AFTER(listelm, elm, field) do { \ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ (listelm)->field.le_next->field.le_prev = \ &(elm)->field.le_next; \ (listelm)->field.le_next = (elm); \ (elm)->field.le_prev = &(listelm)->field.le_next; \ } while (/*CONSTCOND*/0) #define PMDK_LIST_INSERT_BEFORE(listelm, elm, field) do { \ (elm)->field.le_prev = (listelm)->field.le_prev; \ (elm)->field.le_next = (listelm); \ *(listelm)->field.le_prev = (elm); \ (listelm)->field.le_prev = &(elm)->field.le_next; \ } while (/*CONSTCOND*/0) #define PMDK_LIST_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.le_next = (head)->lh_first) != NULL) \ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ (head)->lh_first = (elm); \ (elm)->field.le_prev = &(head)->lh_first; \ } while (/*CONSTCOND*/0) #define PMDK_LIST_REMOVE(elm, field) do { \ ANALYZER_ASSERT((elm) != NULL); \ if ((elm)->field.le_next != NULL) \ (elm)->field.le_next->field.le_prev = \ (elm)->field.le_prev; \ *(elm)->field.le_prev = (elm)->field.le_next; \ } while (/*CONSTCOND*/0) #define PMDK_LIST_FOREACH(var, head, field) \ for ((var) = ((head)->lh_first); \ (var); \ (var) = ((var)->field.le_next)) /* * List access methods. */ #define PMDK_LIST_EMPTY(head) ((head)->lh_first == NULL) #define PMDK_LIST_FIRST(head) ((head)->lh_first) #define PMDK_LIST_NEXT(elm, field) ((elm)->field.le_next) /* * Singly-linked List definitions. */ #define PMDK_SLIST_HEAD(name, type) \ struct name { \ struct type *slh_first; /* first element */ \ } #define PMDK_SLIST_HEAD_INITIALIZER(head) \ { NULL } #define PMDK_SLIST_ENTRY(type) \ struct { \ struct type *sle_next; /* next element */ \ } /* * Singly-linked List functions. 
*/ #define PMDK_SLIST_INIT(head) do { \ (head)->slh_first = NULL; \ } while (/*CONSTCOND*/0) #define PMDK_SLIST_INSERT_AFTER(slistelm, elm, field) do { \ (elm)->field.sle_next = (slistelm)->field.sle_next; \ (slistelm)->field.sle_next = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_SLIST_INSERT_HEAD(head, elm, field) do { \ (elm)->field.sle_next = (head)->slh_first; \ (head)->slh_first = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_SLIST_REMOVE_HEAD(head, field) do { \ (head)->slh_first = (head)->slh_first->field.sle_next; \ } while (/*CONSTCOND*/0) #define PMDK_SLIST_REMOVE(head, elm, type, field) do { \ if ((head)->slh_first == (elm)) { \ PMDK_SLIST_REMOVE_HEAD((head), field); \ } \ else { \ struct type *curelm = (head)->slh_first; \ while(curelm->field.sle_next != (elm)) \ curelm = curelm->field.sle_next; \ curelm->field.sle_next = \ curelm->field.sle_next->field.sle_next; \ } \ } while (/*CONSTCOND*/0) #define PMDK_SLIST_FOREACH(var, head, field) \ for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next) /* * Singly-linked List access methods. */ #define PMDK_SLIST_EMPTY(head) ((head)->slh_first == NULL) #define PMDK_SLIST_FIRST(head) ((head)->slh_first) #define PMDK_SLIST_NEXT(elm, field) ((elm)->field.sle_next) /* * Singly-linked Tail queue declarations. */ #define PMDK_STAILQ_HEAD(name, type) \ struct name { \ struct type *stqh_first; /* first element */ \ struct type **stqh_last; /* addr of last next element */ \ } #define PMDK_STAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).stqh_first } #define PMDK_STAILQ_ENTRY(type) \ struct { \ struct type *stqe_next; /* next element */ \ } /* * Singly-linked Tail queue functions. */ #define PMDK_STAILQ_INIT(head) do { \ (head)->stqh_first = NULL; \ (head)->stqh_last = &(head)->stqh_first; \ } while (/*CONSTCOND*/0) #define PMDK_STAILQ_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \ (head)->stqh_last = &(elm)->field.stqe_next; \ (head)->stqh_first = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_STAILQ_INSERT_TAIL(head, elm, field) do { \ (elm)->field.stqe_next = NULL; \ *(head)->stqh_last = (elm); \ (head)->stqh_last = &(elm)->field.stqe_next; \ } while (/*CONSTCOND*/0) #define PMDK_STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\ (head)->stqh_last = &(elm)->field.stqe_next; \ (listelm)->field.stqe_next = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_STAILQ_REMOVE_HEAD(head, field) do { \ if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \ (head)->stqh_last = &(head)->stqh_first; \ } while (/*CONSTCOND*/0) #define PMDK_STAILQ_REMOVE(head, elm, type, field) do { \ if ((head)->stqh_first == (elm)) { \ PMDK_STAILQ_REMOVE_HEAD((head), field); \ } else { \ struct type *curelm = (head)->stqh_first; \ while (curelm->field.stqe_next != (elm)) \ curelm = curelm->field.stqe_next; \ if ((curelm->field.stqe_next = \ curelm->field.stqe_next->field.stqe_next) == NULL) \ (head)->stqh_last = &(curelm)->field.stqe_next; \ } \ } while (/*CONSTCOND*/0) #define PMDK_STAILQ_FOREACH(var, head, field) \ for ((var) = ((head)->stqh_first); \ (var); \ (var) = ((var)->field.stqe_next)) #define PMDK_STAILQ_CONCAT(head1, head2) do { \ if (!PMDK_STAILQ_EMPTY((head2))) { \ *(head1)->stqh_last = (head2)->stqh_first; \ (head1)->stqh_last = (head2)->stqh_last; \ PMDK_STAILQ_INIT((head2)); \ } \ } while (/*CONSTCOND*/0) /* * Singly-linked Tail queue access methods. 
*/ #define PMDK_STAILQ_EMPTY(head) ((head)->stqh_first == NULL) #define PMDK_STAILQ_FIRST(head) ((head)->stqh_first) #define PMDK_STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) /* * Simple queue definitions. */ #define PMDK_SIMPLEQ_HEAD(name, type) \ struct name { \ struct type *sqh_first; /* first element */ \ struct type **sqh_last; /* addr of last next element */ \ } #define PMDK_SIMPLEQ_HEAD_INITIALIZER(head) \ { NULL, &(head).sqh_first } #define PMDK_SIMPLEQ_ENTRY(type) \ struct { \ struct type *sqe_next; /* next element */ \ } /* * Simple queue functions. */ #define PMDK_SIMPLEQ_INIT(head) do { \ (head)->sqh_first = NULL; \ (head)->sqh_last = &(head)->sqh_first; \ } while (/*CONSTCOND*/0) #define PMDK_SIMPLEQ_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ (head)->sqh_last = &(elm)->field.sqe_next; \ (head)->sqh_first = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_SIMPLEQ_INSERT_TAIL(head, elm, field) do { \ (elm)->field.sqe_next = NULL; \ *(head)->sqh_last = (elm); \ (head)->sqh_last = &(elm)->field.sqe_next; \ } while (/*CONSTCOND*/0) #define PMDK_SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\ (head)->sqh_last = &(elm)->field.sqe_next; \ (listelm)->field.sqe_next = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_SIMPLEQ_REMOVE_HEAD(head, field) do { \ if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \ (head)->sqh_last = &(head)->sqh_first; \ } while (/*CONSTCOND*/0) #define PMDK_SIMPLEQ_REMOVE(head, elm, type, field) do { \ if ((head)->sqh_first == (elm)) { \ PMDK_SIMPLEQ_REMOVE_HEAD((head), field); \ } else { \ struct type *curelm = (head)->sqh_first; \ while (curelm->field.sqe_next != (elm)) \ curelm = curelm->field.sqe_next; \ if ((curelm->field.sqe_next = \ curelm->field.sqe_next->field.sqe_next) == NULL) \ (head)->sqh_last = &(curelm)->field.sqe_next; \ } \ } while (/*CONSTCOND*/0) #define PMDK_SIMPLEQ_FOREACH(var, head, field) \ for ((var) = ((head)->sqh_first); \ (var); \ (var) = ((var)->field.sqe_next)) /* * Simple queue access methods. */ #define PMDK_SIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL) #define PMDK_SIMPLEQ_FIRST(head) ((head)->sqh_first) #define PMDK_SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) /* * Tail queue definitions. */ #define PMDK__TAILQ_HEAD(name, type, qual) \ struct name { \ qual type *tqh_first; /* first element */ \ qual type *qual *tqh_last; /* addr of last next element */ \ } #define PMDK_TAILQ_HEAD(name, type) PMDK__TAILQ_HEAD(name, struct type,) #define PMDK_TAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).tqh_first } #define PMDK__TAILQ_ENTRY(type, qual) \ struct { \ qual type *tqe_next; /* next element */ \ qual type *qual *tqe_prev; /* address of previous next element */\ } #define PMDK_TAILQ_ENTRY(type) PMDK__TAILQ_ENTRY(struct type,) /* * Tail queue functions. 
*/ #define PMDK_TAILQ_INIT(head) do { \ (head)->tqh_first = NULL; \ (head)->tqh_last = &(head)->tqh_first; \ } while (/*CONSTCOND*/0) #define PMDK_TAILQ_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ (head)->tqh_first->field.tqe_prev = \ &(elm)->field.tqe_next; \ else \ (head)->tqh_last = &(elm)->field.tqe_next; \ (head)->tqh_first = (elm); \ (elm)->field.tqe_prev = &(head)->tqh_first; \ } while (/*CONSTCOND*/0) #define PMDK_TAILQ_INSERT_TAIL(head, elm, field) do { \ (elm)->field.tqe_next = NULL; \ (elm)->field.tqe_prev = (head)->tqh_last; \ *(head)->tqh_last = (elm); \ (head)->tqh_last = &(elm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define PMDK_TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ (elm)->field.tqe_next->field.tqe_prev = \ &(elm)->field.tqe_next; \ else \ (head)->tqh_last = &(elm)->field.tqe_next; \ (listelm)->field.tqe_next = (elm); \ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define PMDK_TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ (elm)->field.tqe_next = (listelm); \ *(listelm)->field.tqe_prev = (elm); \ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define PMDK_TAILQ_REMOVE(head, elm, field) do { \ ANALYZER_ASSERT((elm) != NULL); \ if (((elm)->field.tqe_next) != NULL) \ (elm)->field.tqe_next->field.tqe_prev = \ (elm)->field.tqe_prev; \ else \ (head)->tqh_last = (elm)->field.tqe_prev; \ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define PMDK_TAILQ_FOREACH(var, head, field) \ for ((var) = ((head)->tqh_first); \ (var); \ (var) = ((var)->field.tqe_next)) #define PMDK_TAILQ_FOREACH_REVERSE(var, head, headname, field) \ for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ (var); \ (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) #define PMDK_TAILQ_CONCAT(head1, head2, field) do { \ if (!PMDK_TAILQ_EMPTY(head2)) { \ *(head1)->tqh_last = (head2)->tqh_first; \ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ (head1)->tqh_last = (head2)->tqh_last; \ PMDK_TAILQ_INIT((head2)); \ } \ } while (/*CONSTCOND*/0) /* * Tail queue access methods. */ #define PMDK_TAILQ_EMPTY(head) ((head)->tqh_first == NULL) #define PMDK_TAILQ_FIRST(head) ((head)->tqh_first) #define PMDK_TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) #define PMDK_TAILQ_LAST(head, headname) \ (*(((struct headname *)((head)->tqh_last))->tqh_last)) #define PMDK_TAILQ_PREV(elm, headname, field) \ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) /* * Circular queue definitions. */ #define PMDK_CIRCLEQ_HEAD(name, type) \ struct name { \ struct type *cqh_first; /* first element */ \ struct type *cqh_last; /* last element */ \ } #define PMDK_CIRCLEQ_HEAD_INITIALIZER(head) \ { (void *)&(head), (void *)&(head) } #define PMDK_CIRCLEQ_ENTRY(type) \ struct { \ struct type *cqe_next; /* next element */ \ struct type *cqe_prev; /* previous element */ \ } /* * Circular queue functions. 
*/ #define PMDK_CIRCLEQ_INIT(head) do { \ PMDK__CAST_AND_ASSIGN((head)->cqh_first, (head)); \ PMDK__CAST_AND_ASSIGN((head)->cqh_last, (head)); \ } while (/*CONSTCOND*/0) #define PMDK_CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ (elm)->field.cqe_next = (listelm)->field.cqe_next; \ (elm)->field.cqe_prev = (listelm); \ if ((listelm)->field.cqe_next == (void *)(head)) \ (head)->cqh_last = (elm); \ else \ (listelm)->field.cqe_next->field.cqe_prev = (elm); \ (listelm)->field.cqe_next = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ (elm)->field.cqe_next = (listelm); \ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \ if ((listelm)->field.cqe_prev == (void *)(head)) \ (head)->cqh_first = (elm); \ else \ (listelm)->field.cqe_prev->field.cqe_next = (elm); \ (listelm)->field.cqe_prev = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ (elm)->field.cqe_next = (head)->cqh_first; \ (elm)->field.cqe_prev = (void *)(head); \ if ((head)->cqh_last == (void *)(head)) \ (head)->cqh_last = (elm); \ else \ (head)->cqh_first->field.cqe_prev = (elm); \ (head)->cqh_first = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ PMDK__CAST_AND_ASSIGN((elm)->field.cqe_next, (head)); \ (elm)->field.cqe_prev = (head)->cqh_last; \ if ((head)->cqh_first == (void *)(head)) \ (head)->cqh_first = (elm); \ else \ (head)->cqh_last->field.cqe_next = (elm); \ (head)->cqh_last = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_CIRCLEQ_REMOVE(head, elm, field) do { \ if ((elm)->field.cqe_next == (void *)(head)) \ (head)->cqh_last = (elm)->field.cqe_prev; \ else \ (elm)->field.cqe_next->field.cqe_prev = \ (elm)->field.cqe_prev; \ if ((elm)->field.cqe_prev == (void *)(head)) \ (head)->cqh_first = (elm)->field.cqe_next; \ else \ (elm)->field.cqe_prev->field.cqe_next = \ (elm)->field.cqe_next; \ } while (/*CONSTCOND*/0) #define PMDK_CIRCLEQ_FOREACH(var, head, field) \ for ((var) = ((head)->cqh_first); \ (var) != (const void *)(head); \ (var) = ((var)->field.cqe_next)) #define PMDK_CIRCLEQ_FOREACH_REVERSE(var, head, field) \ for ((var) = ((head)->cqh_last); \ (var) != (const void *)(head); \ (var) = ((var)->field.cqe_prev)) /* * Circular queue access methods. */ #define PMDK_CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head)) #define PMDK_CIRCLEQ_FIRST(head) ((head)->cqh_first) #define PMDK_CIRCLEQ_LAST(head) ((head)->cqh_last) #define PMDK_CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next) #define PMDK_CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev) #define PMDK_CIRCLEQ_LOOP_NEXT(head, elm, field) \ (((elm)->field.cqe_next == (void *)(head)) \ ? ((head)->cqh_first) \ : ((elm)->field.cqe_next)) #define PMDK_CIRCLEQ_LOOP_PREV(head, elm, field) \ (((elm)->field.cqe_prev == (void *)(head)) \ ? ((head)->cqh_last) \ : ((elm)->field.cqe_prev)) /* * Sorted queue functions. 
*/ #define PMDK_SORTEDQ_HEAD(name, type) PMDK_CIRCLEQ_HEAD(name, type) #define PMDK_SORTEDQ_HEAD_INITIALIZER(head) PMDK_CIRCLEQ_HEAD_INITIALIZER(head) #define PMDK_SORTEDQ_ENTRY(type) PMDK_CIRCLEQ_ENTRY(type) #define PMDK_SORTEDQ_INIT(head) PMDK_CIRCLEQ_INIT(head) #define PMDK_SORTEDQ_INSERT(head, elm, field, type, comparer) { \ type *_elm_it; \ for (_elm_it = (head)->cqh_first; \ ((_elm_it != (void *)(head)) && \ (comparer(_elm_it, (elm)) < 0)); \ _elm_it = _elm_it->field.cqe_next) \ /*NOTHING*/; \ if (_elm_it == (void *)(head)) \ PMDK_CIRCLEQ_INSERT_TAIL(head, elm, field); \ else \ PMDK_CIRCLEQ_INSERT_BEFORE(head, _elm_it, elm, field); \ } #define PMDK_SORTEDQ_REMOVE(head, elm, field) PMDK_CIRCLEQ_REMOVE(head, elm, field) #define PMDK_SORTEDQ_FOREACH(var, head, field) PMDK_CIRCLEQ_FOREACH(var, head, field) #define PMDK_SORTEDQ_FOREACH_REVERSE(var, head, field) \ PMDK_CIRCLEQ_FOREACH_REVERSE(var, head, field) /* * Sorted queue access methods. */ #define PMDK_SORTEDQ_EMPTY(head) PMDK_CIRCLEQ_EMPTY(head) #define PMDK_SORTEDQ_FIRST(head) PMDK_CIRCLEQ_FIRST(head) #define PMDK_SORTEDQ_LAST(head) PMDK_CIRCLEQ_LAST(head) #define PMDK_SORTEDQ_NEXT(elm, field) PMDK_CIRCLEQ_NEXT(elm, field) #define PMDK_SORTEDQ_PREV(elm, field) PMDK_CIRCLEQ_PREV(elm, field) #endif /* sys/queue.h */
22,165
33.907087
85
h
null
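The PMDK_TAILQ_* macros defined in the queue.h entry above work like the classic BSD sys/queue.h tail queues: the linkage lives inside the element via PMDK_TAILQ_ENTRY. A minimal usage sketch, assuming it is compiled inside the PMDK source tree so that this queue.h is on the include path (struct item and its values are made up for illustration):

#include <stdio.h>
#include <stdlib.h>
#include "queue.h"	/* assumed to resolve to the PMDK header above */

struct item {
	int value;
	PMDK_TAILQ_ENTRY(item) entry;	/* embedded linkage for the tail queue */
};

PMDK_TAILQ_HEAD(itemhead, item);

int main(void)
{
	struct itemhead head;
	PMDK_TAILQ_INIT(&head);

	for (int i = 0; i < 3; ++i) {
		struct item *it = malloc(sizeof(*it));
		if (it == NULL)
			return 1;
		it->value = i;
		PMDK_TAILQ_INSERT_TAIL(&head, it, entry);
	}

	struct item *it;
	PMDK_TAILQ_FOREACH(it, &head, entry)
		printf("%d\n", it->value);	/* prints 0 1 2 in insertion order */

	while (!PMDK_TAILQ_EMPTY(&head)) {
		it = PMDK_TAILQ_FIRST(&head);
		PMDK_TAILQ_REMOVE(&head, it, entry);
		free(it);
	}
	return 0;
}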
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/set.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * set.h -- internal definitions for set module */ #ifndef PMDK_SET_H #define PMDK_SET_H 1 #include <stddef.h> #include <stdint.h> #include <stdio.h> #include <sys/types.h> #include "out.h" #include "vec.h" #include "pool_hdr.h" #include "librpmem.h" #ifdef __cplusplus extern "C" { #endif /* * pool sets & replicas */ #define POOLSET_HDR_SIG "PMEMPOOLSET" #define POOLSET_HDR_SIG_LEN 11 /* does NOT include '\0' */ #define POOLSET_REPLICA_SIG "REPLICA" #define POOLSET_REPLICA_SIG_LEN 7 /* does NOT include '\0' */ #define POOLSET_OPTION_SIG "OPTION" #define POOLSET_OPTION_SIG_LEN 6 /* does NOT include '\0' */ /* pool set option flags */ enum pool_set_option_flag { OPTION_UNKNOWN = 0x0, OPTION_SINGLEHDR = 0x1, /* pool headers only in the first part */ OPTION_NOHDRS = 0x2, /* no pool headers, remote replicas only */ }; struct pool_set_option { const char *name; enum pool_set_option_flag flag; }; #define POOL_LOCAL 0 #define POOL_REMOTE 1 #define REPLICAS_DISABLED 0 #define REPLICAS_ENABLED 1 /* util_pool_open flags */ #define POOL_OPEN_COW 1 /* copy-on-write mode */ #define POOL_OPEN_IGNORE_SDS 2 /* ignore shutdown state */ #define POOL_OPEN_IGNORE_BAD_BLOCKS 4 /* ignore bad blocks */ #define POOL_OPEN_CHECK_BAD_BLOCKS 8 /* check bad blocks */ enum del_parts_mode { DO_NOT_DELETE_PARTS, /* do not delete part files */ DELETE_CREATED_PARTS, /* delete only newly created parts files */ DELETE_ALL_PARTS /* force delete all parts files */ }; struct pool_set_part { /* populated by a pool set file parser */ const char *path; size_t filesize; /* aligned to page size */ int fd; int flags; /* stores flags used when opening the file */ /* valid only if fd >= 0 */ int is_dev_dax; /* indicates if the part is on device dax */ size_t alignment; /* internal alignment (Device DAX only) */ int created; /* indicates newly created (zeroed) file */ /* util_poolset_open/create */ void *remote_hdr; /* allocated 
header for remote replica */ void *hdr; /* base address of header */ size_t hdrsize; /* size of the header mapping */ int hdr_map_sync; /* header mapped with MAP_SYNC */ void *addr; /* base address of the mapping */ size_t size; /* size of the mapping - page aligned */ int map_sync; /* part has been mapped with MAP_SYNC flag */ int rdonly; /* is set based on compat features, affects */ /* the whole poolset */ uuid_t uuid; int has_bad_blocks; /* part file contains bad blocks */ int sds_dirty_modified; /* sds dirty flag was set */ }; struct pool_set_directory { const char *path; size_t resvsize; /* size of the address space reservation */ }; struct remote_replica { void *rpp; /* RPMEMpool opaque handle */ char *node_addr; /* address of a remote node */ /* poolset descriptor is a pool set file name on a remote node */ char *pool_desc; /* descriptor of a poolset */ }; struct pool_replica { unsigned nparts; unsigned nallocated; unsigned nhdrs; /* should be 0, 1 or nparts */ size_t repsize; /* total size of all the parts (mappings) */ size_t resvsize; /* min size of the address space reservation */ int is_pmem; /* true if all the parts are in PMEM */ struct remote_replica *remote; /* not NULL if the replica */ /* is a remote one */ VEC(, struct pool_set_directory) directory; struct pool_set_part part[]; }; struct pool_set { char *path; /* path of the poolset file */ unsigned nreplicas; uuid_t uuid; int rdonly; int zeroed; /* true if all the parts are new files */ size_t poolsize; /* the smallest replica size */ int has_bad_blocks; /* pool set contains bad blocks */ int remote; /* true if contains a remote replica */ unsigned options; /* enabled pool set options */ int directory_based; size_t resvsize; unsigned next_id; unsigned next_directory_id; int ignore_sds; /* don't use shutdown state */ struct pool_replica *replica[]; }; struct part_file { int is_remote; /* * Pointer to the part file structure - * - not-NULL only for a local part file */ struct pool_set_part *part; /* * Pointer to the replica structure - * - not-NULL only for a remote replica */ struct remote_replica *remote; }; struct pool_attr { char signature[POOL_HDR_SIG_LEN]; /* pool signature */ uint32_t major; /* format major version number */ features_t features; /* features flags */ unsigned char poolset_uuid[POOL_HDR_UUID_LEN]; /* pool uuid */ unsigned char first_part_uuid[POOL_HDR_UUID_LEN]; /* first part uuid */ unsigned char prev_repl_uuid[POOL_HDR_UUID_LEN]; /* prev replica uuid */ unsigned char next_repl_uuid[POOL_HDR_UUID_LEN]; /* next replica uuid */ unsigned char arch_flags[POOL_HDR_ARCH_LEN]; /* arch flags */ }; /* get index of the (r)th replica */ static inline unsigned REPidx(const struct pool_set *set, unsigned r) { ASSERTne(set->nreplicas, 0); return r % set->nreplicas; } /* get index of the (r + 1)th replica */ static inline unsigned REPNidx(const struct pool_set *set, unsigned r) { ASSERTne(set->nreplicas, 0); return (r + 1) % set->nreplicas; } /* get index of the (r - 1)th replica */ static inline unsigned REPPidx(const struct pool_set *set, unsigned r) { ASSERTne(set->nreplicas, 0); return (set->nreplicas + r - 1) % set->nreplicas; } /* get index of the (r)th part */ static inline unsigned PARTidx(const struct pool_replica *rep, unsigned p) { ASSERTne(rep->nparts, 0); return p % rep->nparts; } /* get index of the (r + 1)th part */ static inline unsigned PARTNidx(const struct pool_replica *rep, unsigned p) { ASSERTne(rep->nparts, 0); return (p + 1) % rep->nparts; } /* get index of the (r - 1)th part */ static 
inline unsigned PARTPidx(const struct pool_replica *rep, unsigned p) { ASSERTne(rep->nparts, 0); return (rep->nparts + p - 1) % rep->nparts; } /* get index of the (r)th part */ static inline unsigned HDRidx(const struct pool_replica *rep, unsigned p) { ASSERTne(rep->nhdrs, 0); return p % rep->nhdrs; } /* get index of the (r + 1)th part */ static inline unsigned HDRNidx(const struct pool_replica *rep, unsigned p) { ASSERTne(rep->nhdrs, 0); return (p + 1) % rep->nhdrs; } /* get index of the (r - 1)th part */ static inline unsigned HDRPidx(const struct pool_replica *rep, unsigned p) { ASSERTne(rep->nhdrs, 0); return (rep->nhdrs + p - 1) % rep->nhdrs; } /* get (r)th replica */ static inline struct pool_replica * REP(const struct pool_set *set, unsigned r) { return set->replica[REPidx(set, r)]; } /* get (r + 1)th replica */ static inline struct pool_replica * REPN(const struct pool_set *set, unsigned r) { return set->replica[REPNidx(set, r)]; } /* get (r - 1)th replica */ static inline struct pool_replica * REPP(const struct pool_set *set, unsigned r) { return set->replica[REPPidx(set, r)]; } /* get (p)th part */ static inline struct pool_set_part * PART(struct pool_replica *rep, unsigned p) { return &rep->part[PARTidx(rep, p)]; } /* get (p + 1)th part */ static inline struct pool_set_part * PARTN(struct pool_replica *rep, unsigned p) { return &rep->part[PARTNidx(rep, p)]; } /* get (p - 1)th part */ static inline struct pool_set_part * PARTP(struct pool_replica *rep, unsigned p) { return &rep->part[PARTPidx(rep, p)]; } /* get (p)th header */ static inline struct pool_hdr * HDR(struct pool_replica *rep, unsigned p) { return (struct pool_hdr *)(rep->part[HDRidx(rep, p)].hdr); } /* get (p + 1)th header */ static inline struct pool_hdr * HDRN(struct pool_replica *rep, unsigned p) { return (struct pool_hdr *)(rep->part[HDRNidx(rep, p)].hdr); } /* get (p - 1)th header */ static inline struct pool_hdr * HDRP(struct pool_replica *rep, unsigned p) { return (struct pool_hdr *)(rep->part[HDRPidx(rep, p)].hdr); } extern int Prefault_at_open; extern int Prefault_at_create; extern int SDS_at_create; extern int Fallocate_at_create; extern int COW_at_open; int util_poolset_parse(struct pool_set **setp, const char *path, int fd); int util_poolset_read(struct pool_set **setp, const char *path); int util_poolset_create_set(struct pool_set **setp, const char *path, size_t poolsize, size_t minsize, int ignore_sds); int util_poolset_open(struct pool_set *set); void util_poolset_close(struct pool_set *set, enum del_parts_mode del); void util_poolset_free(struct pool_set *set); int util_poolset_chmod(struct pool_set *set, mode_t mode); void util_poolset_fdclose(struct pool_set *set); void util_poolset_fdclose_always(struct pool_set *set); int util_is_poolset_file(const char *path); int util_poolset_foreach_part_struct(struct pool_set *set, int (*cb)(struct part_file *pf, void *arg), void *arg); int util_poolset_foreach_part(const char *path, int (*cb)(struct part_file *pf, void *arg), void *arg); size_t util_poolset_size(const char *path); int util_replica_deep_common(const void *addr, size_t len, struct pool_set *set, unsigned replica_id, int flush); int util_replica_deep_persist(const void *addr, size_t len, struct pool_set *set, unsigned replica_id); int util_replica_deep_drain(const void *addr, size_t len, struct pool_set *set, unsigned replica_id); int util_pool_create(struct pool_set **setp, const char *path, size_t poolsize, size_t minsize, size_t minpartsize, const struct pool_attr *attr, unsigned *nlanes, int 
can_have_rep); int util_pool_create_uuids(struct pool_set **setp, const char *path, size_t poolsize, size_t minsize, size_t minpartsize, const struct pool_attr *attr, unsigned *nlanes, int can_have_rep, int remote); int util_part_open(struct pool_set_part *part, size_t minsize, int create_part); void util_part_fdclose(struct pool_set_part *part); int util_replica_open(struct pool_set *set, unsigned repidx, int flags); int util_replica_set_attr(struct pool_replica *rep, const struct rpmem_pool_attr *rattr); void util_pool_hdr2attr(struct pool_attr *attr, struct pool_hdr *hdr); void util_pool_attr2hdr(struct pool_hdr *hdr, const struct pool_attr *attr); int util_replica_close(struct pool_set *set, unsigned repidx); int util_map_part(struct pool_set_part *part, void *addr, size_t size, size_t offset, int flags, int rdonly); int util_unmap_part(struct pool_set_part *part); int util_unmap_parts(struct pool_replica *rep, unsigned start_index, unsigned end_index); int util_header_create(struct pool_set *set, unsigned repidx, unsigned partidx, const struct pool_attr *attr, int overwrite); int util_map_hdr(struct pool_set_part *part, int flags, int rdonly); void util_unmap_hdr(struct pool_set_part *part); int util_pool_has_device_dax(struct pool_set *set); int util_pool_open_nocheck(struct pool_set *set, unsigned flags); int util_pool_open(struct pool_set **setp, const char *path, size_t minpartsize, const struct pool_attr *attr, unsigned *nlanes, void *addr, unsigned flags); int util_pool_open_remote(struct pool_set **setp, const char *path, int cow, size_t minpartsize, struct rpmem_pool_attr *rattr); void *util_pool_extend(struct pool_set *set, size_t *size, size_t minpartsize); void util_remote_init(void); void util_remote_fini(void); int util_update_remote_header(struct pool_set *set, unsigned repn); void util_remote_init_lock(void); void util_remote_destroy_lock(void); int util_pool_close_remote(RPMEMpool *rpp); void util_remote_unload(void); void util_replica_fdclose(struct pool_replica *rep); int util_poolset_remote_open(struct pool_replica *rep, unsigned repidx, size_t minsize, int create, void *pool_addr, size_t pool_size, unsigned *nlanes); int util_remote_load(void); int util_replica_open_remote(struct pool_set *set, unsigned repidx, int flags); int util_poolset_remote_replica_open(struct pool_set *set, unsigned repidx, size_t minsize, int create, unsigned *nlanes); int util_replica_close_local(struct pool_replica *rep, unsigned repn, enum del_parts_mode del); int util_replica_close_remote(struct pool_replica *rep, unsigned repn, enum del_parts_mode del); extern int (*Rpmem_persist)(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane, unsigned flags); extern int (*Rpmem_deep_persist)(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane); extern int (*Rpmem_read)(RPMEMpool *rpp, void *buff, size_t offset, size_t length, unsigned lane); extern int (*Rpmem_close)(RPMEMpool *rpp); extern int (*Rpmem_remove)(const char *target, const char *pool_set_name, int flags); extern int (*Rpmem_set_attr)(RPMEMpool *rpp, const struct rpmem_pool_attr *rattr); #ifdef __cplusplus } #endif #endif
14,145
31.077098
80
h
null
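The REP*/PART*/HDR* helpers in set.h above all rely on the same wrap-around modular arithmetic to reach a neighbouring replica, part, or header. A standalone sketch of just that arithmetic (it does not include set.h; the replica count is made up):

#include <stdio.h>

/* mirrors REPNidx(): index of the (r + 1)th replica, wrapping past the end */
static unsigned next_idx(unsigned nreplicas, unsigned r)
{
	return (r + 1) % nreplicas;
}

/* mirrors REPPidx(): index of the (r - 1)th replica, wrapping below zero */
static unsigned prev_idx(unsigned nreplicas, unsigned r)
{
	return (nreplicas + r - 1) % nreplicas;
}

int main(void)
{
	unsigned nreplicas = 3;
	for (unsigned r = 0; r < nreplicas; ++r)
		printf("replica %u: next %u, prev %u\n",
		    r, next_idx(nreplicas, r), prev_idx(nreplicas, r));
	/* replica 2 wraps forward to 0; replica 0 wraps back to 2 */
	return 0;
}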
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/dlsym.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2017, Intel Corporation */ /* * dlsym.h -- dynamic linking utilities with library-specific implementation */ #ifndef PMDK_DLSYM_H #define PMDK_DLSYM_H 1 #include "out.h" #if defined(USE_LIBDL) && !defined(_WIN32) #include <dlfcn.h> /* * util_dlopen -- calls real dlopen() */ static inline void * util_dlopen(const char *filename) { LOG(3, "filename %s", filename); return dlopen(filename, RTLD_NOW); } /* * util_dlerror -- calls real dlerror() */ static inline char * util_dlerror(void) { return dlerror(); } /* * util_dlsym -- calls real dlsym() */ static inline void * util_dlsym(void *handle, const char *symbol) { LOG(3, "handle %p symbol %s", handle, symbol); return dlsym(handle, symbol); } /* * util_dlclose -- calls real dlclose() */ static inline int util_dlclose(void *handle) { LOG(3, "handle %p", handle); return dlclose(handle); } #else /* empty functions */ /* * util_dlopen -- empty function */ static inline void * util_dlopen(const char *filename) { errno = ENOSYS; return NULL; } /* * util_dlerror -- empty function */ static inline char * util_dlerror(void) { errno = ENOSYS; return NULL; } /* * util_dlsym -- empty function */ static inline void * util_dlsym(void *handle, const char *symbol) { errno = ENOSYS; return NULL; } /* * util_dlclose -- empty function */ static inline int util_dlclose(void *handle) { errno = ENOSYS; return 0; } #endif #endif
1,485
13.288462
76
h
null
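When USE_LIBDL is defined, the util_dl* wrappers in dlsym.h above simply forward to dlopen()/dlsym()/dlclose(). A minimal sketch of that underlying pattern using plain libdl (the library name "libm.so.6" is an assumption; link with -ldl):

#include <stdio.h>
#include <dlfcn.h>

int main(void)
{
	void *handle = dlopen("libm.so.6", RTLD_NOW);
	if (handle == NULL) {
		fprintf(stderr, "dlopen: %s\n", dlerror());
		return 1;
	}

	/* look up the "cos" symbol and call it through a function pointer */
	double (*cosine)(double) = (double (*)(double))dlsym(handle, "cos");
	if (cosine == NULL) {
		fprintf(stderr, "dlsym: %s\n", dlerror());
		dlclose(handle);
		return 1;
	}

	printf("cos(0.0) = %f\n", cosine(0.0));
	dlclose(handle);
	return 0;
}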
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/sys_util.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * sys_util.h -- internal utility wrappers around system functions */ #ifndef PMDK_SYS_UTIL_H #define PMDK_SYS_UTIL_H 1 #include <errno.h> #include "os_thread.h" #include "out.h" #ifdef __cplusplus extern "C" { #endif /* * util_mutex_init -- os_mutex_init variant that never fails from * caller perspective. If os_mutex_init failed, this function aborts * the program. */ static inline void util_mutex_init(os_mutex_t *m) { int tmp = os_mutex_init(m); if (tmp) { errno = tmp; FATAL("!os_mutex_init"); } } /* * util_mutex_destroy -- os_mutex_destroy variant that never fails from * caller perspective. If os_mutex_destroy failed, this function aborts * the program. */ static inline void util_mutex_destroy(os_mutex_t *m) { int tmp = os_mutex_destroy(m); if (tmp) { errno = tmp; FATAL("!os_mutex_destroy"); } } /* * util_mutex_lock -- os_mutex_lock variant that never fails from * caller perspective. If os_mutex_lock failed, this function aborts * the program. */ static inline void util_mutex_lock(os_mutex_t *m) { int tmp = os_mutex_lock(m); if (tmp) { errno = tmp; FATAL("!os_mutex_lock"); } } /* * util_mutex_trylock -- os_mutex_trylock variant that never fails from * caller perspective (other than EBUSY). If util_mutex_trylock failed, this * function aborts the program. * Returns 0 if locked successfully, otherwise returns EBUSY. */ static inline int util_mutex_trylock(os_mutex_t *m) { int tmp = os_mutex_trylock(m); if (tmp && tmp != EBUSY) { errno = tmp; FATAL("!os_mutex_trylock"); } return tmp; } /* * util_mutex_unlock -- os_mutex_unlock variant that never fails from * caller perspective. If os_mutex_unlock failed, this function aborts * the program. */ static inline void util_mutex_unlock(os_mutex_t *m) { int tmp = os_mutex_unlock(m); if (tmp) { errno = tmp; FATAL("!os_mutex_unlock"); } } /* * util_rwlock_init -- os_rwlock_init variant that never fails from * caller perspective. If os_rwlock_init failed, this function aborts * the program. */ static inline void util_rwlock_init(os_rwlock_t *m) { int tmp = os_rwlock_init(m); if (tmp) { errno = tmp; FATAL("!os_rwlock_init"); } } /* * util_rwlock_rdlock -- os_rwlock_rdlock variant that never fails from * caller perspective. If os_rwlock_rdlock failed, this function aborts * the program. */ static inline void util_rwlock_rdlock(os_rwlock_t *m) { int tmp = os_rwlock_rdlock(m); if (tmp) { errno = tmp; FATAL("!os_rwlock_rdlock"); } } /* * util_rwlock_wrlock -- os_rwlock_wrlock variant that never fails from * caller perspective. If os_rwlock_wrlock failed, this function aborts * the program. */ static inline void util_rwlock_wrlock(os_rwlock_t *m) { int tmp = os_rwlock_wrlock(m); if (tmp) { errno = tmp; FATAL("!os_rwlock_wrlock"); } } /* * util_rwlock_unlock -- os_rwlock_unlock variant that never fails from * caller perspective. If os_rwlock_unlock failed, this function aborts * the program. */ static inline void util_rwlock_unlock(os_rwlock_t *m) { int tmp = os_rwlock_unlock(m); if (tmp) { errno = tmp; FATAL("!os_rwlock_unlock"); } } /* * util_rwlock_destroy -- os_rwlock_destroy variant that never fails from * caller perspective. If os_rwlock_destroy failed, this function aborts * the program. */ static inline void util_rwlock_destroy(os_rwlock_t *m) { int tmp = os_rwlock_destroy(m); if (tmp) { errno = tmp; FATAL("!os_rwlock_destroy"); } } /* * util_spin_init -- os_spin_init variant that logs on fail and sets errno. 
*/ static inline int util_spin_init(os_spinlock_t *lock, int pshared) { int tmp = os_spin_init(lock, pshared); if (tmp) { errno = tmp; ERR("!os_spin_init"); } return tmp; } /* * util_spin_destroy -- os_spin_destroy variant that never fails from * caller perspective. If os_spin_destroy failed, this function aborts * the program. */ static inline void util_spin_destroy(os_spinlock_t *lock) { int tmp = os_spin_destroy(lock); if (tmp) { errno = tmp; FATAL("!os_spin_destroy"); } } /* * util_spin_lock -- os_spin_lock variant that never fails from caller * perspective. If os_spin_lock failed, this function aborts the program. */ static inline void util_spin_lock(os_spinlock_t *lock) { int tmp = os_spin_lock(lock); if (tmp) { errno = tmp; FATAL("!os_spin_lock"); } } /* * util_spin_unlock -- os_spin_unlock variant that never fails * from caller perspective. If os_spin_unlock failed, * this function aborts the program. */ static inline void util_spin_unlock(os_spinlock_t *lock) { int tmp = os_spin_unlock(lock); if (tmp) { errno = tmp; FATAL("!os_spin_unlock"); } } /* * util_semaphore_init -- os_semaphore_init variant that never fails * from caller perspective. If os_semaphore_init failed, * this function aborts the program. */ static inline void util_semaphore_init(os_semaphore_t *sem, unsigned value) { if (os_semaphore_init(sem, value)) FATAL("!os_semaphore_init"); } /* * util_semaphore_destroy -- deletes a semaphore instance */ static inline void util_semaphore_destroy(os_semaphore_t *sem) { if (os_semaphore_destroy(sem) != 0) FATAL("!os_semaphore_destroy"); } /* * util_semaphore_wait -- decreases the value of the semaphore */ static inline void util_semaphore_wait(os_semaphore_t *sem) { errno = 0; int ret; do { ret = os_semaphore_wait(sem); } while (errno == EINTR); /* signal interrupt */ if (ret != 0) FATAL("!os_semaphore_wait"); } /* * util_semaphore_trywait -- tries to decrease the value of the semaphore */ static inline int util_semaphore_trywait(os_semaphore_t *sem) { errno = 0; int ret; do { ret = os_semaphore_trywait(sem); } while (errno == EINTR); /* signal interrupt */ if (ret != 0 && errno != EAGAIN) FATAL("!os_semaphore_trywait"); return ret; } /* * util_semaphore_post -- increases the value of the semaphore */ static inline void util_semaphore_post(os_semaphore_t *sem) { if (os_semaphore_post(sem) != 0) FATAL("!os_semaphore_post"); } static inline void util_cond_init(os_cond_t *__restrict cond) { if (os_cond_init(cond)) FATAL("!os_cond_init"); } static inline void util_cond_destroy(os_cond_t *__restrict cond) { if (os_cond_destroy(cond)) FATAL("!os_cond_destroy"); } #ifdef __cplusplus } #endif #endif
6,387
19.21519
76
h
null
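Every util_* wrapper in sys_util.h above follows the same pattern: call the os_* primitive, and on failure set errno and abort via FATAL so the caller never has to check a return value. A standalone sketch of that pattern with plain pthreads (xmutex_lock is a hypothetical name; build with -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* a lock that "never fails" from the caller's perspective */
static void
xmutex_lock(pthread_mutex_t *m)
{
	int tmp = pthread_mutex_lock(m);
	if (tmp) {
		errno = tmp;
		fprintf(stderr, "pthread_mutex_lock: %s\n", strerror(tmp));
		abort();	/* stands in for FATAL("!os_mutex_lock") */
	}
}

int main(void)
{
	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	xmutex_lock(&m);
	/* ... critical section ... */
	pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
	return 0;
}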
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/mmap.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * mmap.h -- internal definitions for mmap module */ #ifndef PMDK_MMAP_H #define PMDK_MMAP_H 1 #include <stddef.h> #include <stdint.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include <errno.h> #include "out.h" #include "queue.h" #include "os.h" #ifdef __cplusplus extern "C" { #endif extern int Mmap_no_random; extern void *Mmap_hint; extern char *Mmap_mapfile; void *util_map_sync(void *addr, size_t len, int proto, int flags, int fd, os_off_t offset, int *map_sync); void *util_map(int fd, os_off_t off, size_t len, int flags, int rdonly, size_t req_align, int *map_sync); int util_unmap(void *addr, size_t len); #ifdef __FreeBSD__ #define MAP_NORESERVE 0 #define OS_MAPFILE "/proc/curproc/map" #else #define OS_MAPFILE "/proc/self/maps" #endif #ifndef MAP_SYNC #define MAP_SYNC 0x80000 #endif #ifndef MAP_SHARED_VALIDATE #define MAP_SHARED_VALIDATE 0x03 #endif /* * macros for micromanaging range protections for the debug version */ #ifdef DEBUG #define RANGE(addr, len, is_dev_dax, type) do {\ if (!is_dev_dax) ASSERT(util_range_##type(addr, len) >= 0);\ } while (0) #else #define RANGE(addr, len, is_dev_dax, type) do {} while (0) #endif #define RANGE_RO(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, ro) #define RANGE_RW(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, rw) #define RANGE_NONE(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, none) /* pmem mapping type */ enum pmem_map_type { PMEM_DEV_DAX, /* device dax */ PMEM_MAP_SYNC, /* mapping with MAP_SYNC flag on dax fs */ MAX_PMEM_TYPE }; /* * this structure tracks the file mappings outstanding per file handle */ struct map_tracker { PMDK_SORTEDQ_ENTRY(map_tracker) entry; uintptr_t base_addr; uintptr_t end_addr; unsigned region_id; enum pmem_map_type type; #ifdef _WIN32 /* Windows-specific data */ HANDLE FileHandle; HANDLE FileMappingHandle; DWORD Access; os_off_t Offset; size_t FileLen; #endif }; void util_mmap_init(void); void util_mmap_fini(void); int util_range_ro(void *addr, size_t len); int util_range_rw(void *addr, size_t len); int util_range_none(void *addr, size_t len); char *util_map_hint_unused(void *minaddr, size_t len, size_t align); char *util_map_hint(size_t len, size_t req_align); #define KILOBYTE ((uintptr_t)1 << 10) #define MEGABYTE ((uintptr_t)1 << 20) #define GIGABYTE ((uintptr_t)1 << 30) /* * util_map_hint_align -- choose the desired mapping alignment * * The smallest supported alignment is 2 megabytes because of the object * alignment requirements. Changing this value to 4 kilobytes constitues a * layout change. * * Use 1GB page alignment only if the mapping length is at least * twice as big as the page size. */ static inline size_t util_map_hint_align(size_t len, size_t req_align) { size_t align = 2 * MEGABYTE; if (req_align) align = req_align; else if (len >= 2 * GIGABYTE) align = GIGABYTE; return align; } int util_range_register(const void *addr, size_t len, const char *path, enum pmem_map_type type); int util_range_unregister(const void *addr, size_t len); struct map_tracker *util_range_find(uintptr_t addr, size_t len); int util_range_is_pmem(const void *addr, size_t len); #ifdef __cplusplus } #endif #endif
3,328
22.27972
76
h
null
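util_map_hint_align() in mmap.h above picks 2 MB by default, honours an explicit request, and switches to 1 GB alignment only for mappings of at least 2 GB. A standalone sketch of that decision on a 64-bit build (the function below is a copy for illustration, not the library call):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define MEGABYTE ((uintptr_t)1 << 20)
#define GIGABYTE ((uintptr_t)1 << 30)

static size_t
map_hint_align(size_t len, size_t req_align)
{
	size_t align = 2 * MEGABYTE;		/* default */
	if (req_align)
		align = req_align;		/* caller's explicit request wins */
	else if (len >= 2 * GIGABYTE)
		align = GIGABYTE;		/* huge mappings get 1 GB alignment */
	return align;
}

int main(void)
{
	printf("%zu\n", map_hint_align(8 * MEGABYTE, 0));	/* 2 MB */
	printf("%zu\n", map_hint_align(4 * GIGABYTE, 0));	/* 1 GB */
	printf("%zu\n", map_hint_align(4 * GIGABYTE, 4096));	/* 4 kB override */
	return 0;
}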
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/vecq.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * vecq.h -- vector queue (FIFO) interface */ #ifndef PMDK_VECQ_H #define PMDK_VECQ_H 1 #include <stddef.h> #include "util.h" #include "out.h" #include "alloc.h" #ifdef __cplusplus extern "C" { #endif #define VECQ_INIT_SIZE (64) #define VECQ(name, type)\ struct name {\ type *buffer;\ size_t capacity;\ size_t front;\ size_t back;\ } #define VECQ_INIT(vec) do {\ (vec)->buffer = NULL;\ (vec)->capacity = 0;\ (vec)->front = 0;\ (vec)->back = 0;\ } while (0) #define VECQ_REINIT(vec) do {\ VALGRIND_ANNOTATE_NEW_MEMORY((vec), sizeof(*vec));\ VALGRIND_ANNOTATE_NEW_MEMORY((vec)->buffer,\ (sizeof(*(vec)->buffer) * ((vec)->capacity)));\ (vec)->front = 0;\ (vec)->back = 0;\ } while (0) #define VECQ_FRONT_POS(vec)\ ((vec)->front & ((vec)->capacity - 1)) #define VECQ_BACK_POS(vec)\ ((vec)->back & ((vec)->capacity - 1)) #define VECQ_FRONT(vec)\ (vec)->buffer[VECQ_FRONT_POS(vec)] #define VECQ_BACK(vec)\ (vec)->buffer[VECQ_BACK_POS(vec)] #define VECQ_DEQUEUE(vec)\ ((vec)->buffer[(((vec)->front++) & ((vec)->capacity - 1))]) #define VECQ_SIZE(vec)\ ((vec)->back - (vec)->front) static inline int realloc_set(void **buf, size_t s) { void *tbuf = Realloc(*buf, s); if (tbuf == NULL) { ERR("!Realloc"); return -1; } *buf = tbuf; return 0; } #define VECQ_NCAPACITY(vec)\ ((vec)->capacity == 0 ? VECQ_INIT_SIZE : (vec)->capacity * 2) #define VECQ_GROW(vec)\ (realloc_set((void **)&(vec)->buffer,\ VECQ_NCAPACITY(vec) * sizeof(*(vec)->buffer)) ? -1 :\ (memcpy((vec)->buffer + (vec)->capacity, (vec)->buffer,\ VECQ_FRONT_POS(vec) * sizeof(*(vec)->buffer)),\ (vec)->front = VECQ_FRONT_POS(vec),\ (vec)->back = (vec)->front + (vec)->capacity,\ (vec)->capacity = VECQ_NCAPACITY(vec),\ 0\ )) #define VECQ_INSERT(vec, element)\ (VECQ_BACK(vec) = element, (vec)->back += 1, 0) #define VECQ_ENQUEUE(vec, element)\ ((vec)->capacity == VECQ_SIZE(vec) ?\ (VECQ_GROW(vec) == 0 ? VECQ_INSERT(vec, element) : -1) :\ VECQ_INSERT(vec, element)) #define VECQ_CAPACITY(vec)\ ((vec)->capacity) #define VECQ_FOREACH(el, vec)\ for (size_t _vec_i = 0;\ _vec_i < VECQ_SIZE(vec) &&\ (((el) = (vec)->buffer[_vec_i & ((vec)->capacity - 1)]), 1);\ ++_vec_i) #define VECQ_FOREACH_REVERSE(el, vec)\ for (size_t _vec_i = VECQ_SIZE(vec);\ _vec_i > 0 &&\ (((el) = (vec)->buffer[(_vec_i - 1) & ((vec)->capacity - 1)]), 1);\ --_vec_i) #define VECQ_CLEAR(vec) do {\ (vec)->front = 0;\ (vec)->back = 0;\ } while (0) #define VECQ_DELETE(vec) do {\ Free((vec)->buffer);\ (vec)->buffer = NULL;\ (vec)->capacity = 0;\ (vec)->front = 0;\ (vec)->back = 0;\ } while (0) #ifdef __cplusplus } #endif #endif /* PMDK_VECQ_H */
2,731
20.178295
68
h
null
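A short usage sketch of the VECQ FIFO above. It assumes compilation inside the PMDK tree so that vecq.h and its util.h/out.h/alloc.h dependencies resolve; the element type and values are made up:

#include <stdio.h>
#include "vecq.h"

VECQ(int_queue, int);	/* declares struct int_queue */

int main(void)
{
	struct int_queue q;
	VECQ_INIT(&q);

	for (int i = 0; i < 5; ++i)
		if (VECQ_ENQUEUE(&q, i) != 0)
			return 1;	/* reallocation failed */

	while (VECQ_SIZE(&q) > 0)
		printf("%d\n", VECQ_DEQUEUE(&q));	/* 0..4 in FIFO order */

	VECQ_DELETE(&q);
	return 0;
}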
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/vec.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2019, Intel Corporation */ /* * vec.h -- vector interface */ #ifndef PMDK_VEC_H #define PMDK_VEC_H 1 #include <stddef.h> #include "valgrind_internal.h" #include "util.h" #include "out.h" #include "alloc.h" #ifdef __cplusplus extern "C" { #endif #define VEC_INIT_SIZE (64) #define VEC(name, type)\ struct name {\ type *buffer;\ size_t size;\ size_t capacity;\ } #define VEC_INITIALIZER {NULL, 0, 0} #define VEC_INIT(vec) do {\ (vec)->buffer = NULL;\ (vec)->size = 0;\ (vec)->capacity = 0;\ } while (0) #define VEC_MOVE(vecl, vecr) do {\ Free((vecl)->buffer);\ (vecl)->buffer = (vecr)->buffer;\ (vecl)->size = (vecr)->size;\ (vecl)->capacity = (vecr)->capacity;\ (vecr)->buffer = NULL;\ (vecr)->size = 0;\ (vecr)->capacity = 0;\ } while (0) #define VEC_REINIT(vec) do {\ VALGRIND_ANNOTATE_NEW_MEMORY((vec), sizeof(*vec));\ VALGRIND_ANNOTATE_NEW_MEMORY((vec)->buffer,\ (sizeof(*(vec)->buffer) * ((vec)->capacity)));\ (vec)->size = 0;\ } while (0) static inline int vec_reserve(void *vec, size_t ncapacity, size_t s) { size_t ncap = ncapacity == 0 ? VEC_INIT_SIZE : ncapacity; VEC(vvec, void) *vecp = (struct vvec *)vec; void *tbuf = Realloc(vecp->buffer, s * ncap); if (tbuf == NULL) { ERR("!Realloc"); return -1; } vecp->buffer = tbuf; vecp->capacity = ncap; return 0; } #define VEC_RESERVE(vec, ncapacity)\ (((vec)->size == 0 || (ncapacity) > (vec)->size) ?\ vec_reserve((void *)vec, ncapacity, sizeof(*(vec)->buffer)) :\ 0) #define VEC_POP_BACK(vec) do {\ (vec)->size -= 1;\ } while (0) #define VEC_FRONT(vec)\ (vec)->buffer[0] #define VEC_BACK(vec)\ (vec)->buffer[(vec)->size - 1] #define VEC_ERASE_BY_POS(vec, pos) do {\ if ((pos) != ((vec)->size - 1))\ (vec)->buffer[(pos)] = VEC_BACK(vec);\ VEC_POP_BACK(vec);\ } while (0) #define VEC_ERASE_BY_PTR(vec, element) do {\ if ((element) != &VEC_BACK(vec))\ *(element) = VEC_BACK(vec);\ VEC_POP_BACK(vec);\ } while (0) #define VEC_INSERT(vec, element)\ ((vec)->buffer[(vec)->size - 1] = (element), 0) #define VEC_INC_SIZE(vec)\ (((vec)->size++), 0) #define VEC_INC_BACK(vec)\ ((vec)->capacity == (vec)->size ?\ (VEC_RESERVE((vec), ((vec)->capacity * 2)) == 0 ?\ VEC_INC_SIZE(vec) : -1) :\ VEC_INC_SIZE(vec)) #define VEC_PUSH_BACK(vec, element)\ (VEC_INC_BACK(vec) == 0? VEC_INSERT(vec, element) : -1) #define VEC_FOREACH(el, vec)\ for (size_t _vec_i = 0;\ _vec_i < (vec)->size && (((el) = (vec)->buffer[_vec_i]), 1);\ ++_vec_i) #define VEC_FOREACH_REVERSE(el, vec)\ for (size_t _vec_i = ((vec)->size);\ _vec_i != 0 && (((el) = (vec)->buffer[_vec_i - 1]), 1);\ --_vec_i) #define VEC_FOREACH_BY_POS(elpos, vec)\ for ((elpos) = 0; (elpos) < (vec)->size; ++(elpos)) #define VEC_FOREACH_BY_PTR(el, vec)\ for (size_t _vec_i = 0;\ _vec_i < (vec)->size && (((el) = &(vec)->buffer[_vec_i]), 1);\ ++_vec_i) #define VEC_SIZE(vec)\ ((vec)->size) #define VEC_CAPACITY(vec)\ ((vec)->capacity) #define VEC_ARR(vec)\ ((vec)->buffer) #define VEC_GET(vec, id)\ (&(vec)->buffer[id]) #define VEC_CLEAR(vec) do {\ (vec)->size = 0;\ } while (0) #define VEC_DELETE(vec) do {\ Free((vec)->buffer);\ (vec)->buffer = NULL;\ (vec)->size = 0;\ (vec)->capacity = 0;\ } while (0) #ifdef __cplusplus } #endif #endif /* PMDK_VEC_H */
3,300
19.892405
63
h
null
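The growable VEC above follows the same conventions but without queue semantics. A usage sketch under the same assumption (built inside the PMDK tree so vec.h and its dependencies resolve):

#include <stdio.h>
#include "vec.h"

VEC(int_vec, int);	/* declares struct int_vec */

int main(void)
{
	struct int_vec v = VEC_INITIALIZER;

	for (int i = 0; i < 4; ++i)
		if (VEC_PUSH_BACK(&v, i * 10) != 0)
			return 1;	/* reallocation failed */

	int el;
	VEC_FOREACH(el, &v)
		printf("%d\n", el);	/* 0 10 20 30 */

	printf("size %zu, capacity %zu\n", VEC_SIZE(&v), VEC_CAPACITY(&v));
	VEC_DELETE(&v);
	return 0;
}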
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/rand.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ /* * rand.h -- random utils */ #ifndef RAND_H #define RAND_H 1 #include <stdint.h> #ifdef __cplusplus extern "C" { #endif typedef uint64_t rng_t[4]; uint64_t hash64(uint64_t x); uint64_t rnd64_r(rng_t *rng); void randomize_r(rng_t *rng, uint64_t seed); uint64_t rnd64(void); void randomize(uint64_t seed); #ifdef __cplusplus } #endif #endif
432
13.433333
44
h
null
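A small sketch of the RNG interface declared in rand.h above; it assumes the matching rand.c from the same directory is compiled and linked in, and the seed value is arbitrary:

#include <stdio.h>
#include "rand.h"

int main(void)
{
	rng_t rng;
	randomize_r(&rng, 42);		/* seed a private generator state */

	for (int i = 0; i < 3; ++i)
		printf("%llu\n", (unsigned long long)rnd64_r(&rng));

	printf("hash64(1) = %llu\n", (unsigned long long)hash64(1));
	return 0;
}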
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/ravl.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * ravl.h -- internal definitions for ravl tree */ #ifndef LIBPMEMOBJ_RAVL_H #define LIBPMEMOBJ_RAVL_H 1 #include <stddef.h> #ifdef __cplusplus extern "C" { #endif struct ravl; struct ravl_node; enum ravl_predicate { RAVL_PREDICATE_EQUAL = 1 << 0, RAVL_PREDICATE_GREATER = 1 << 1, RAVL_PREDICATE_LESS = 1 << 2, RAVL_PREDICATE_LESS_EQUAL = RAVL_PREDICATE_EQUAL | RAVL_PREDICATE_LESS, RAVL_PREDICATE_GREATER_EQUAL = RAVL_PREDICATE_EQUAL | RAVL_PREDICATE_GREATER, }; typedef int ravl_compare(const void *lhs, const void *rhs); typedef void ravl_cb(void *data, void *arg); typedef void ravl_constr(void *data, size_t data_size, const void *arg); struct ravl *ravl_new(ravl_compare *compare); struct ravl *ravl_new_sized(ravl_compare *compare, size_t data_size); void ravl_delete(struct ravl *ravl); void ravl_delete_cb(struct ravl *ravl, ravl_cb cb, void *arg); void ravl_foreach(struct ravl *ravl, ravl_cb cb, void *arg); int ravl_empty(struct ravl *ravl); void ravl_clear(struct ravl *ravl); int ravl_insert(struct ravl *ravl, const void *data); int ravl_emplace(struct ravl *ravl, ravl_constr constr, const void *arg); int ravl_emplace_copy(struct ravl *ravl, const void *data); struct ravl_node *ravl_find(struct ravl *ravl, const void *data, enum ravl_predicate predicate_flags); void *ravl_data(struct ravl_node *node); void ravl_remove(struct ravl *ravl, struct ravl_node *node); #ifdef __cplusplus } #endif #endif /* LIBPMEMOBJ_RAVL_H */
1,556
27.309091
73
h
null
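A usage sketch for the ravl tree interface above (assumes ravl.c is linked in; the integer keys and the comparator are made up, and RAVL_PREDICATE_GREATER_EQUAL is assumed to return the smallest key not less than the probe):

#include <stdio.h>
#include "ravl.h"

static int
cmp_int(const void *lhs, const void *rhs)
{
	int l = *(const int *)lhs;
	int r = *(const int *)rhs;
	return (l > r) - (l < r);
}

int main(void)
{
	/* tree that stores a copy of each int internally */
	struct ravl *tree = ravl_new_sized(cmp_int, sizeof(int));
	if (tree == NULL)
		return 1;

	int keys[] = { 7, 3, 9 };
	for (int i = 0; i < 3; ++i)
		if (ravl_emplace_copy(tree, &keys[i]) != 0)
			return 1;

	int probe = 5;
	struct ravl_node *n = ravl_find(tree, &probe,
	    RAVL_PREDICATE_GREATER_EQUAL);
	if (n != NULL)
		printf("first key >= 5: %d\n", *(int *)ravl_data(n));	/* 7 */

	ravl_delete(tree);
	return 0;
}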
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/common/pool_hdr.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * pool_hdr.h -- internal definitions for pool header module */ #ifndef PMDK_POOL_HDR_H #define PMDK_POOL_HDR_H 1 #include <stddef.h> #include <stdint.h> #include <unistd.h> #include "uuid.h" #include "shutdown_state.h" #include "util.h" #include "page_size.h" #ifdef __cplusplus extern "C" { #endif /* * Number of bits per type in alignment descriptor */ #define ALIGNMENT_DESC_BITS 4 /* * architecture identification flags * * These flags allow to unambiguously determine the architecture * on which the pool was created. * * The alignment_desc field contains information about alignment * of the following basic types: * - char * - short * - int * - long * - long long * - size_t * - os_off_t * - float * - double * - long double * - void * * * The alignment of each type is computed as an offset of field * of specific type in the following structure: * struct { * char byte; * type field; * }; * * The value is decremented by 1 and masked by 4 bits. * Multiple alignments are stored on consecutive 4 bits of each * type in the order specified above. * * The values used in the machine, and machine_class fields are in * principle independent of operating systems, and object formats. * In practice they happen to match constants used in ELF object headers. */ struct arch_flags { uint64_t alignment_desc; /* alignment descriptor */ uint8_t machine_class; /* address size -- 64 bit or 32 bit */ uint8_t data; /* data encoding -- LE or BE */ uint8_t reserved[4]; uint16_t machine; /* required architecture */ }; #define POOL_HDR_ARCH_LEN sizeof(struct arch_flags) /* possible values of the machine class field in the above struct */ #define PMDK_MACHINE_CLASS_64 2 /* 64 bit pointers, 64 bit size_t */ /* possible values of the machine field in the above struct */ #define PMDK_MACHINE_X86_64 62 #define PMDK_MACHINE_AARCH64 183 #define PMDK_MACHINE_PPC64 21 /* possible values of the data field in the above struct */ #define PMDK_DATA_LE 1 /* 2's complement, little endian */ #define PMDK_DATA_BE 2 /* 2's complement, big endian */ /* * features flags */ typedef struct { uint32_t compat; /* mask: compatible "may" features */ uint32_t incompat; /* mask: "must support" features */ uint32_t ro_compat; /* mask: force RO if unsupported */ } features_t; /* * header used at the beginning of all types of memory pools * * for pools build on persistent memory, the integer types * below are stored in little-endian byte order. 
*/ #define POOL_HDR_SIG_LEN 8 #define POOL_HDR_UNUSED_SIZE 1904 #define POOL_HDR_UNUSED2_SIZE 1976 #define POOL_HDR_ALIGN_PAD (PMEM_PAGESIZE - 4096) struct pool_hdr { char signature[POOL_HDR_SIG_LEN]; uint32_t major; /* format major version number */ features_t features; /* features flags */ uuid_t poolset_uuid; /* pool set UUID */ uuid_t uuid; /* UUID of this file */ uuid_t prev_part_uuid; /* prev part */ uuid_t next_part_uuid; /* next part */ uuid_t prev_repl_uuid; /* prev replica */ uuid_t next_repl_uuid; /* next replica */ uint64_t crtime; /* when created (seconds since epoch) */ struct arch_flags arch_flags; /* architecture identification flags */ unsigned char unused[POOL_HDR_UNUSED_SIZE]; /* must be zero */ /* not checksumed */ unsigned char unused2[POOL_HDR_UNUSED2_SIZE]; /* must be zero */ struct shutdown_state sds; /* shutdown status */ uint64_t checksum; /* checksum of above fields */ #if PMEM_PAGESIZE > 4096 /* prevent zero size array */ unsigned char align_pad[POOL_HDR_ALIGN_PAD]; /* alignment pad */ #endif }; #define POOL_HDR_SIZE (sizeof(struct pool_hdr)) #define POOL_DESC_SIZE PMEM_PAGESIZE void util_convert2le_hdr(struct pool_hdr *hdrp); void util_convert2h_hdr_nocheck(struct pool_hdr *hdrp); void util_get_arch_flags(struct arch_flags *arch_flags); int util_check_arch_flags(const struct arch_flags *arch_flags); features_t util_get_unknown_features(features_t features, features_t known); int util_feature_check(struct pool_hdr *hdrp, features_t features); int util_feature_cmp(features_t features, features_t ref); int util_feature_is_zero(features_t features); int util_feature_is_set(features_t features, features_t flag); void util_feature_enable(features_t *features, features_t new_feature); void util_feature_disable(features_t *features, features_t new_feature); const char *util_feature2str(features_t feature, features_t *found); features_t util_str2feature(const char *str); uint32_t util_str2pmempool_feature(const char *str); uint32_t util_feature2pmempool_feature(features_t feat); /* * set of macros for determining the alignment descriptor */ #define DESC_MASK ((1 << ALIGNMENT_DESC_BITS) - 1) #define alignment_of(t) offsetof(struct { char c; t x; }, x) #define alignment_desc_of(t) (((uint64_t)alignment_of(t) - 1) & DESC_MASK) #define alignment_desc()\ (alignment_desc_of(char) << 0 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(short) << 1 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(int) << 2 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(long) << 3 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(long long) << 4 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(size_t) << 5 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(off_t) << 6 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(float) << 7 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(double) << 8 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(long double) << 9 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(void *) << 10 * ALIGNMENT_DESC_BITS) #define POOL_FEAT_ZERO 0x0000U static const features_t features_zero = {POOL_FEAT_ZERO, POOL_FEAT_ZERO, POOL_FEAT_ZERO}; /* * compat features */ #define POOL_FEAT_CHECK_BAD_BLOCKS 0x0001U /* check bad blocks in a pool */ #define POOL_FEAT_COMPAT_ALL \ (POOL_FEAT_CHECK_BAD_BLOCKS) #define FEAT_COMPAT(X) \ {POOL_FEAT_##X, POOL_FEAT_ZERO, POOL_FEAT_ZERO} /* * incompat features */ #define POOL_FEAT_SINGLEHDR 0x0001U /* pool header only in the first part */ #define POOL_FEAT_CKSUM_2K 0x0002U /* only first 2K of hdr checksummed */ #define POOL_FEAT_SDS 0x0004U /* check shutdown state */ #define 
POOL_FEAT_INCOMPAT_ALL \ (POOL_FEAT_SINGLEHDR | POOL_FEAT_CKSUM_2K | POOL_FEAT_SDS) /* * incompat features effective values (if applicable) */ #ifdef SDS_ENABLED #define POOL_E_FEAT_SDS POOL_FEAT_SDS #else #define POOL_E_FEAT_SDS 0x0000U /* empty */ #endif #define POOL_FEAT_COMPAT_VALID \ (POOL_FEAT_CHECK_BAD_BLOCKS) #define POOL_FEAT_INCOMPAT_VALID \ (POOL_FEAT_SINGLEHDR | POOL_FEAT_CKSUM_2K | POOL_E_FEAT_SDS) #if defined(_WIN32) || NDCTL_ENABLED #define POOL_FEAT_INCOMPAT_DEFAULT \ (POOL_FEAT_CKSUM_2K | POOL_E_FEAT_SDS) #else /* * shutdown state support on Linux requires root access on kernel < 4.20 with * ndctl < 63 so it is disabled by default */ #define POOL_FEAT_INCOMPAT_DEFAULT \ (POOL_FEAT_CKSUM_2K) #endif #if NDCTL_ENABLED #define POOL_FEAT_COMPAT_DEFAULT \ (POOL_FEAT_CHECK_BAD_BLOCKS) #else #define POOL_FEAT_COMPAT_DEFAULT \ (POOL_FEAT_ZERO) #endif #define FEAT_INCOMPAT(X) \ {POOL_FEAT_ZERO, POOL_FEAT_##X, POOL_FEAT_ZERO} #define POOL_FEAT_VALID \ {POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, POOL_FEAT_ZERO} /* * defines the first not checksummed field - all fields after this will be * ignored during checksum calculations. */ #define POOL_HDR_CSUM_2K_END_OFF offsetof(struct pool_hdr, unused2) #define POOL_HDR_CSUM_4K_END_OFF offsetof(struct pool_hdr, checksum) /* * pick the first not checksummed field. 2K variant is used if * POOL_FEAT_CKSUM_2K incompat feature is set. */ #define POOL_HDR_CSUM_END_OFF(hdrp) \ ((hdrp)->features.incompat & POOL_FEAT_CKSUM_2K) \ ? POOL_HDR_CSUM_2K_END_OFF : POOL_HDR_CSUM_4K_END_OFF /* ignore shutdown state if incompat feature is disabled */ #define IGNORE_SDS(hdrp) \ (((hdrp) != NULL) && (((hdrp)->features.incompat & POOL_FEAT_SDS) == 0)) #ifdef __cplusplus } #endif #endif
7,980
29.696154
77
h
null
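The alignment descriptor in pool_hdr.h above probes each basic type by measuring the offset of a field placed right after a single char; each probed value is then decremented and packed into 4 bits. A standalone sketch of the probe itself (the macro is copied for illustration; it needs GCC or Clang, and the printed values are typical for x86-64 and may differ elsewhere):

#include <stdio.h>
#include <stddef.h>

#define alignment_of(t) offsetof(struct { char c; t x; }, x)

int main(void)
{
	printf("char:        %zu\n", alignment_of(char));	/* 1 */
	printf("short:       %zu\n", alignment_of(short));	/* 2 */
	printf("int:         %zu\n", alignment_of(int));	/* 4 */
	printf("long long:   %zu\n", alignment_of(long long));	/* 8 */
	printf("double:      %zu\n", alignment_of(double));	/* 8 */
	printf("long double: %zu\n", alignment_of(long double));	/* 16 */
	printf("void *:      %zu\n", alignment_of(void *));	/* 8 */
	return 0;
}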
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/ex_common.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2017, Intel Corporation */ /* * ex_common.h -- examples utilities */ #ifndef EX_COMMON_H #define EX_COMMON_H #include <stdint.h> #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #ifdef __cplusplus extern "C" { #endif #ifndef _WIN32 #include <unistd.h> #define CREATE_MODE_RW (S_IWUSR | S_IRUSR) /* * file_exists -- checks if file exists */ static inline int file_exists(char const *file) { return access(file, F_OK); } /* * find_last_set_64 -- returns last set bit position or -1 if set bit not found */ static inline int find_last_set_64(uint64_t val) { return 64 - __builtin_clzll(val) - 1; } #else #include <windows.h> #include <corecrt_io.h> #include <process.h> #define CREATE_MODE_RW (S_IWRITE | S_IREAD) /* * file_exists -- checks if file exists */ static inline int file_exists(char const *file) { return _access(file, 0); } /* * find_last_set_64 -- returns last set bit position or -1 if set bit not found */ static inline int find_last_set_64(uint64_t val) { DWORD lz = 0; if (BitScanReverse64(&lz, val)) return (int)lz; else return -1; } #endif #ifdef __cplusplus } #endif #endif /* ex_common.h */
1,199
14.584416
79
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemlog/logfile/logentry.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * info prepended to each log entry... */ struct logentry { size_t len; /* length of the rest of the log entry */ time_t timestamp; #ifndef _WIN32 pid_t pid; #else int pid; #endif };
280
15.529412
55
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemblk/assetdb/asset.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ #define ASSET_NAME_MAX 256 #define ASSET_USER_NAME_MAX 64 #define ASSET_CHECKED_OUT 2 #define ASSET_FREE 1 struct asset { char name[ASSET_NAME_MAX]; char user[ASSET_USER_NAME_MAX]; time_t time; int state; };
300
19.066667
44
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/slab_allocator/slab_allocator.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017, Intel Corporation */ /* * slab_allocator.h -- slab-like mechanism for libpmemobj */ #ifndef SLAB_ALLOCATOR_H #define SLAB_ALLOCATOR_H #include <libpmemobj.h> struct slab_allocator; struct slab_allocator *slab_new(PMEMobjpool *pop, size_t size); void slab_delete(struct slab_allocator *slab); int slab_alloc(struct slab_allocator *slab, PMEMoid *oid, pmemobj_constr constructor, void *arg); PMEMoid slab_tx_alloc(struct slab_allocator *slab); #endif /* SLAB_ALLOCATOR_H */
542
22.608696
63
h
null
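A sketch of driving the slab allocator API declared above. The pool path and layout name are hypothetical, the matching slab_allocator.c must be linked in, and passing a NULL constructor is assumed to be acceptable (pmemobj_alloc permits it):

#include <stdio.h>
#include <libpmemobj.h>
#include "slab_allocator.h"

int main(void)
{
	PMEMobjpool *pop = pmemobj_create("/mnt/pmem/slab.pool",
	    "slab_example", PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL) {
		perror("pmemobj_create");
		return 1;
	}

	struct slab_allocator *slab = slab_new(pop, 128); /* 128-byte objects */

	PMEMoid oid;
	if (slab_alloc(slab, &oid, NULL, NULL) == 0)
		printf("allocated object at offset %llu\n",
		    (unsigned long long)oid.off);

	slab_delete(slab);
	pmemobj_close(pop);
	return 0;
}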
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/string_store_tx_type/layout.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * layout.h -- example from introduction part 3 */ #define MAX_BUF_LEN 10 POBJ_LAYOUT_BEGIN(string_store); POBJ_LAYOUT_ROOT(string_store, struct my_root); POBJ_LAYOUT_END(string_store); struct my_root { char buf[MAX_BUF_LEN]; };
324
18.117647
47
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/list_map/skiplist_map.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * skiplist_map.h -- sorted list collection implementation */ #ifndef SKIPLIST_MAP_H #define SKIPLIST_MAP_H #include <libpmemobj.h> #ifndef SKIPLIST_MAP_TYPE_OFFSET #define SKIPLIST_MAP_TYPE_OFFSET 2020 #endif struct skiplist_map_node; TOID_DECLARE(struct skiplist_map_node, SKIPLIST_MAP_TYPE_OFFSET + 0); int skiplist_map_check(PMEMobjpool *pop, TOID(struct skiplist_map_node) map); int skiplist_map_create(PMEMobjpool *pop, TOID(struct skiplist_map_node) *map, void *arg); int skiplist_map_destroy(PMEMobjpool *pop, TOID(struct skiplist_map_node) *map); int skiplist_map_insert(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key, PMEMoid value); int skiplist_map_insert_new(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg); PMEMoid skiplist_map_remove(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key); int skiplist_map_remove_free(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key); int skiplist_map_clear(PMEMobjpool *pop, TOID(struct skiplist_map_node) map); PMEMoid skiplist_map_get(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key); int skiplist_map_lookup(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key); int skiplist_map_foreach(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); int skiplist_map_is_empty(PMEMobjpool *pop, TOID(struct skiplist_map_node) map); #endif /* SKIPLIST_MAP_H */
1,688
36.533333
80
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/hashmap/hashmap.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ #ifndef HASHMAP_H #define HASHMAP_H /* common API provided by both implementations */ #include <stddef.h> #include <stdint.h> struct hashmap_args { uint32_t seed; }; enum hashmap_cmd { HASHMAP_CMD_REBUILD, HASHMAP_CMD_DEBUG, }; #endif /* HASHMAP_H */
345
15.47619
49
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/hashmap/hashmap_tx.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ #ifndef HASHMAP_TX_H #define HASHMAP_TX_H #include <stddef.h> #include <stdint.h> #include <hashmap.h> #include <libpmemobj.h> #ifndef HASHMAP_TX_TYPE_OFFSET #define HASHMAP_TX_TYPE_OFFSET 1004 #endif struct hashmap_tx; TOID_DECLARE(struct hashmap_tx, HASHMAP_TX_TYPE_OFFSET + 0); int hm_tx_check(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap); int hm_tx_create(PMEMobjpool *pop, TOID(struct hashmap_tx) *map, void *arg); int hm_tx_init(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap); int hm_tx_insert(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key, PMEMoid value); PMEMoid hm_tx_remove(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key); PMEMoid hm_tx_get(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key); int hm_tx_lookup(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key); int hm_tx_foreach(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); size_t hm_tx_count(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap); int hm_tx_cmd(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, unsigned cmd, uint64_t arg); #endif /* HASHMAP_TX_H */
1,270
34.305556
76
h
null
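A sketch of the transactional hashmap API declared above. The pool path and layout are hypothetical, hashmap_tx.c must be linked in, passing NULL for the optional args is assumed to select a default seed, and hm_tx_lookup is assumed to return a positive value when the key is present:

#include <stdio.h>
#include <libpmemobj.h>
#include "hashmap_tx.h"

int main(void)
{
	PMEMobjpool *pop = pmemobj_create("/mnt/pmem/hashmap.pool",
	    "hashmap_example", PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL) {
		perror("pmemobj_create");
		return 1;
	}

	TOID(struct hashmap_tx) map;
	if (hm_tx_create(pop, &map, NULL) != 0)
		return 1;

	/* allocate a small value object and file it under key 42 */
	PMEMoid val;
	if (pmemobj_zalloc(pop, &val, 64, 0) != 0)
		return 1;
	hm_tx_insert(pop, map, 42, val);

	printf("key 42 present: %d\n", hm_tx_lookup(pop, map, 42));
	printf("entries: %zu\n", hm_tx_count(pop, map));

	pmemobj_close(pop);
	return 0;
}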
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/hashmap/hashmap_rp.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018, Intel Corporation */ #ifndef HASHMAP_RP_H #define HASHMAP_RP_H #include <stddef.h> #include <stdint.h> #include <hashmap.h> #include <libpmemobj.h> #ifndef HASHMAP_RP_TYPE_OFFSET #define HASHMAP_RP_TYPE_OFFSET 1008 #endif /* Flags to indicate if insertion is being made during rebuild process */ #define HASHMAP_RP_REBUILD 1 #define HASHMAP_RP_NO_REBUILD 0 /* Initial number of entries for hashmap_rp */ #define INIT_ENTRIES_NUM_RP 16 /* Load factor to indicate resize threshold */ #define HASHMAP_RP_LOAD_FACTOR 0.5f /* Maximum number of swaps allowed during single insertion */ #define HASHMAP_RP_MAX_SWAPS 150 /* Size of an action array used during single insertion */ #define HASHMAP_RP_MAX_ACTIONS (4 * HASHMAP_RP_MAX_SWAPS + 5) struct hashmap_rp; TOID_DECLARE(struct hashmap_rp, HASHMAP_RP_TYPE_OFFSET + 0); int hm_rp_check(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap); int hm_rp_create(PMEMobjpool *pop, TOID(struct hashmap_rp) *map, void *arg); int hm_rp_init(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap); int hm_rp_insert(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, uint64_t key, PMEMoid value); PMEMoid hm_rp_remove(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, uint64_t key); PMEMoid hm_rp_get(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, uint64_t key); int hm_rp_lookup(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, uint64_t key); int hm_rp_foreach(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); size_t hm_rp_count(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap); int hm_rp_cmd(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, unsigned cmd, uint64_t arg); #endif /* HASHMAP_RP_H */
1,780
36.104167
76
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/hashmap/hashmap_internal.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ #ifndef HASHSET_INTERNAL_H #define HASHSET_INTERNAL_H /* large prime number used as a hashing function coefficient */ #define HASH_FUNC_COEFF_P 32212254719ULL /* initial number of buckets */ #define INIT_BUCKETS_NUM 10 /* number of values in a bucket which trigger hashtable rebuild check */ #define MIN_HASHSET_THRESHOLD 5 /* number of values in a bucket which force hashtable rebuild */ #define MAX_HASHSET_THRESHOLD 10 #endif
521
25.1
72
h
null
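The constants above only make sense next to the hash function that consumes them; the helper below is an illustrative sketch (not the code from hashmap_tx.c or hashmap_atomic.c) of the usual multiplicative-hash shape built from HASH_FUNC_COEFF_P and a per-map seed.

#include <stdint.h>
#include "hashmap_internal.h"

/* illustrative only: mix the key with the seed, scale by the large prime,
 * and reduce to a bucket index */
static uint64_t
example_hash(uint64_t seed, uint64_t key, uint64_t nbuckets)
{
	return ((seed ^ key) * HASH_FUNC_COEFF_P) % nbuckets;
}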
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/hashmap/hashmap_atomic.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ #ifndef HASHMAP_ATOMIC_H #define HASHMAP_ATOMIC_H #include <stddef.h> #include <stdint.h> #include <hashmap.h> #include <libpmemobj.h> #ifndef HASHMAP_ATOMIC_TYPE_OFFSET #define HASHMAP_ATOMIC_TYPE_OFFSET 1000 #endif struct hashmap_atomic; TOID_DECLARE(struct hashmap_atomic, HASHMAP_ATOMIC_TYPE_OFFSET + 0); int hm_atomic_check(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap); int hm_atomic_create(PMEMobjpool *pop, TOID(struct hashmap_atomic) *map, void *arg); int hm_atomic_init(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap); int hm_atomic_insert(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, uint64_t key, PMEMoid value); PMEMoid hm_atomic_remove(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, uint64_t key); PMEMoid hm_atomic_get(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, uint64_t key); int hm_atomic_lookup(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, uint64_t key); int hm_atomic_foreach(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); size_t hm_atomic_count(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap); int hm_atomic_cmd(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, unsigned cmd, uint64_t arg); #endif /* HASHMAP_ATOMIC_H */
1,384
36.432432
79
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/libart/arttree_structures.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2017, Intel Corporation */ /* * Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * =========================================================================== * * Filename: arttree_structures.h * * Description: known structures of the ART tree * * Author: Andreas Bluemle, Dieter Kasper * [email protected] * [email protected] * * Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH * * =========================================================================== */ #ifndef _ARTTREE_STRUCTURES_H #define _ARTTREE_STRUCTURES_H #define MAX_PREFIX_LEN 10 /* * pmem_context -- structure for pmempool file */ struct pmem_context { char *filename; size_t psize; int fd; char *addr; uint64_t art_tree_root_offset; }; struct _art_node_u; typedef struct _art_node_u art_node_u; struct _art_node; typedef struct _art_node art_node; struct _art_node4; typedef struct _art_node4 art_node4; struct _art_node16; typedef struct _art_node16 art_node16; struct _art_node48; typedef struct _art_node48 art_node48; struct _art_node256; typedef struct _art_node256 art_node256; struct _var_string; typedef struct _var_string var_string; struct _art_leaf; typedef struct _art_leaf art_leaf; struct _art_tree_root; typedef struct _art_tree_root art_tree_root; typedef uint8_t art_tree_root_toid_type_num[65535]; typedef uint8_t _toid_art_node_u_toid_type_num[2]; typedef uint8_t _toid_art_node_toid_type_num[3]; typedef uint8_t _toid_art_node4_toid_type_num[4]; typedef uint8_t _toid_art_node16_toid_type_num[5]; typedef uint8_t _toid_art_node48_toid_type_num[6]; typedef uint8_t _toid_art_node256_toid_type_num[7]; typedef uint8_t _toid_art_leaf_toid_type_num[8]; typedef uint8_t _toid_var_string_toid_type_num[9]; typedef struct pmemoid { uint64_t pool_uuid_lo; uint64_t off; } PMEMoid; union _toid_art_node_u_toid { PMEMoid oid; art_node_u *_type; _toid_art_node_u_toid_type_num *_type_num; }; union art_tree_root_toid { PMEMoid oid; struct art_tree_root *_type; art_tree_root_toid_type_num *_type_num; }; union _toid_art_node_toid { 
PMEMoid oid; art_node *_type; _toid_art_node_toid_type_num *_type_num; }; union _toid_art_node4_toid { PMEMoid oid; art_node4 *_type; _toid_art_node4_toid_type_num *_type_num; }; union _toid_art_node16_toid { PMEMoid oid; art_node16 *_type; _toid_art_node16_toid_type_num *_type_num; }; union _toid_art_node48_toid { PMEMoid oid; art_node48 *_type; _toid_art_node48_toid_type_num *_type_num; }; union _toid_art_node256_toid { PMEMoid oid; art_node256 *_type; _toid_art_node256_toid_type_num *_type_num; }; union _toid_var_string_toid { PMEMoid oid; var_string *_type; _toid_var_string_toid_type_num *_type_num; }; union _toid_art_leaf_toid { PMEMoid oid; art_leaf *_type; _toid_art_leaf_toid_type_num *_type_num; }; struct _art_tree_root { int size; union _toid_art_node_u_toid root; }; struct _art_node { uint8_t num_children; uint32_t partial_len; unsigned char partial[MAX_PREFIX_LEN]; }; struct _art_node4 { art_node n; unsigned char keys[4]; union _toid_art_node_u_toid children[4]; }; struct _art_node16 { art_node n; unsigned char keys[16]; union _toid_art_node_u_toid children[16]; }; struct _art_node48 { art_node n; unsigned char keys[256]; union _toid_art_node_u_toid children[48]; }; struct _art_node256 { art_node n; union _toid_art_node_u_toid children[256]; }; struct _var_string { size_t len; unsigned char s[]; }; struct _art_leaf { union _toid_var_string_toid value; union _toid_var_string_toid key; }; struct _art_node_u { uint8_t art_node_type; uint8_t art_node_tag; union { union _toid_art_node4_toid an4; union _toid_art_node16_toid an16; union _toid_art_node48_toid an48; union _toid_art_node256_toid an256; union _toid_art_leaf_toid al; } u; }; typedef enum { ART_NODE4 = 0, ART_NODE16 = 1, ART_NODE48 = 2, ART_NODE256 = 3, ART_LEAF = 4, ART_NODE_U = 5, ART_NODE = 6, ART_TREE_ROOT = 7, VAR_STRING = 8, art_node_types = 9 /* number of different art_nodes */ } art_node_type; #define VALID_NODE_TYPE(n) (((n) >= 0) && ((n) < art_node_types)) extern size_t art_node_sizes[]; extern char *art_node_names[]; #endif /* _ARTTREE_STRUCTURES_H */
5,923
25.927273
78
h
null
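A small, hypothetical helper showing how the VALID_NODE_TYPE macro and the extern art_node_names/art_node_sizes tables declared above are meant to be consumed; the tables themselves are defined elsewhere in these sources and are not shown here.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include "arttree_structures.h"

/* hypothetical helper: print the name and on-media size of a node type */
static void
describe_node_type(int type)
{
	if (!VALID_NODE_TYPE(type)) {
		printf("unknown node type %d\n", type);
		return;
	}
	printf("%s: %zu bytes\n", art_node_names[type], art_node_sizes[type]);
}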
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/libart/art.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ /* * Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH * Copyright 2012, Armon Dadgar. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * =========================================================================== * * Filename: art.h * * Description: header file for art tree on pmem implementation * * Author: Andreas Bluemle, Dieter Kasper * [email protected] * [email protected] * * Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH * * =========================================================================== */ /* * based on https://github.com/armon/libart/src/art.h */ #ifndef _ART_H #define _ART_H #ifdef __cplusplus extern "C" { #endif #define MAX_PREFIX_LEN 10 typedef enum { NODE4 = 0, NODE16 = 1, NODE48 = 2, NODE256 = 3, art_leaf_t = 4, art_node_types = 5 /* number of different art_nodes */ } art_node_type; char *art_node_names[] = { "art_node4", "art_node16", "art_node48", "art_node256", "art_leaf" }; /* * forward declarations; these are required when typedef shall be * used instead of struct */ struct _art_node_u; typedef struct _art_node_u art_node_u; struct _art_node; typedef struct _art_node art_node; struct _art_node4; typedef struct _art_node4 art_node4; struct _art_node16; typedef struct _art_node16 art_node16; struct _art_node48; typedef struct _art_node48 art_node48; struct _art_node256; typedef struct _art_node256 art_node256; struct _art_leaf; typedef struct _art_leaf art_leaf; struct _var_string; typedef struct _var_string var_string; POBJ_LAYOUT_BEGIN(arttree_tx); POBJ_LAYOUT_ROOT(arttree_tx, struct art_tree_root); POBJ_LAYOUT_TOID(arttree_tx, art_node_u); POBJ_LAYOUT_TOID(arttree_tx, art_node4); POBJ_LAYOUT_TOID(arttree_tx, art_node16); POBJ_LAYOUT_TOID(arttree_tx, art_node48); POBJ_LAYOUT_TOID(arttree_tx, art_node256); POBJ_LAYOUT_TOID(arttree_tx, art_leaf); POBJ_LAYOUT_TOID(arttree_tx, var_string); POBJ_LAYOUT_END(arttree_tx); struct _var_string { size_t len; unsigned char s[]; }; /* * This struct is included as part of all the various node sizes */ struct 
_art_node { uint8_t num_children; uint32_t partial_len; unsigned char partial[MAX_PREFIX_LEN]; }; /* * Small node with only 4 children */ struct _art_node4 { art_node n; unsigned char keys[4]; TOID(art_node_u) children[4]; }; /* * Node with 16 children */ struct _art_node16 { art_node n; unsigned char keys[16]; TOID(art_node_u) children[16]; }; /* * Node with 48 children, but a full 256 byte field. */ struct _art_node48 { art_node n; unsigned char keys[256]; TOID(art_node_u) children[48]; }; /* * Full node with 256 children */ struct _art_node256 { art_node n; TOID(art_node_u) children[256]; }; /* * Represents a leaf. These are of arbitrary size, as they include the key. */ struct _art_leaf { TOID(var_string) value; TOID(var_string) key; }; struct _art_node_u { uint8_t art_node_type; uint8_t art_node_tag; union { TOID(art_node4) an4; /* starts with art_node */ TOID(art_node16) an16; /* starts with art_node */ TOID(art_node48) an48; /* starts with art_node */ TOID(art_node256) an256; /* starts with art_node */ TOID(art_leaf) al; } u; }; struct art_tree_root { int size; TOID(art_node_u) root; }; typedef struct _cb_data { TOID(art_node_u) node; int child_idx; } cb_data; /* * Macros to manipulate art_node tags */ #define IS_LEAF(x) (((x)->art_node_type == art_leaf_t)) #define SET_LEAF(x) (((x)->art_node_tag = art_leaf_t)) #define COPY_BLOB(_obj, _blob, _len) \ D_RW(_obj)->len = _len; \ TX_MEMCPY(D_RW(_obj)->s, _blob, _len); \ D_RW(_obj)->s[(_len) - 1] = '\0'; typedef int(*art_callback)(void *data, const unsigned char *key, uint32_t key_len, const unsigned char *value, uint32_t val_len); extern int art_tree_init(PMEMobjpool *pop, int *newpool); extern uint64_t art_size(PMEMobjpool *pop); extern int art_iter(PMEMobjpool *pop, art_callback cb, void *data); extern TOID(var_string) art_insert(PMEMobjpool *pop, const unsigned char *key, int key_len, void *value, int val_len); extern TOID(var_string) art_search(PMEMobjpool *pop, const unsigned char *key, int key_len); extern TOID(var_string) art_delete(PMEMobjpool *pop, const unsigned char *key, int key_len); #ifdef __cplusplus } #endif #endif /* _ART_H */
5,998
26.773148
78
h
null
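A minimal sketch of the transactional ART interface declared above. It assumes the pool was opened with the arttree_tx layout, the key/value literals are placeholders, and error handling is omitted.

#include <stdio.h>
#include <libpmemobj.h>
#include "art.h"

static void
art_sketch(PMEMobjpool *pop)
{
	const unsigned char key[] = "hello";
	char value[] = "world";
	int newpool = 0;

	art_tree_init(pop, &newpool);	/* allocates the root on a fresh pool */
	art_insert(pop, key, sizeof(key), value, sizeof(value));

	TOID(var_string) found = art_search(pop, key, sizeof(key));
	if (!TOID_IS_NULL(found))
		printf("found: %s\n", D_RO(found)->s);
}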
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/libart/arttree.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017, Intel Corporation */ /* * Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * =========================================================================== * * Filename: arttree.h * * Description: header file for art tree on pmem implementation * * Author: Andreas Bluemle, Dieter Kasper * [email protected] * [email protected] * * Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH * * =========================================================================== */ #ifndef _ARTTREE_H #define _ARTTREE_H #ifdef __cplusplus extern "C" { #endif #include "art.h" #ifdef __cplusplus } #endif #endif /* _ARTTREE_H */
2,337
34.969231
78
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/queue/multirun.sh
#!/bin/bash sudo rm -rf /mnt/mem/queue.pool sudo pmempool create --layout="queue" obj myobjpool.set sudo ./queue /mnt/mem/queue.pool new 10000 #for (( c=1; c<=10000; c++ )) #do #echo "$c" sudo ./queue /mnt/mem/queue.pool enqueue hello #done
246
23.7
55
sh
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/queue/run.sh
sudo rm -rf /mnt/mem/queue.pool sudo pmempool create --layout="queue" obj myobjpool.set #sudo ../../../tools/pmempool/pmempool create obj /mnt/mem/queue.pool --layout queue sudo ./queue /mnt/mem/queue.pool new 10000 sudo ./queue /mnt/mem/queue.pool enqueue hello>enqueue sudo ./queue /mnt/mem/queue.pool show grep tx enqueue | awk '{print $3}'>file grep tx dequeue | awk '{print $3}'>file make -j12 EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DRUN_COUNT=10000 EXTRA_CFLAGS="-Wno-error"
497
40.5
105
sh
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/queue/runall.sh
make -j12 EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DRUN_COUNT=10000 EXTRA_CFLAGS="-Wno-error" sudo rm -rf /mnt/mem/queue.pool sudo pmempool create --layout="queue" obj myobjpool.set #sudo ../../../tools/pmempool/pmempool create obj /mnt/mem/queue.pool --layout queue sudo ./queue /mnt/mem/queue.pool new 10000
324
39.625
105
sh
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/linkedlist/run.sh
sudo rm -rf /mnt/mem/fifo.pool sudo pmempool create --layout="list" obj myobjpool.set sudo ../../../tools/pmempool/pmempool create obj /mnt/mem/fifo.pool --layout list sudo ./fifo /mnt/mem/fifo.pool insert a sudo ./fifo /mnt/mem/fifo.pool remove a
249
34.714286
81
sh
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/linkedlist/pmemobj_list.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * pmemobj_list.h -- macro definitions for persistent * singly linked list and tail queue */ #ifndef PMEMOBJ_LISTS_H #define PMEMOBJ_LISTS_H #include <libpmemobj.h> /* * This file defines two types of persistent data structures: * singly-linked lists and tail queue. * * All macros defined in this file must be used within libpmemobj * transactional API. Following snippet presents example of usage: * * TX_BEGIN(pop) { * POBJ_TAILQ_INIT(head); * } TX_ONABORT { * abort(); * } TX_END * * SLIST TAILQ * _HEAD + + * _ENTRY + + * _INIT + + * _EMPTY + + * _FIRST + + * _NEXT + + * _PREV - + * _LAST - + * _FOREACH + + * _FOREACH_REVERSE - + * _INSERT_HEAD + + * _INSERT_BEFORE - + * _INSERT_AFTER + + * _INSERT_TAIL - + * _MOVE_ELEMENT_HEAD - + * _MOVE_ELEMENT_TAIL - + * _REMOVE_HEAD + - * _REMOVE + + * _REMOVE_FREE + + * _SWAP_HEAD_TAIL - + */ /* * Singly-linked List definitions. */ #define POBJ_SLIST_HEAD(name, type)\ struct name {\ TOID(type) pe_first;\ } #define POBJ_SLIST_ENTRY(type)\ struct {\ TOID(type) pe_next;\ } /* * Singly-linked List access methods. */ #define POBJ_SLIST_EMPTY(head) (TOID_IS_NULL((head)->pe_first)) #define POBJ_SLIST_FIRST(head) ((head)->pe_first) #define POBJ_SLIST_NEXT(elm, field) (D_RO(elm)->field.pe_next) /* * Singly-linked List functions. */ #define POBJ_SLIST_INIT(head) do {\ TX_ADD_DIRECT(&(head)->pe_first);\ TOID_ASSIGN((head)->pe_first, OID_NULL);\ } while (0) #define POBJ_SLIST_INSERT_HEAD(head, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ TX_ADD_DIRECT(&elm_ptr->field.pe_next);\ elm_ptr->field.pe_next = (head)->pe_first;\ TX_SET_DIRECT(head, pe_first, elm);\ } while (0) #define POBJ_SLIST_INSERT_AFTER(slistelm, elm, field) do {\ TOID_TYPEOF(slistelm) *slistelm_ptr = D_RW(slistelm);\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ TX_ADD_DIRECT(&elm_ptr->field.pe_next);\ elm_ptr->field.pe_next = slistelm_ptr->field.pe_next;\ TX_ADD_DIRECT(&slistelm_ptr->field.pe_next);\ slistelm_ptr->field.pe_next = elm;\ } while (0) #define POBJ_SLIST_REMOVE_HEAD(head, field) do {\ TX_ADD_DIRECT(&(head)->pe_first);\ (head)->pe_first = D_RO((head)->pe_first)->field.pe_next;\ } while (0) #define POBJ_SLIST_REMOVE(head, elm, field) do {\ if (TOID_EQUALS((head)->pe_first, elm)) {\ POBJ_SLIST_REMOVE_HEAD(head, field);\ } else {\ TOID_TYPEOF(elm) *curelm_ptr = D_RW((head)->pe_first);\ while (!TOID_EQUALS(curelm_ptr->field.pe_next, elm))\ curelm_ptr = D_RW(curelm_ptr->field.pe_next);\ TX_ADD_DIRECT(&curelm_ptr->field.pe_next);\ curelm_ptr->field.pe_next = D_RO(elm)->field.pe_next;\ }\ } while (0) #define POBJ_SLIST_REMOVE_FREE(head, elm, field) do {\ POBJ_SLIST_REMOVE(head, elm, field);\ TX_FREE(elm);\ } while (0) #define POBJ_SLIST_FOREACH(var, head, field)\ for ((var) = POBJ_SLIST_FIRST(head);\ !TOID_IS_NULL(var);\ var = POBJ_SLIST_NEXT(var, field)) /* * Tail-queue definitions. */ #define POBJ_TAILQ_ENTRY(type)\ struct {\ TOID(type) pe_next;\ TOID(type) pe_prev;\ } #define POBJ_TAILQ_HEAD(name, type)\ struct name {\ TOID(type) pe_first;\ TOID(type) pe_last;\ } /* * Tail-queue access methods. */ #define POBJ_TAILQ_FIRST(head) ((head)->pe_first) #define POBJ_TAILQ_LAST(head) ((head)->pe_last) #define POBJ_TAILQ_EMPTY(head) (TOID_IS_NULL((head)->pe_first)) #define POBJ_TAILQ_NEXT(elm, field) (D_RO(elm)->field.pe_next) #define POBJ_TAILQ_PREV(elm, field) (D_RO(elm)->field.pe_prev) /* * Tail-queue List internal methods. 
*/ #define _POBJ_SWAP_PTR(elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ TX_ADD_DIRECT(&elm_ptr->field);\ __typeof__(elm) temp = elm_ptr->field.pe_prev;\ elm_ptr->field.pe_prev = elm_ptr->field.pe_next;\ elm_ptr->field.pe_next = temp;\ } while (0) /* * Tail-queue functions. */ #define POBJ_TAILQ_SWAP_HEAD_TAIL(head, field) do {\ __typeof__((head)->pe_first) temp = (head)->pe_first;\ TX_ADD_DIRECT(head);\ (head)->pe_first = (head)->pe_last;\ (head)->pe_last = temp;\ } while (0) #define POBJ_TAILQ_FOREACH(var, head, field)\ for ((var) = POBJ_TAILQ_FIRST(head);\ !TOID_IS_NULL(var);\ var = POBJ_TAILQ_NEXT(var, field)) #define POBJ_TAILQ_FOREACH_REVERSE(var, head, field)\ for ((var) = POBJ_TAILQ_LAST(head);\ !TOID_IS_NULL(var);\ var = POBJ_TAILQ_PREV(var, field)) #define POBJ_TAILQ_INIT(head) do {\ TX_ADD_FIELD_DIRECT(head, pe_first);\ TOID_ASSIGN((head)->pe_first, OID_NULL);\ TX_ADD_FIELD_DIRECT(head, pe_last);\ TOID_ASSIGN((head)->pe_last, OID_NULL);\ } while (0) #define POBJ_TAILQ_INSERT_HEAD(head, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ if (TOID_IS_NULL((head)->pe_first)) {\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_prev = (head)->pe_first;\ elm_ptr->field.pe_next = (head)->pe_first;\ TX_ADD_DIRECT(head);\ (head)->pe_first = elm;\ (head)->pe_last = elm;\ } else {\ TOID_TYPEOF(elm) *first = D_RW((head)->pe_first);\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_next = (head)->pe_first;\ elm_ptr->field.pe_prev = first->field.pe_prev;\ TX_ADD_DIRECT(&first->field.pe_prev);\ first->field.pe_prev = elm;\ TX_SET_DIRECT(head, pe_first, elm);\ }\ } while (0) #define POBJ_TAILQ_INSERT_TAIL(head, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ if (TOID_IS_NULL((head)->pe_last)) {\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_prev = (head)->pe_last;\ elm_ptr->field.pe_next = (head)->pe_last;\ TX_ADD_DIRECT(head);\ (head)->pe_first = elm;\ (head)->pe_last = elm;\ } else {\ TOID_TYPEOF(elm) *last = D_RW((head)->pe_last);\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_prev = (head)->pe_last;\ elm_ptr->field.pe_next = last->field.pe_next;\ TX_ADD_DIRECT(&last->field.pe_next);\ last->field.pe_next = elm;\ TX_SET_DIRECT(head, pe_last, elm);\ }\ } while (0) #define POBJ_TAILQ_INSERT_AFTER(listelm, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ TOID_TYPEOF(listelm) *listelm_ptr = D_RW(listelm);\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_prev = listelm;\ elm_ptr->field.pe_next = listelm_ptr->field.pe_next;\ if (TOID_IS_NULL(listelm_ptr->field.pe_next)) {\ TX_SET_DIRECT(head, pe_last, elm);\ } else {\ TOID_TYPEOF(elm) *next = D_RW(listelm_ptr->field.pe_next);\ TX_ADD_DIRECT(&next->field.pe_prev);\ next->field.pe_prev = elm;\ }\ TX_ADD_DIRECT(&listelm_ptr->field.pe_next);\ listelm_ptr->field.pe_next = elm;\ } while (0) #define POBJ_TAILQ_INSERT_BEFORE(listelm, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ TOID_TYPEOF(listelm) *listelm_ptr = D_RW(listelm);\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_next = listelm;\ elm_ptr->field.pe_prev = listelm_ptr->field.pe_prev;\ if (TOID_IS_NULL(listelm_ptr->field.pe_prev)) {\ TX_SET_DIRECT(head, pe_first, elm);\ } else {\ TOID_TYPEOF(elm) *prev = D_RW(listelm_ptr->field.pe_prev);\ TX_ADD_DIRECT(&prev->field.pe_next);\ prev->field.pe_next = elm; \ }\ TX_ADD_DIRECT(&listelm_ptr->field.pe_prev);\ listelm_ptr->field.pe_prev = elm;\ } while (0) #define POBJ_TAILQ_REMOVE(head, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ if (TOID_IS_NULL(elm_ptr->field.pe_prev) 
&&\ TOID_IS_NULL(elm_ptr->field.pe_next)) {\ TX_ADD_DIRECT(head);\ (head)->pe_first = elm_ptr->field.pe_prev;\ (head)->pe_last = elm_ptr->field.pe_next;\ } else {\ if (TOID_IS_NULL(elm_ptr->field.pe_prev)) {\ TX_SET_DIRECT(head, pe_first, elm_ptr->field.pe_next);\ TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\ TX_ADD_DIRECT(&next->field.pe_prev);\ next->field.pe_prev = elm_ptr->field.pe_prev;\ } else {\ TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\ TX_ADD_DIRECT(&prev->field.pe_next);\ prev->field.pe_next = elm_ptr->field.pe_next;\ }\ if (TOID_IS_NULL(elm_ptr->field.pe_next)) {\ TX_SET_DIRECT(head, pe_last, elm_ptr->field.pe_prev);\ TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\ TX_ADD_DIRECT(&prev->field.pe_next);\ prev->field.pe_next = elm_ptr->field.pe_next;\ } else {\ TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\ TX_ADD_DIRECT(&next->field.pe_prev);\ next->field.pe_prev = elm_ptr->field.pe_prev;\ }\ }\ } while (0) #define POBJ_TAILQ_REMOVE_FREE(head, elm, field) do {\ POBJ_TAILQ_REMOVE(head, elm, field);\ TX_FREE(elm);\ } while (0) /* * 2 cases: only two elements, the rest possibilities * including that elm is the last one */ #define POBJ_TAILQ_MOVE_ELEMENT_HEAD(head, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ if (TOID_EQUALS((head)->pe_last, elm) &&\ TOID_EQUALS(D_RO((head)->pe_first)->field.pe_next, elm)) {\ _POBJ_SWAP_PTR(elm, field);\ _POBJ_SWAP_PTR((head)->pe_first, field);\ POBJ_TAILQ_SWAP_HEAD_TAIL(head, field);\ } else {\ TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\ TX_ADD_DIRECT(&prev->field.pe_next);\ prev->field.pe_next = elm_ptr->field.pe_next;\ if (TOID_EQUALS((head)->pe_last, elm)) {\ TX_SET_DIRECT(head, pe_last, elm_ptr->field.pe_prev);\ } else {\ TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\ TX_ADD_DIRECT(&next->field.pe_prev);\ next->field.pe_prev = elm_ptr->field.pe_prev;\ }\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_prev = D_RO((head)->pe_first)->field.pe_prev;\ elm_ptr->field.pe_next = (head)->pe_first;\ TOID_TYPEOF(elm) *first = D_RW((head)->pe_first);\ TX_ADD_DIRECT(&first->field.pe_prev);\ first->field.pe_prev = elm;\ TX_SET_DIRECT(head, pe_first, elm);\ }\ } while (0) #define POBJ_TAILQ_MOVE_ELEMENT_TAIL(head, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ if (TOID_EQUALS((head)->pe_first, elm) &&\ TOID_EQUALS(D_RO((head)->pe_last)->field.pe_prev, elm)) {\ _POBJ_SWAP_PTR(elm, field);\ _POBJ_SWAP_PTR((head)->pe_last, field);\ POBJ_TAILQ_SWAP_HEAD_TAIL(head, field);\ } else {\ TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\ TX_ADD_DIRECT(&next->field.pe_prev);\ next->field.pe_prev = elm_ptr->field.pe_prev;\ if (TOID_EQUALS((head)->pe_first, elm)) {\ TX_SET_DIRECT(head, pe_first, elm_ptr->field.pe_next);\ } else { \ TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\ TX_ADD_DIRECT(&prev->field.pe_next);\ prev->field.pe_next = elm_ptr->field.pe_next;\ }\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_prev = (head)->pe_last;\ elm_ptr->field.pe_next = D_RO((head)->pe_last)->field.pe_next;\ __typeof__(elm_ptr) last = D_RW((head)->pe_last);\ TX_ADD_DIRECT(&last->field.pe_next);\ last->field.pe_next = elm;\ TX_SET_DIRECT(head, pe_last, elm);\ } \ } while (0) #endif /* PMEMOBJ_LISTS_H */
11,243
30.762712
66
h
null
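A sketch of the intended usage pattern, modeled on the linkedlist/fifo example these macros ship with: the layout name, node type, and field names below are illustrative, and every list operation has to run inside a libpmemobj transaction, as the header's own comment requires.

#include <libpmemobj.h>
#include "pmemobj_list.h"

POBJ_LAYOUT_BEGIN(fifo_sketch);
POBJ_LAYOUT_ROOT(fifo_sketch, struct fifo_root);
POBJ_LAYOUT_TOID(fifo_sketch, struct tqnode);
POBJ_LAYOUT_END(fifo_sketch);

POBJ_TAILQ_HEAD(tqueuehead, struct tqnode);

struct fifo_root {
	struct tqueuehead head;	/* persistent tail-queue anchor */
};

struct tqnode {
	int data;
	POBJ_TAILQ_ENTRY(struct tqnode) tnd;	/* links to neighbours */
};

static void
push_value(PMEMobjpool *pop, int value)
{
	TOID(struct fifo_root) root = POBJ_ROOT(pop, struct fifo_root);

	TX_BEGIN(pop) {
		TOID(struct tqnode) node = TX_NEW(struct tqnode);
		D_RW(node)->data = value;
		POBJ_TAILQ_INSERT_TAIL(&D_RW(root)->head, node, tnd);
	} TX_END
}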
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_hashmap_atomic.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * map_hashmap_atomic.h -- common interface for maps */ #ifndef MAP_HASHMAP_ATOMIC_H #define MAP_HASHMAP_ATOMIC_H #include "map.h" #ifdef __cplusplus extern "C" { #endif extern struct map_ops hashmap_atomic_ops; #define MAP_HASHMAP_ATOMIC (&hashmap_atomic_ops) #ifdef __cplusplus } #endif #endif /* MAP_HASHMAP_ATOMIC_H */
421
15.230769
52
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/kv_server_test.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2015-2016, Intel Corporation set -euo pipefail MAP=ctree PORT=9100 POOL=$1 # start a new server instance ./kv_server $MAP $POOL $PORT & # wait for the server to properly start sleep 1 # insert a new key value pair and disconnect RESP=`echo -e "INSERT foo bar\nGET foo\nBYE" | nc 127.0.0.1 $PORT` echo $RESP # remove previously inserted key value pair and shutdown the server RESP=`echo -e "GET foo\nREMOVE foo\nGET foo\nKILL" | nc 127.0.0.1 $PORT` echo $RESP
537
21.416667
72
sh
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_btree.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * map_btree.h -- common interface for maps */ #ifndef MAP_BTREE_H #define MAP_BTREE_H #include "map.h" #ifdef __cplusplus extern "C" { #endif extern struct map_ops btree_map_ops; #define MAP_BTREE (&btree_map_ops) #ifdef __cplusplus } #endif #endif /* MAP_BTREE_H */
366
13.115385
44
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_rtree.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * map_rtree.h -- common interface for maps */ #ifndef MAP_RTREE_H #define MAP_RTREE_H #include "map.h" #ifdef __cplusplus extern "C" { #endif extern struct map_ops rtree_map_ops; #define MAP_RTREE (&rtree_map_ops) #ifdef __cplusplus } #endif #endif /* MAP_RTREE_H */
366
13.115385
44
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_skiplist.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * map_skiplist.h -- common interface for maps */ #ifndef MAP_SKIPLIST_H #define MAP_SKIPLIST_H #include "map.h" extern struct map_ops skiplist_map_ops; #define MAP_SKIPLIST (&skiplist_map_ops) #endif /* MAP_SKIPLIST_H */
313
16.444444
46
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * map.h -- common interface for maps */ #ifndef MAP_H #define MAP_H #include <libpmemobj.h> #ifdef __cplusplus extern "C" { #endif #ifndef MAP_TYPE_OFFSET #define MAP_TYPE_OFFSET 1000 #endif TOID_DECLARE(struct map, MAP_TYPE_OFFSET + 0); struct map; struct map_ctx; struct map_ops { int(*check)(PMEMobjpool *pop, TOID(struct map) map); int(*create)(PMEMobjpool *pop, TOID(struct map) *map, void *arg); int(*destroy)(PMEMobjpool *pop, TOID(struct map) *map); int(*init)(PMEMobjpool *pop, TOID(struct map) map); int(*insert)(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, PMEMoid value); int(*insert_new)(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, size_t size, unsigned type_num, void(*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg); PMEMoid(*remove)(PMEMobjpool *pop, TOID(struct map) map, uint64_t key); int(*remove_free)(PMEMobjpool *pop, TOID(struct map) map, uint64_t key); int(*clear)(PMEMobjpool *pop, TOID(struct map) map); PMEMoid(*get)(PMEMobjpool *pop, TOID(struct map) map, uint64_t key); int(*lookup)(PMEMobjpool *pop, TOID(struct map) map, uint64_t key); int(*foreach)(PMEMobjpool *pop, TOID(struct map) map, int(*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); int(*is_empty)(PMEMobjpool *pop, TOID(struct map) map); size_t(*count)(PMEMobjpool *pop, TOID(struct map) map); int(*cmd)(PMEMobjpool *pop, TOID(struct map) map, unsigned cmd, uint64_t arg); }; struct map_ctx { PMEMobjpool *pop; const struct map_ops *ops; }; struct map_ctx *map_ctx_init(const struct map_ops *ops, PMEMobjpool *pop); void map_ctx_free(struct map_ctx *mapc); int map_check(struct map_ctx *mapc, TOID(struct map) map); int map_create(struct map_ctx *mapc, TOID(struct map) *map, void *arg); int map_destroy(struct map_ctx *mapc, TOID(struct map) *map); int map_init(struct map_ctx *mapc, TOID(struct map) map); int map_insert(struct map_ctx *mapc, TOID(struct map) map, uint64_t key, PMEMoid value); int map_insert_new(struct map_ctx *mapc, TOID(struct map) map, uint64_t key, size_t size, unsigned type_num, void(*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg); PMEMoid map_remove(struct map_ctx *mapc, TOID(struct map) map, uint64_t key); int map_remove_free(struct map_ctx *mapc, TOID(struct map) map, uint64_t key); int map_clear(struct map_ctx *mapc, TOID(struct map) map); PMEMoid map_get(struct map_ctx *mapc, TOID(struct map) map, uint64_t key); int map_lookup(struct map_ctx *mapc, TOID(struct map) map, uint64_t key); int map_foreach(struct map_ctx *mapc, TOID(struct map) map, int(*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); int map_is_empty(struct map_ctx *mapc, TOID(struct map) map); size_t map_count(struct map_ctx *mapc, TOID(struct map) map); int map_cmd(struct map_ctx *mapc, TOID(struct map) map, unsigned cmd, uint64_t arg); #ifdef __cplusplus } #endif #endif /* MAP_H */
3,010
31.728261
78
h
null
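A sketch of driving this generic map interface with one concrete backend (the ctree ops declared further down in map_ctree.h); pool creation/open and error reporting are left out.

#include <stdint.h>
#include <libpmemobj.h>
#include "map.h"
#include "map_ctree.h"

static int
store_pair(PMEMobjpool *pop, uint64_t key, PMEMoid value)
{
	struct map_ctx *mapc = map_ctx_init(MAP_CTREE, pop);	/* bind backend */
	if (mapc == NULL)
		return -1;

	TOID(struct map) map;
	if (map_create(mapc, &map, NULL) != 0) {
		map_ctx_free(mapc);
		return -1;
	}

	int ret = map_insert(mapc, map, key, value);
	map_ctx_free(mapc);
	return ret;
}

Swapping MAP_CTREE for any of the other ops tables (btree, rbtree, hashmap_tx, ...) leaves the rest of the code unchanged, which is the point of the map_ctx indirection.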
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_hashmap_rp.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018, Intel Corporation */ /* * map_hashmap_rp.h -- common interface for maps */ #ifndef MAP_HASHMAP_RP_H #define MAP_HASHMAP_RP_H #include "map.h" #ifdef __cplusplus extern "C" { #endif extern struct map_ops hashmap_rp_ops; #define MAP_HASHMAP_RP (&hashmap_rp_ops) #ifdef __cplusplus } #endif #endif /* MAP_HASHMAP_RP_H */
388
13.961538
48
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/run.sh
#!/usr/bin/env bash sudo rm -rf /mnt/mem/* sudo ./data_store $1 /mnt/mem/map 10000 > out tx=$(grep "TX" out) tot=$(grep "tottime" out) grep "cp" out > time cp=$(awk '{sum+= $2;} END{print sum;}' time) echo $1$tx echo $1$tot echo $1'cp' $cp
242
17.692308
45
sh
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_hashmap_tx.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * map_hashmap_tx.h -- common interface for maps */ #ifndef MAP_HASHMAP_TX_H #define MAP_HASHMAP_TX_H #include "map.h" #ifdef __cplusplus extern "C" { #endif extern struct map_ops hashmap_tx_ops; #define MAP_HASHMAP_TX (&hashmap_tx_ops) #ifdef __cplusplus } #endif #endif /* MAP_HASHMAP_TX_H */
393
14.153846
48
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_rbtree.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * map_rbtree.h -- common interface for maps */ #ifndef MAP_RBTREE_H #define MAP_RBTREE_H #include "map.h" #ifdef __cplusplus extern "C" { #endif extern struct map_ops rbtree_map_ops; #define MAP_RBTREE (&rbtree_map_ops) #ifdef __cplusplus } #endif #endif /* MAP_RBTREE_H */
373
13.384615
44
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/kv_protocol.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2016, Intel Corporation */ /* * kv_protocol.h -- kv store text protocol */ #ifndef KV_PROTOCOL_H #define KV_PROTOCOL_H #include <stdint.h> #define MAX_KEY_LEN 255 /* * All client messages must start with a valid message token and be terminated * by a newline character ('\n'). The message parser is case-sensitive. * * Server responds with newline terminated string literals. * If invalid message token is received RESP_MSG_UNKNOWN is sent. */ enum kv_cmsg { /* * INSERT client message * Syntax: INSERT [key] [value]\n * * The key is limited to 255 characters, the size of a value is limited * by the pmemobj maximum allocation size (~16 gigabytes). * * Operation adds a new key value pair to the map. * Returns RESP_MSG_SUCCESS if successful or RESP_MSG_FAIL otherwise. */ CMSG_INSERT, /* * REMOVE client message * Syntax: REMOVE [key]\n * * Operation removes a key value pair from the map. * Returns RESP_MSG_SUCCESS if successful or RESP_MSG_FAIL otherwise. */ CMSG_REMOVE, /* * GET client message * Syntax: GET [key]\n * * Operation retrieves a key value pair from the map. * Returns the value if found or RESP_MSG_NULL otherwise. */ CMSG_GET, /* * BYE client message * Syntax: BYE\n * * Operation terminates the client connection. * No return value. */ CMSG_BYE, /* * KILL client message * Syntax: KILL\n * * Operation terminates the client connection and gracefully shutdowns * the server. * No return value. */ CMSG_KILL, MAX_CMSG }; enum resp_messages { RESP_MSG_SUCCESS, RESP_MSG_FAIL, RESP_MSG_NULL, RESP_MSG_UNKNOWN, MAX_RESP_MSG }; static const char *resp_msg[MAX_RESP_MSG] = { [RESP_MSG_SUCCESS] = "SUCCESS\n", [RESP_MSG_FAIL] = "FAIL\n", [RESP_MSG_NULL] = "NULL\n", [RESP_MSG_UNKNOWN] = "UNKNOWN\n" }; static const char *kv_cmsg_token[MAX_CMSG] = { [CMSG_INSERT] = "INSERT", [CMSG_REMOVE] = "REMOVE", [CMSG_GET] = "GET", [CMSG_BYE] = "BYE", [CMSG_KILL] = "KILL" }; #endif /* KV_PROTOCOL_H */
2,082
19.623762
78
h
null
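For illustration, a tiny client-side helper that formats one line of the text protocol documented above; the real parsing lives in the accompanying kv_server sources, and the buffer handling here is a simplified sketch.

#include <stdio.h>
#include "kv_protocol.h"

/* INSERT [key] [value]\n -- the key must stay within MAX_KEY_LEN characters */
static int
format_insert(char *buf, size_t buflen, const char *key, const char *value)
{
	return snprintf(buf, buflen, "%s %s %s\n",
			kv_cmsg_token[CMSG_INSERT], key, value);
}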
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/map/map_ctree.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * map_ctree.h -- common interface for maps */ #ifndef MAP_CTREE_H #define MAP_CTREE_H #include "map.h" #ifdef __cplusplus extern "C" { #endif extern struct map_ops ctree_map_ops; #define MAP_CTREE (&ctree_map_ops) #ifdef __cplusplus } #endif #endif /* MAP_CTREE_H */
366
13.115385
44
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/string_store_tx/layout.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * layout.h -- example from introduction part 2 */ #define LAYOUT_NAME "intro_2" #define MAX_BUF_LEN 10 struct my_root { char buf[MAX_BUF_LEN]; };
241
16.285714
47
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/string_store/layout.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * layout.h -- example from introduction part 1 */ #define LAYOUT_NAME "intro_1" #define MAX_BUF_LEN 10 struct my_root { size_t len; char buf[MAX_BUF_LEN]; };
254
16
47
h
null
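A minimal sketch in the spirit of the part-1 writer example that uses this layout: it stores a string in the root object without a transaction, persisting each piece explicitly. The pool is assumed to be open already and truncation handling is simplified.

#include <string.h>
#include <libpmemobj.h>
#include "layout.h"

static void
write_message(PMEMobjpool *pop, const char *msg)
{
	PMEMoid root = pmemobj_root(pop, sizeof(struct my_root));
	struct my_root *rootp = pmemobj_direct(root);

	size_t len = strlen(msg) + 1;
	if (len > MAX_BUF_LEN)
		len = MAX_BUF_LEN;	/* simplified: silently truncate */

	rootp->len = len;
	pmemobj_persist(pop, &rootp->len, sizeof(rootp->len));
	pmemobj_memcpy_persist(pop, rootp->buf, msg, len);
}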
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/tree_map/ctree_map.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * ctree_map.h -- TreeMap sorted collection implementation */ #ifndef CTREE_MAP_H #define CTREE_MAP_H #include <libpmemobj.h> #ifndef CTREE_MAP_TYPE_OFFSET #define CTREE_MAP_TYPE_OFFSET 1008 #endif struct ctree_map; TOID_DECLARE(struct ctree_map, CTREE_MAP_TYPE_OFFSET + 0); int ctree_map_check(PMEMobjpool *pop, TOID(struct ctree_map) map); int ctree_map_create(PMEMobjpool *pop, TOID(struct ctree_map) *map, void *arg); int ctree_map_destroy(PMEMobjpool *pop, TOID(struct ctree_map) *map); int ctree_map_insert(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key, PMEMoid value); int ctree_map_insert_new(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg); PMEMoid ctree_map_remove(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key); int ctree_map_remove_free(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key); int ctree_map_clear(PMEMobjpool *pop, TOID(struct ctree_map) map); PMEMoid ctree_map_get(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key); int ctree_map_lookup(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key); int ctree_map_foreach(PMEMobjpool *pop, TOID(struct ctree_map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); int ctree_map_is_empty(PMEMobjpool *pop, TOID(struct ctree_map) map); #endif /* CTREE_MAP_H */
1,523
34.44186
79
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/tree_map/rtree_map.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * rtree_map.h -- Radix TreeMap collection implementation */ #ifndef RTREE_MAP_H #define RTREE_MAP_H #include <libpmemobj.h> #ifndef RTREE_MAP_TYPE_OFFSET #define RTREE_MAP_TYPE_OFFSET 1020 #endif struct rtree_map; TOID_DECLARE(struct rtree_map, RTREE_MAP_TYPE_OFFSET + 0); int rtree_map_check(PMEMobjpool *pop, TOID(struct rtree_map) map); int rtree_map_create(PMEMobjpool *pop, TOID(struct rtree_map) *map, void *arg); int rtree_map_destroy(PMEMobjpool *pop, TOID(struct rtree_map) *map); int rtree_map_insert(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size, PMEMoid value); int rtree_map_insert_new(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg); PMEMoid rtree_map_remove(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size); int rtree_map_remove_free(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size); int rtree_map_clear(PMEMobjpool *pop, TOID(struct rtree_map) map); PMEMoid rtree_map_get(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size); int rtree_map_lookup(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size); int rtree_map_foreach(PMEMobjpool *pop, TOID(struct rtree_map) map, int (*cb)(const unsigned char *key, uint64_t key_size, PMEMoid value, void *arg), void *arg); int rtree_map_is_empty(PMEMobjpool *pop, TOID(struct rtree_map) map); #endif /* RTREE_MAP_H */
1,739
36.826087
79
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/tree_map/rbtree_map.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * rbtree_map.h -- TreeMap sorted collection implementation */ #ifndef RBTREE_MAP_H #define RBTREE_MAP_H #include <libpmemobj.h> #ifndef RBTREE_MAP_TYPE_OFFSET #define RBTREE_MAP_TYPE_OFFSET 1016 #endif struct rbtree_map; TOID_DECLARE(struct rbtree_map, RBTREE_MAP_TYPE_OFFSET + 0); int rbtree_map_check(PMEMobjpool *pop, TOID(struct rbtree_map) map); int rbtree_map_create(PMEMobjpool *pop, TOID(struct rbtree_map) *map, void *arg); int rbtree_map_destroy(PMEMobjpool *pop, TOID(struct rbtree_map) *map); int rbtree_map_insert(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key, PMEMoid value); int rbtree_map_insert_new(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg); PMEMoid rbtree_map_remove(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key); int rbtree_map_remove_free(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key); int rbtree_map_clear(PMEMobjpool *pop, TOID(struct rbtree_map) map); PMEMoid rbtree_map_get(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key); int rbtree_map_lookup(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key); int rbtree_map_foreach(PMEMobjpool *pop, TOID(struct rbtree_map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); int rbtree_map_is_empty(PMEMobjpool *pop, TOID(struct rbtree_map) map); #endif /* RBTREE_MAP_H */
1,557
34.409091
73
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/examples/libpmemobj/tree_map/btree_map.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * btree_map.h -- TreeMap sorted collection implementation */ #ifndef BTREE_MAP_H #define BTREE_MAP_H #include <libpmemobj.h> #ifndef BTREE_MAP_TYPE_OFFSET #define BTREE_MAP_TYPE_OFFSET 1012 #endif struct btree_map; TOID_DECLARE(struct btree_map, BTREE_MAP_TYPE_OFFSET + 0); int btree_map_check(PMEMobjpool *pop, TOID(struct btree_map) map); int btree_map_create(PMEMobjpool *pop, TOID(struct btree_map) *map, void *arg); int btree_map_destroy(PMEMobjpool *pop, TOID(struct btree_map) *map); int btree_map_insert(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key, PMEMoid value); int btree_map_insert_new(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg); PMEMoid btree_map_remove(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key); int btree_map_remove_free(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key); int btree_map_clear(PMEMobjpool *pop, TOID(struct btree_map) map); PMEMoid btree_map_get(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key); int btree_map_lookup(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key); int btree_map_foreach(PMEMobjpool *pop, TOID(struct btree_map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); int btree_map_is_empty(PMEMobjpool *pop, TOID(struct btree_map) map); #endif /* BTREE_MAP_H */
1,523
34.44186
79
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/librpmem/rpmem_ssh.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * rpmem_ssh.h -- rpmem ssh transport layer header file */ #ifndef RPMEM_SSH_H #define RPMEM_SSH_H 1 #include <stddef.h> #ifdef __cplusplus extern "C" { #endif struct rpmem_ssh; struct rpmem_ssh *rpmem_ssh_open(const struct rpmem_target_info *info); struct rpmem_ssh *rpmem_ssh_exec(const struct rpmem_target_info *info, ...); struct rpmem_ssh *rpmem_ssh_execv(const struct rpmem_target_info *info, const char **argv); int rpmem_ssh_close(struct rpmem_ssh *rps); int rpmem_ssh_send(struct rpmem_ssh *rps, const void *buff, size_t len); int rpmem_ssh_recv(struct rpmem_ssh *rps, void *buff, size_t len); int rpmem_ssh_monitor(struct rpmem_ssh *rps, int nonblock); const char *rpmem_ssh_strerror(struct rpmem_ssh *rps, int oerrno); #ifdef __cplusplus } #endif #endif
866
23.771429
76
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/librpmem/rpmem_fip.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * rpmem_fip.h -- rpmem libfabric provider module header file */ #ifndef RPMEM_FIP_H #define RPMEM_FIP_H #include <stdint.h> #include <netinet/in.h> #include <sys/types.h> #include <sys/socket.h> #ifdef __cplusplus extern "C" { #endif struct rpmem_fip; struct rpmem_fip_attr { enum rpmem_provider provider; size_t max_wq_size; enum rpmem_persist_method persist_method; void *laddr; size_t size; size_t buff_size; unsigned nlanes; void *raddr; uint64_t rkey; }; struct rpmem_fip *rpmem_fip_init(const char *node, const char *service, struct rpmem_fip_attr *attr, unsigned *nlanes); void rpmem_fip_fini(struct rpmem_fip *fip); int rpmem_fip_connect(struct rpmem_fip *fip); int rpmem_fip_close(struct rpmem_fip *fip); int rpmem_fip_process_start(struct rpmem_fip *fip); int rpmem_fip_process_stop(struct rpmem_fip *fip); int rpmem_fip_flush(struct rpmem_fip *fip, size_t offset, size_t len, unsigned lane, unsigned flags); int rpmem_fip_drain(struct rpmem_fip *fip, unsigned lane); int rpmem_fip_persist(struct rpmem_fip *fip, size_t offset, size_t len, unsigned lane, unsigned flags); int rpmem_fip_read(struct rpmem_fip *fip, void *buff, size_t len, size_t off, unsigned lane); void rpmem_fip_probe_fork_safety(void); size_t rpmem_fip_get_wq_size(struct rpmem_fip *fip); #ifdef __cplusplus } #endif #endif
1,427
22.032258
71
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/librpmem/rpmem.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * rpmem.h -- internal definitions for librpmem */ #include "alloc.h" #include "fault_injection.h" #define RPMEM_LOG_PREFIX "librpmem" #define RPMEM_LOG_LEVEL_VAR "RPMEM_LOG_LEVEL" #define RPMEM_LOG_FILE_VAR "RPMEM_LOG_FILE" #if FAULT_INJECTION void rpmem_inject_fault_at(enum pmem_allocation_type type, int nth, const char *at); int rpmem_fault_injection_enabled(void); #else static inline void rpmem_inject_fault_at(enum pmem_allocation_type type, int nth, const char *at) { abort(); } static inline int rpmem_fault_injection_enabled(void) { return 0; } #endif
672
18.228571
62
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/librpmem/rpmem_util.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * rpmem_util.h -- util functions for librpmem header file */ #ifndef RPMEM_UTIL_H #define RPMEM_UTIL_H 1 #ifdef __cplusplus extern "C" { #endif enum { LERR = 1, LWARN = 2, LNOTICE = 3, LINFO = 4, _LDBG = 10, }; #define RPMEM_LOG(level, fmt, args...) LOG(L##level, fmt, ## args) #define RPMEM_DBG(fmt, args...) LOG(_LDBG, fmt, ## args) #define RPMEM_FATAL(fmt, args...) FATAL(fmt, ## args) #define RPMEM_ASSERT(cond) ASSERT(cond) #define RPMEM_PERSIST_FLAGS_ALL RPMEM_PERSIST_RELAXED #define RPMEM_PERSIST_FLAGS_MASK ((unsigned)(~RPMEM_PERSIST_FLAGS_ALL)) #define RPMEM_FLUSH_FLAGS_ALL RPMEM_FLUSH_RELAXED #define RPMEM_FLUSH_FLAGS_MASK ((unsigned)(~RPMEM_FLUSH_FLAGS_ALL)) const char *rpmem_util_proto_errstr(enum rpmem_err err); int rpmem_util_proto_errno(enum rpmem_err err); void rpmem_util_cmds_init(void); void rpmem_util_cmds_fini(void); const char *rpmem_util_cmd_get(void); void rpmem_util_get_env_max_nlanes(unsigned *max_nlanes); void rpmem_util_get_env_wq_size(unsigned *wq_size); #ifdef __cplusplus } #endif #endif
1,137
22.708333
71
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/librpmem/rpmem_obc.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * rpmem_obc.h -- rpmem out-of-band connection client header file */ #ifndef RPMEM_OBC_H #define RPMEM_OBC_H 1 #include <sys/types.h> #include <sys/socket.h> #include "librpmem.h" #ifdef __cplusplus extern "C" { #endif struct rpmem_obc; struct rpmem_obc *rpmem_obc_init(void); void rpmem_obc_fini(struct rpmem_obc *rpc); int rpmem_obc_connect(struct rpmem_obc *rpc, const struct rpmem_target_info *info); int rpmem_obc_disconnect(struct rpmem_obc *rpc); int rpmem_obc_monitor(struct rpmem_obc *rpc, int nonblock); int rpmem_obc_create(struct rpmem_obc *rpc, const struct rpmem_req_attr *req, struct rpmem_resp_attr *res, const struct rpmem_pool_attr *pool_attr); int rpmem_obc_open(struct rpmem_obc *rpc, const struct rpmem_req_attr *req, struct rpmem_resp_attr *res, struct rpmem_pool_attr *pool_attr); int rpmem_obc_set_attr(struct rpmem_obc *rpc, const struct rpmem_pool_attr *pool_attr); int rpmem_obc_close(struct rpmem_obc *rpc, int flags); #ifdef __cplusplus } #endif #endif
1,100
21.9375
65
h