Dataset schema (per-row columns):

  repo             string (1-152 chars; null for every row shown below)
  file             string (15-205 chars)
  code             string (0-41.6M chars)
  file_length      int64 (0-41.6M)
  avg_line_length  float64 (0-1.81M)
  max_line_length  int64 (0-12.7M)
  extension_type   string (90 classes)
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/msvc_compat/strings.h
#ifndef strings_h
#define strings_h

/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided
 * for both */
#ifdef _MSC_VER
#  include <intrin.h>
#  pragma intrinsic(_BitScanForward)
static __forceinline int ffsl(long x)
{
	unsigned long i;

	if (_BitScanForward(&i, x))
		return (i + 1);
	return (0);
}

static __forceinline int ffs(int x)
{
	return (ffsl(x));
}

#  ifdef _M_X64
#    pragma intrinsic(_BitScanForward64)
#  endif

static __forceinline int ffsll(unsigned __int64 x)
{
	unsigned long i;
#ifdef _M_X64
	if (_BitScanForward64(&i, x))
		return (i + 1);
	return (0);
#else
	// Fallback for 32-bit build where 64-bit version not available
	// assuming little endian
	union {
		unsigned __int64 ll;
		unsigned long l[2];
	} s;

	s.ll = x;

	if (_BitScanForward(&i, s.l[0]))
		return (i + 1);
	else if (_BitScanForward(&i, s.l[1]))
		return (i + 33);
	return (0);
#endif
}

#else
#  define ffsll(x) __builtin_ffsll(x)
#  define ffsl(x) __builtin_ffsl(x)
#  define ffs(x) __builtin_ffs(x)
#endif

#endif /* strings_h */
file_length: 1,047 | avg_line_length: 16.466667 | max_line_length: 72 | extension_type: h
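A minimal sketch of the 32-bit ffsll fallback above, rewritten with portable types so the split-and-scan logic can be checked outside MSVC. The harness and its names (ffs64_ref, ffs64_split) are hypothetical; only the low-word-then-high-word scan with the +33 offset comes from the header.

/* ffs_fallback_check.c -- hypothetical test harness, not part of jemalloc. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Reference: 1-based position of the least significant set bit, 0 if none. */
static int ffs64_ref(uint64_t x)
{
	for (int i = 0; i < 64; i++)
		if (x & ((uint64_t)1 << i))
			return (i + 1);
	return (0);
}

/* Mirror of the header's 32-bit path: scan the low word first, then the
 * high word (little-endian layout); a hit in the high word is offset by 32,
 * matching the header's "return (i + 33)" for a 0-based scan index i. */
static int ffs64_split(uint64_t x)
{
	uint32_t lo = (uint32_t)x, hi = (uint32_t)(x >> 32);

	if (lo != 0)
		return (ffs64_ref(lo));
	if (hi != 0)
		return (ffs64_ref(hi) + 32);
	return (0);
}

int main(void)
{
	uint64_t samples[] = {0, 1, 0x80000000u, (uint64_t)1 << 32,
	    (uint64_t)1 << 63, 0xdeadbeefcafef00dULL};

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert(ffs64_split(samples[i]) == ffs64_ref(samples[i]));
	puts("split-scan ffsll fallback agrees with reference");
	return (0);
}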
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/msvc_compat/C99/stdbool.h
#ifndef stdbool_h
#define stdbool_h

#include <wtypes.h>

/* MSVC doesn't define _Bool or bool in C, but does have BOOL */
/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */
/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as
 * a built-in type. */
#ifndef __clang__
typedef BOOL _Bool;
#endif

#define bool _Bool
#define true 1
#define false 0

#define __bool_true_false_are_defined 1

#endif /* stdbool_h */
file_length: 449 | avg_line_length: 20.428571 | max_line_length: 71 | extension_type: h
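The header's own comment notes that this shim fails autoconf's test because (bool) 0.5 != true. A small standalone illustration of why: an integer typedef truncates floating-point conversions, while a real C99 _Bool collapses any nonzero value to 1. The shim_bool typedef here stands in for the BOOL-based one above and is hypothetical.

/* bool_shim_demo.c -- illustrates the failure the header's comment mentions. */
#include <stdio.h>

typedef int shim_bool;	/* like the MSVC shim: bool is just an integer */
#define SHIM_TRUE 1

int main(void)
{
	shim_bool a = (shim_bool)0.5;	/* 0.5 truncates to 0 */
	_Bool b = (_Bool)0.5;		/* C99 _Bool: any nonzero value -> 1 */

	printf("shim: %d (== true? %s)\n", a, a == SHIM_TRUE ? "yes" : "no");
	printf("_Bool: %d\n", (int)b);
	return (0);
}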
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/msvc_compat/C99/stdint.h
// ISO C9x  compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
//  Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   1. Redistributions of source code must retain the above copyright notice,
//      this list of conditions and the following disclaimer.
//
//   2. Redistributions in binary form must reproduce the above copyright
//      notice, this list of conditions and the following disclaimer in the
//      documentation and/or other materials provided with the distribution.
//
//   3. The name of the author may be used to endorse or promote products
//      derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////

#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]

#ifndef _MSC_STDINT_H_ // [
#define _MSC_STDINT_H_

#if _MSC_VER > 1000
#pragma once
#endif

#include <limits.h>

// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
// or compiler give many errors like this:
//   error C2733: second C linkage of overloaded function 'wmemchr' not allowed
#ifdef __cplusplus
extern "C" {
#endif
#  include <wchar.h>
#ifdef __cplusplus
}
#endif

// Define _W64 macros to mark types changing their size, like intptr_t.
#ifndef _W64
#  if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
#     define _W64 __w64
#  else
#     define _W64
#  endif
#endif


// 7.18.1 Integer types

// 7.18.1.1 Exact-width integer types

// Visual Studio 6 and Embedded Visual C++ 4 doesn't
// realize that, e.g. char has the same size as __int8
// so we give up on __intX for them.
#if (_MSC_VER < 1300)
   typedef signed char       int8_t;
   typedef signed short      int16_t;
   typedef signed int        int32_t;
   typedef unsigned char     uint8_t;
   typedef unsigned short    uint16_t;
   typedef unsigned int      uint32_t;
#else
   typedef signed __int8     int8_t;
   typedef signed __int16    int16_t;
   typedef signed __int32    int32_t;
   typedef unsigned __int8   uint8_t;
   typedef unsigned __int16  uint16_t;
   typedef unsigned __int32  uint32_t;
#endif
typedef signed __int64       int64_t;
typedef unsigned __int64     uint64_t;


// 7.18.1.2 Minimum-width integer types
typedef int8_t    int_least8_t;
typedef int16_t   int_least16_t;
typedef int32_t   int_least32_t;
typedef int64_t   int_least64_t;
typedef uint8_t   uint_least8_t;
typedef uint16_t  uint_least16_t;
typedef uint32_t  uint_least32_t;
typedef uint64_t  uint_least64_t;

// 7.18.1.3 Fastest minimum-width integer types
typedef int8_t    int_fast8_t;
typedef int16_t   int_fast16_t;
typedef int32_t   int_fast32_t;
typedef int64_t   int_fast64_t;
typedef uint8_t   uint_fast8_t;
typedef uint16_t  uint_fast16_t;
typedef uint32_t  uint_fast32_t;
typedef uint64_t  uint_fast64_t;

// 7.18.1.4 Integer types capable of holding object pointers
#ifdef _WIN64 // [
   typedef signed __int64    intptr_t;
   typedef unsigned __int64  uintptr_t;
#else // _WIN64 ][
   typedef _W64 signed int   intptr_t;
   typedef _W64 unsigned int uintptr_t;
#endif // _WIN64 ]

// 7.18.1.5 Greatest-width integer types
typedef int64_t   intmax_t;
typedef uint64_t  uintmax_t;


// 7.18.2 Limits of specified-width integer types

#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [   See footnote 220 at page 257 and footnote 221 at page 259

// 7.18.2.1 Limits of exact-width integer types
#define INT8_MIN     ((int8_t)_I8_MIN)
#define INT8_MAX     _I8_MAX
#define INT16_MIN    ((int16_t)_I16_MIN)
#define INT16_MAX    _I16_MAX
#define INT32_MIN    ((int32_t)_I32_MIN)
#define INT32_MAX    _I32_MAX
#define INT64_MIN    ((int64_t)_I64_MIN)
#define INT64_MAX    _I64_MAX
#define UINT8_MAX    _UI8_MAX
#define UINT16_MAX   _UI16_MAX
#define UINT32_MAX   _UI32_MAX
#define UINT64_MAX   _UI64_MAX

// 7.18.2.2 Limits of minimum-width integer types
#define INT_LEAST8_MIN    INT8_MIN
#define INT_LEAST8_MAX    INT8_MAX
#define INT_LEAST16_MIN   INT16_MIN
#define INT_LEAST16_MAX   INT16_MAX
#define INT_LEAST32_MIN   INT32_MIN
#define INT_LEAST32_MAX   INT32_MAX
#define INT_LEAST64_MIN   INT64_MIN
#define INT_LEAST64_MAX   INT64_MAX
#define UINT_LEAST8_MAX   UINT8_MAX
#define UINT_LEAST16_MAX  UINT16_MAX
#define UINT_LEAST32_MAX  UINT32_MAX
#define UINT_LEAST64_MAX  UINT64_MAX

// 7.18.2.3 Limits of fastest minimum-width integer types
#define INT_FAST8_MIN    INT8_MIN
#define INT_FAST8_MAX    INT8_MAX
#define INT_FAST16_MIN   INT16_MIN
#define INT_FAST16_MAX   INT16_MAX
#define INT_FAST32_MIN   INT32_MIN
#define INT_FAST32_MAX   INT32_MAX
#define INT_FAST64_MIN   INT64_MIN
#define INT_FAST64_MAX   INT64_MAX
#define UINT_FAST8_MAX   UINT8_MAX
#define UINT_FAST16_MAX  UINT16_MAX
#define UINT_FAST32_MAX  UINT32_MAX
#define UINT_FAST64_MAX  UINT64_MAX

// 7.18.2.4 Limits of integer types capable of holding object pointers
#ifdef _WIN64 // [
#  define INTPTR_MIN   INT64_MIN
#  define INTPTR_MAX   INT64_MAX
#  define UINTPTR_MAX  UINT64_MAX
#else // _WIN64 ][
#  define INTPTR_MIN   INT32_MIN
#  define INTPTR_MAX   INT32_MAX
#  define UINTPTR_MAX  UINT32_MAX
#endif // _WIN64 ]

// 7.18.2.5 Limits of greatest-width integer types
#define INTMAX_MIN   INT64_MIN
#define INTMAX_MAX   INT64_MAX
#define UINTMAX_MAX  UINT64_MAX

// 7.18.3 Limits of other integer types

#ifdef _WIN64 // [
#  define PTRDIFF_MIN  _I64_MIN
#  define PTRDIFF_MAX  _I64_MAX
#else // _WIN64 ][
#  define PTRDIFF_MIN  _I32_MIN
#  define PTRDIFF_MAX  _I32_MAX
#endif // _WIN64 ]

#define SIG_ATOMIC_MIN  INT_MIN
#define SIG_ATOMIC_MAX  INT_MAX

#ifndef SIZE_MAX // [
#  ifdef _WIN64 // [
#     define SIZE_MAX  _UI64_MAX
#  else // _WIN64 ][
#     define SIZE_MAX  _UI32_MAX
#  endif // _WIN64 ]
#endif // SIZE_MAX ]

// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
#ifndef WCHAR_MIN // [
#  define WCHAR_MIN  0
#endif // WCHAR_MIN ]
#ifndef WCHAR_MAX // [
#  define WCHAR_MAX  _UI16_MAX
#endif // WCHAR_MAX ]

#define WINT_MIN  0
#define WINT_MAX  _UI16_MAX

#endif // __STDC_LIMIT_MACROS ]


// 7.18.4 Limits of other integer types

#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [   See footnote 224 at page 260

// 7.18.4.1 Macros for minimum-width integer constants

#define INT8_C(val)  val##i8
#define INT16_C(val) val##i16
#define INT32_C(val) val##i32
#define INT64_C(val) val##i64

#define UINT8_C(val)  val##ui8
#define UINT16_C(val) val##ui16
#define UINT32_C(val) val##ui32
#define UINT64_C(val) val##ui64

// 7.18.4.2 Macros for greatest-width integer constants
#define INTMAX_C   INT64_C
#define UINTMAX_C  UINT64_C

#endif // __STDC_CONSTANT_MACROS ]


#endif // _MSC_STDINT_H_ ]
file_length: 7,728 | avg_line_length: 30.165323 | max_line_length: 122 | extension_type: h
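A small sanity check of the guarantees the shim above reproduces. On non-MSVC platforms the system <stdint.h> provides the same types, so this sketch compiles anywhere; only the i64/ui64 constant suffixes are MSVC-specific. The file name and checks are mine, not part of the shim.

/* stdint_check.c -- sanity-checks the guarantees the shim reproduces. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Exact-width types must have exactly these sizes. */
	printf("sizeof(int8_t)=%zu sizeof(int64_t)=%zu\n",
	    sizeof(int8_t), sizeof(int64_t));

	/* intptr_t must round-trip a pointer (the _W64/_WIN64 cases above). */
	int x = 0;
	intptr_t p = (intptr_t)&x;
	printf("pointer round-trip ok: %d\n", (int *)p == &x);

	/* Limit macros resolve to the platform's _I64/_UI64 limits under MSVC. */
	printf("UINT64_MAX=%" PRIu64 "\n", (uint64_t)UINT64_MAX);
	return (0);
}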
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/jemalloc_rename.sh
#!/bin/sh

public_symbols_txt=$1

cat <<EOF
/*
 * Name mangling for public symbols is controlled by --with-mangling and
 * --with-jemalloc-prefix.  With default settings the je_ prefix is stripped by
 * these macro definitions.
 */
#ifndef JEMALLOC_NO_RENAME
EOF

for nm in `cat ${public_symbols_txt}` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
  echo "#  define je_${n} ${m}"
done

cat <<EOF
#endif
EOF
file_length: 460 | avg_line_length: 19.043478 | max_line_length: 79 | extension_type: sh
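For concreteness, here is what the generated header would look like for a hypothetical two-line public_symbols.txt containing "malloc:malloc" and "mallocx:mallocx" (i.e. default settings, je_ prefix stripped). The input lines are my assumption; the output shape follows directly from the script's echo.

/* Hypothetical jemalloc_rename.h produced from the assumed input above. */
#ifndef JEMALLOC_NO_RENAME
#  define je_malloc malloc
#  define je_mallocx mallocx
#endif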
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/jemalloc.sh
#!/bin/sh

objroot=$1

cat <<EOF
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
EOF

for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
           jemalloc_protos.h jemalloc_typedefs.h jemalloc_mangle.h ; do
  cat "${objroot}include/jemalloc/${hdr}" \
      | grep -v 'Generated from .* by configure\.' \
      | sed -e 's/^#define /#define /g' \
      | sed -e 's/ $//g'
  echo
done

cat <<EOF
#ifdef __cplusplus
}
#endif
#endif /* JEMALLOC_H_ */
EOF
file_length: 499 | avg_line_length: 16.241379 | max_line_length: 71 | extension_type: sh
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh
#!/bin/sh

public_symbols_txt=$1
symbol_prefix=$2

cat <<EOF
/*
 * By default application code must explicitly refer to mangled symbol names,
 * so that it is possible to use jemalloc in conjunction with another allocator
 * in the same application.  Define JEMALLOC_MANGLE in order to cause automatic
 * name mangling that matches the API prefixing that happened as a result of
 * --with-mangling and/or --with-jemalloc-prefix configuration settings.
 */
#ifdef JEMALLOC_MANGLE
#  ifndef JEMALLOC_NO_DEMANGLE
#    define JEMALLOC_NO_DEMANGLE
#  endif
EOF

for nm in `cat ${public_symbols_txt}` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "#  define ${n} ${symbol_prefix}${n}"
done

cat <<EOF
#endif

/*
 * The ${symbol_prefix}* macros can be used as stable alternative names for the
 * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined.  This is primarily
 * meant for use in jemalloc itself, but it can be used by application code to
 * provide isolation from the name mangling specified via --with-mangling
 * and/or --with-jemalloc-prefix.
 */
#ifndef JEMALLOC_NO_DEMANGLE
EOF

for nm in `cat ${public_symbols_txt}` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "#  undef ${symbol_prefix}${n}"
done

cat <<EOF
#endif
EOF
file_length: 1,258 | avg_line_length: 26.369565 | max_line_length: 79 | extension_type: sh
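Both halves of the generated mangle header, for a hypothetical symbol list containing just "malloc" and symbol_prefix "je_" (assumed inputs). The first half maps plain names onto prefixed ones when JEMALLOC_MANGLE is defined; the second half removes the stable je_* aliases only when demangling is suppressed.

/* Hypothetical jemalloc_mangle.h for symbol "malloc", prefix "je_". */
#ifdef JEMALLOC_MANGLE
#  ifndef JEMALLOC_NO_DEMANGLE
#    define JEMALLOC_NO_DEMANGLE
#  endif
#  define malloc je_malloc
#endif

#ifndef JEMALLOC_NO_DEMANGLE
#  undef je_malloc
#endif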
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh
#!/bin/sh

for nm in `cat $1` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "#undef je_${n}"
done
file_length: 111 | avg_line_length: 15 | max_line_length: 46 | extension_type: sh
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/mutex.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct malloc_mutex_s malloc_mutex_t;

#ifdef _WIN32
#  define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
#  define MALLOC_MUTEX_INITIALIZER \
     {OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
#  define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
#  define MALLOC_MUTEX_INITIALIZER \
     {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#else
#  if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
       defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
#    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
#    define MALLOC_MUTEX_INITIALIZER \
       {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \
        WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#  else
#    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
#    define MALLOC_MUTEX_INITIALIZER \
       {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#  endif
#endif

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct malloc_mutex_s {
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
	SRWLOCK			lock;
#  else
	CRITICAL_SECTION	lock;
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
	os_unfair_lock		lock;
#elif (defined(JEMALLOC_OSSPIN))
	OSSpinLock		lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
	pthread_mutex_t		lock;
	malloc_mutex_t		*postponed_next;
#else
	pthread_mutex_t		lock;
#endif
	witness_t		witness;
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
#  undef isthreaded /* Undo private_namespace.h definition. */
#  define isthreaded true
#endif

bool	malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank);
void	malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool	malloc_mutex_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void	malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
void	malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
	if (isthreaded) {
		witness_assert_not_owner(tsdn, &mutex->witness);
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
		AcquireSRWLockExclusive(&mutex->lock);
#  else
		EnterCriticalSection(&mutex->lock);
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
		os_unfair_lock_lock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
		OSSpinLockLock(&mutex->lock);
#else
		pthread_mutex_lock(&mutex->lock);
#endif
		witness_lock(tsdn, &mutex->witness);
	}
}

JEMALLOC_INLINE void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
	if (isthreaded) {
		witness_unlock(tsdn, &mutex->witness);
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
		ReleaseSRWLockExclusive(&mutex->lock);
#  else
		LeaveCriticalSection(&mutex->lock);
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
		os_unfair_lock_unlock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
		OSSpinLockUnlock(&mutex->lock);
#else
		pthread_mutex_unlock(&mutex->lock);
#endif
	}
}

JEMALLOC_INLINE void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
	if (isthreaded)
		witness_assert_owner(tsdn, &mutex->witness);
}

JEMALLOC_INLINE void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
	if (isthreaded)
		witness_assert_not_owner(tsdn, &mutex->witness);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
file_length: 4,264 | avg_line_length: 27.817568 | max_line_length: 80 | extension_type: h
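A freestanding sketch of the same compile-time dispatch pattern mutex.h uses (Win32 SRWLOCK vs. pthreads), with the witness layer omitted. The my_mutex_* names are hypothetical; this is not jemalloc's build, just the technique in isolation.

/* lock_dispatch.c -- minimal sketch of mutex.h's #ifdef dispatch. */
#ifdef _WIN32
#include <windows.h>
typedef SRWLOCK my_mutex_t;
#define MY_MUTEX_INIT SRWLOCK_INIT
static void my_mutex_lock(my_mutex_t *m)   { AcquireSRWLockExclusive(m); }
static void my_mutex_unlock(my_mutex_t *m) { ReleaseSRWLockExclusive(m); }
#else
#include <pthread.h>
typedef pthread_mutex_t my_mutex_t;
#define MY_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
static void my_mutex_lock(my_mutex_t *m)   { pthread_mutex_lock(m); }
static void my_mutex_unlock(my_mutex_t *m) { pthread_mutex_unlock(m); }
#endif

#include <stdio.h>

static my_mutex_t lock = MY_MUTEX_INIT;

int main(void)
{
	my_mutex_lock(&lock);
	puts("critical section");	/* jemalloc would also witness_lock() here */
	my_mutex_unlock(&lock);
	return (0);
}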
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/ctl.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct ctl_node_s ctl_node_t;
typedef struct ctl_named_node_s ctl_named_node_t;
typedef struct ctl_indexed_node_s ctl_indexed_node_t;
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
typedef struct ctl_stats_s ctl_stats_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct ctl_node_s {
	bool			named;
};

struct ctl_named_node_s {
	struct ctl_node_s	node;
	const char		*name;
	/* If (nchildren == 0), this is a terminal node. */
	unsigned		nchildren;
	const ctl_node_t	*children;
	int			(*ctl)(tsd_t *, const size_t *, size_t, void *,
	    size_t *, void *, size_t);
};

struct ctl_indexed_node_s {
	struct ctl_node_s	node;
	const ctl_named_node_t	*(*index)(tsdn_t *, const size_t *, size_t,
	    size_t);
};

struct ctl_arena_stats_s {
	bool			initialized;
	unsigned		nthreads;
	const char		*dss;
	ssize_t			lg_dirty_mult;
	ssize_t			decay_time;
	size_t			pactive;
	size_t			pdirty;

	/* The remainder are only populated if config_stats is true. */

	arena_stats_t		astats;

	/* Aggregate stats for small size classes, based on bin stats. */
	size_t			allocated_small;
	uint64_t		nmalloc_small;
	uint64_t		ndalloc_small;
	uint64_t		nrequests_small;

	malloc_bin_stats_t	bstats[NBINS];
	malloc_large_stats_t	*lstats;	/* nlclasses elements. */
	malloc_huge_stats_t	*hstats;	/* nhclasses elements. */
};

struct ctl_stats_s {
	size_t			allocated;
	size_t			active;
	size_t			metadata;
	size_t			resident;
	size_t			mapped;
	size_t			retained;
	unsigned		narenas;
	ctl_arena_stats_t	*arenas;	/* (narenas + 1) elements. */
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

int	ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen);
int	ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
    size_t *miblenp);
int	ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen);
bool	ctl_boot(void);
void	ctl_prefork(tsdn_t *tsdn);
void	ctl_postfork_parent(tsdn_t *tsdn);
void	ctl_postfork_child(tsdn_t *tsdn);

#define	xmallctl(name, oldp, oldlenp, newp, newlen) do {		\
	if (je_mallctl(name, oldp, oldlenp, newp, newlen)		\
	    != 0) {							\
		malloc_printf(						\
		    "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n",	\
		    name);						\
		abort();						\
	}								\
} while (0)

#define	xmallctlnametomib(name, mibp, miblenp) do {			\
	if (je_mallctlnametomib(name, mibp, miblenp) != 0) {		\
		malloc_printf("<jemalloc>: Failure in "			\
		    "xmallctlnametomib(\"%s\", ...)\n", name);		\
		abort();						\
	}								\
} while (0)

#define	xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do {	\
	if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp,		\
	    newlen) != 0) {						\
		malloc_write(						\
		    "<jemalloc>: Failure in xmallctlbymib()\n");	\
		abort();						\
	}								\
} while (0)

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
file_length: 3,389 | avg_line_length: 27.487395 | max_line_length: 80 | extension_type: h
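The xmallctl macros above wrap jemalloc's public mallctl entry point with an abort-on-failure policy. A small caller using the public API with the same policy; it assumes an installed jemalloc (link with -ljemalloc) and a stats-enabled build for stats.allocated.

/* mallctl_demo.c -- build: cc mallctl_demo.c -ljemalloc */
#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Refresh cached stats by bumping the epoch, then read stats.allocated. */
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);

	if (mallctl("epoch", &epoch, &sz, &epoch, sz) != 0) {
		fprintf(stderr, "mallctl(\"epoch\") failed\n");
		abort();	/* same policy as the xmallctl macro */
	}

	size_t allocated;
	sz = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0) {
		fprintf(stderr, "mallctl(\"stats.allocated\") failed\n");
		abort();
	}
	printf("allocated: %zu bytes\n", allocated);
	return (0);
}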
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/arena.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#define	LARGE_MINCLASS		(ZU(1) << LG_LARGE_MINCLASS)

/* Maximum number of regions in one run. */
#define	LG_RUN_MAXREGS		(LG_PAGE - LG_TINY_MIN)
#define	RUN_MAXREGS		(1U << LG_RUN_MAXREGS)

/*
 * Minimum redzone size.  Redzones may be larger than this if necessary to
 * preserve region alignment.
 */
#define	REDZONE_MINSIZE		16

/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *   (nactive >> lg_dirty_mult) >= ndirty
 *
 * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as
 * many active pages as dirty pages.
 */
#define	LG_DIRTY_MULT_DEFAULT	3

typedef enum {
	purge_mode_ratio = 0,
	purge_mode_decay = 1,

	purge_mode_limit = 2
} purge_mode_t;
#define	PURGE_DEFAULT		purge_mode_ratio
/* Default decay time in seconds. */
#define	DECAY_TIME_DEFAULT	10
/* Number of event ticks between time checks. */
#define	DECAY_NTICKS_PER_UPDATE	1000

typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
typedef struct arena_avail_links_s arena_avail_links_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_decay_s arena_decay_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;
typedef struct arena_tdata_s arena_tdata_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#ifdef JEMALLOC_ARENA_STRUCTS_A
struct arena_run_s {
	/* Index of bin this run is associated with. */
	szind_t		binind;

	/* Number of free regions in run. */
	unsigned	nfree;

	/* Per region allocated/deallocated bitmap. */
	bitmap_t	bitmap[BITMAP_GROUPS_MAX];
};

/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_bits_s {
	/*
	 * Run address (or size) and various flags are stored together.  The
	 * bit layout looks like (assuming 32-bit system):
	 *
	 *   ???????? ???????? ???nnnnn nnndumla
	 *
	 *   ? : Unallocated: Run address for first/last pages, unset for
	 *       internal pages.
	 *       Small: Run page offset.
	 *       Large: Run page count for first page, unset for trailing
	 *       pages.
	 *   n : binind for small size class, BININD_INVALID for large size
	 *       class.
	 *   d : dirty?
	 *   u : unzeroed?
	 *   m : decommitted?
	 *   l : large?
	 *   a : allocated?
	 *
	 * Following are example bit patterns for the three types of runs.
	 *
	 *   p : run page offset
	 *   s : run size
	 *   n : binind for size class; large objects set these to
	 *       BININD_INVALID
	 *   x : don't care
	 *   - : 0
	 *   + : 1
	 *   [DUMLA] : bit set
	 *   [dumla] : bit unset
	 *
	 *   Unallocated (clean):
	 *     ssssssss ssssssss sss+++++ +++dum-a
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx
	 *     ssssssss ssssssss sss+++++ +++dUm-a
	 *
	 *   Unallocated (dirty):
	 *     ssssssss ssssssss sss+++++ +++D-m-a
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     ssssssss ssssssss sss+++++ +++D-m-a
	 *
	 *   Small:
	 *     pppppppp pppppppp pppnnnnn nnnd---A
	 *     pppppppp pppppppp pppnnnnn nnn----A
	 *     pppppppp pppppppp pppnnnnn nnnd---A
	 *
	 *   Large:
	 *     ssssssss ssssssss sss+++++ +++D--LA
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     -------- -------- ---+++++ +++D--LA
	 *
	 *   Large (sampled, size <= LARGE_MINCLASS):
	 *     ssssssss ssssssss sssnnnnn nnnD--LA
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     -------- -------- ---+++++ +++D--LA
	 *
	 *   Large (not sampled, size == LARGE_MINCLASS):
	 *     ssssssss ssssssss sss+++++ +++D--LA
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     -------- -------- ---+++++ +++D--LA
	 */
	size_t				bits;
#define	CHUNK_MAP_ALLOCATED	((size_t)0x01U)
#define	CHUNK_MAP_LARGE		((size_t)0x02U)
#define	CHUNK_MAP_STATE_MASK	((size_t)0x3U)

#define	CHUNK_MAP_DECOMMITTED	((size_t)0x04U)
#define	CHUNK_MAP_UNZEROED	((size_t)0x08U)
#define	CHUNK_MAP_DIRTY		((size_t)0x10U)
#define	CHUNK_MAP_FLAGS_MASK	((size_t)0x1cU)

#define	CHUNK_MAP_BININD_SHIFT	5
#define	BININD_INVALID		((size_t)0xffU)
#define	CHUNK_MAP_BININD_MASK	(BININD_INVALID << CHUNK_MAP_BININD_SHIFT)
#define	CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK

#define	CHUNK_MAP_RUNIND_SHIFT	(CHUNK_MAP_BININD_SHIFT + 8)
#define	CHUNK_MAP_SIZE_SHIFT	(CHUNK_MAP_RUNIND_SHIFT - LG_PAGE)
#define	CHUNK_MAP_SIZE_MASK						\
    (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK))
};

struct arena_runs_dirty_link_s {
	qr(arena_runs_dirty_link_t)	rd_link;
};

/*
 * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
 * like arena_chunk_map_bits_t.  Two separate arrays are stored within each
 * chunk header in order to improve cache locality.
 */
struct arena_chunk_map_misc_s {
	/*
	 * Linkage for run heaps.  There are two disjoint uses:
	 *
	 * 1) arena_t's runs_avail heaps.
	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
	 *    runs, rather than directly embedding linkage.
	 */
	phn(arena_chunk_map_misc_t)	ph_link;

	union {
		/* Linkage for list of dirty runs. */
		arena_runs_dirty_link_t	rd;

		/* Profile counters, used for large object runs. */
		union {
			void		*prof_tctx_pun;
			prof_tctx_t	*prof_tctx;
		};

		/* Small region run metadata. */
		arena_run_t		run;
	};
};
typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
#endif /* JEMALLOC_ARENA_STRUCTS_A */

#ifdef JEMALLOC_ARENA_STRUCTS_B
/* Arena chunk header. */
struct arena_chunk_s {
	/*
	 * A pointer to the arena that owns the chunk is stored within the
	 * node.  This field as a whole is used by chunks_rtree to support
	 * both ivsalloc() and core-based debugging.
	 */
	extent_node_t		node;

	/*
	 * True if memory could be backed by transparent huge pages.  This is
	 * only directly relevant to Linux, since it is the only supported
	 * platform on which jemalloc interacts with explicit transparent huge
	 * page controls.
	 */
	bool			hugepage;

	/*
	 * Map of pages within chunk that keeps track of free/large/small.
	 * The first map_bias entries are omitted, since the chunk header does
	 * not need to be tracked in the map.  This omission saves a header
	 * page for common chunk sizes (e.g. 4 MiB).
	 */
	arena_chunk_map_bits_t	map_bits[1]; /* Dynamically sized. */
};

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each run has the following layout:
 *
 *               /--------------------\
 *               | pad?               |
 *               |--------------------|
 *               | redzone            |
 *   reg0_offset | region 0           |
 *               | redzone            |
 *               |--------------------| \
 *               | redzone            | |
 *               | region 1           |  > reg_interval
 *               | redzone            | /
 *               |--------------------|
 *               | ...                |
 *               | ...                |
 *               | ...                |
 *               |--------------------|
 *               | redzone            |
 *               | region nregs-1     |
 *               | redzone            |
 *               |--------------------|
 *               | alignment pad?     |
 *               \--------------------/
 *
 * reg_interval has at least the same minimum alignment as reg_size; this
 * preserves the alignment constraint that sa2u() depends on.  Alignment pad
 * is either 0 or redzone_size; it is present only if needed to align
 * reg0_offset.
 */
struct arena_bin_info_s {
	/* Size of regions in a run for this bin's size class. */
	size_t			reg_size;

	/* Redzone size. */
	size_t			redzone_size;

	/* Interval between regions (reg_size + (redzone_size << 1)). */
	size_t			reg_interval;

	/* Total size of a run for this bin's size class. */
	size_t			run_size;

	/* Total number of regions in a run for this bin's size class. */
	uint32_t		nregs;

	/*
	 * Metadata used to manipulate bitmaps for runs associated with this
	 * bin.
	 */
	bitmap_info_t		bitmap_info;

	/* Offset of first region in a run for this bin's size class. */
	uint32_t		reg0_offset;
};

struct arena_decay_s {
	/*
	 * Approximate time in seconds from the creation of a set of unused
	 * dirty pages until an equivalent set of unused dirty pages is purged
	 * and/or reused.
	 */
	ssize_t			time;
	/* time / SMOOTHSTEP_NSTEPS. */
	nstime_t		interval;
	/*
	 * Time at which the current decay interval logically started.  We do
	 * not actually advance to a new epoch until sometime after it starts
	 * because of scheduling and computation delays, and it is even
	 * possible to completely skip epochs.  In all cases, during epoch
	 * advancement we merge all relevant activity into the most recently
	 * recorded epoch.
	 */
	nstime_t		epoch;
	/* Deadline randomness generator. */
	uint64_t		jitter_state;
	/*
	 * Deadline for current epoch.  This is the sum of interval and per
	 * epoch jitter which is a uniform random variable in [0..interval).
	 * Epochs always advance by precise multiples of interval, but we
	 * randomize the deadline to reduce the likelihood of arenas purging
	 * in lockstep.
	 */
	nstime_t		deadline;
	/*
	 * Number of dirty pages at beginning of current epoch.  During epoch
	 * advancement we use the delta between arena->decay.ndirty and
	 * arena->ndirty to determine how many dirty pages, if any, were
	 * generated.
	 */
	size_t			ndirty;
	/*
	 * Trailing log of how many unused dirty pages were generated during
	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
	 * element is the most recent epoch.  Corresponding epoch times are
	 * relative to epoch.
	 */
	size_t			backlog[SMOOTHSTEP_NSTEPS];
};

struct arena_bin_s {
	/*
	 * All operations on runcur, runs, and stats require that lock be
	 * locked.  Run allocation/deallocation are protected by the arena
	 * lock, which may be acquired while holding one or more bin locks,
	 * but not vise versa.
	 */
	malloc_mutex_t		lock;

	/*
	 * Current run being used to service allocations of this bin's size
	 * class.
	 */
	arena_run_t		*runcur;

	/*
	 * Heap of non-full runs.  This heap is used when looking for an
	 * existing run when runcur is no longer usable.  We choose the
	 * non-full run that is lowest in memory; this policy tends to keep
	 * objects packed well, and it can also help reduce the number of
	 * almost-empty chunks.
	 */
	arena_run_heap_t	runs;

	/* Bin statistics. */
	malloc_bin_stats_t	stats;
};

struct arena_s {
	/* This arena's index within the arenas array. */
	unsigned		ind;

	/*
	 * Number of threads currently assigned to this arena, synchronized
	 * via atomic operations.  Each thread has two distinct assignments,
	 * one for application-serving allocation, and the other for internal
	 * metadata allocation.  Internal metadata must not be allocated from
	 * arenas created via the arenas.extend mallctl, because the
	 * arena.<i>.reset mallctl indiscriminately discards all allocations
	 * for the affected arena.
	 *
	 *   0: Application allocation.
	 *   1: Internal metadata allocation.
	 */
	unsigned		nthreads[2];

	/*
	 * There are three classes of arena operations from a locking
	 * perspective:
	 * 1) Thread assignment (modifies nthreads) is synchronized via
	 *    atomics.
	 * 2) Bin-related operations are protected by bin locks.
	 * 3) Chunk- and run-related operations are protected by this mutex.
	 */
	malloc_mutex_t		lock;

	arena_stats_t		stats;
	/*
	 * List of tcaches for extant threads associated with this arena.
	 * Stats from these are merged incrementally, and at exit if
	 * opt_stats_print is enabled.
	 */
	ql_head(tcache_t)	tcache_ql;

	uint64_t		prof_accumbytes;

	/*
	 * PRNG state for cache index randomization of large allocation base
	 * pointers.
	 */
	size_t			offset_state;

	dss_prec_t		dss_prec;

	/* Extant arena chunks. */
	ql_head(extent_node_t)	achunks;

	/* Extent serial number generator state. */
	size_t			extent_sn_next;

	/*
	 * In order to avoid rapid chunk allocation/deallocation when an arena
	 * oscillates right on the cusp of needing a new chunk, cache the most
	 * recently freed chunk.  The spare is left in the arena's chunk trees
	 * until it is deleted.
	 *
	 * There is one spare chunk per arena, rather than one spare total, in
	 * order to avoid interactions between multiple threads that could
	 * make a single spare inadequate.
	 */
	arena_chunk_t		*spare;

	/* Minimum ratio (log base 2) of nactive:ndirty. */
	ssize_t			lg_dirty_mult;

	/* True if a thread is currently executing arena_purge_to_limit(). */
	bool			purging;

	/* Number of pages in active runs and huge regions. */
	size_t			nactive;

	/*
	 * Current count of pages within unused runs that are potentially
	 * dirty, and for which madvise(... MADV_DONTNEED) has not been
	 * called.  By tracking this, we can institute a limit on how much
	 * dirty unused memory is mapped for each arena.
	 */
	size_t			ndirty;

	/*
	 * Unused dirty memory this arena manages.  Dirty memory is
	 * conceptually tracked as an arbitrarily interleaved LRU of dirty
	 * runs and cached chunks, but the list linkage is actually
	 * semi-duplicated in order to avoid extra arena_chunk_map_misc_t
	 * space overhead.
	 *
	 *   LRU-----------------------------------------------------------MRU
	 *
	 *        /-- arena ---\
	 *        |            |
	 *        |            |
	 *        |------------|                             /- chunk -\
	 *   ...->|chunks_cache|<--------------------------->|  /----\ |<--...
	 *        |------------|                             |  |node| |
	 *        |            |                             |  |    | |
	 *        |            |    /- run -\    /- run -\   |  |    | |
	 *        |            |    |       |    |       |   |  |    | |
	 *        |            |    |       |    |       |   |  |    | |
	 *        |------------|    |-------|    |-------|   |  |----| |
	 *   ...->|runs_dirty  |<-->|rd     |<-->|rd     |<-->|  |rd  | |<--...
	 *        |------------|    |-------|    |-------|   |  |----| |
	 *        |            |    |       |    |       |   |  |    | |
	 *        |            |    |       |    |       |   |  \----/ |
	 *        |            |    \-------/    \-------/   |         |
	 *        \------------/                             \---------/
	 */
	arena_runs_dirty_link_t	runs_dirty;
	extent_node_t		chunks_cache;

	/* Decay-based purging state. */
	arena_decay_t		decay;

	/* Extant huge allocations. */
	ql_head(extent_node_t)	huge;
	/* Synchronizes all huge allocation/update/deallocation. */
	malloc_mutex_t		huge_mtx;

	/*
	 * Trees of chunks that were previously allocated (trees differ only
	 * in node ordering).  These are used when allocating chunks, in an
	 * attempt to re-use address space.  Depending on function, different
	 * tree orderings are needed, which is why there are two trees with
	 * the same contents.
	 */
	extent_tree_t		chunks_szsnad_cached;
	extent_tree_t		chunks_ad_cached;
	extent_tree_t		chunks_szsnad_retained;
	extent_tree_t		chunks_ad_retained;

	malloc_mutex_t		chunks_mtx;
	/* Cache of nodes that were allocated via base_alloc(). */
	ql_head(extent_node_t)	node_cache;
	malloc_mutex_t		node_cache_mtx;

	/* User-configurable chunk hook functions. */
	chunk_hooks_t		chunk_hooks;

	/* bins is used to store trees of free regions. */
	arena_bin_t		bins[NBINS];

	/*
	 * Size-segregated address-ordered heaps of this arena's available
	 * runs, used for first-best-fit run allocation.  Runs are quantized,
	 * i.e. they reside in the last heap which corresponds to a size class
	 * less than or equal to the run size.
	 */
	arena_run_heap_t	runs_avail[NPSIZES];
};

/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
	ticker_t		decay_ticker;
};
#endif /* JEMALLOC_ARENA_STRUCTS_B */

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

static const size_t	large_pad =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    PAGE
#else
    0
#endif
    ;

extern purge_mode_t	opt_purge;
extern const char	*purge_mode_names[];
extern ssize_t		opt_lg_dirty_mult;
extern ssize_t		opt_decay_time;

extern arena_bin_info_t	arena_bin_info[NBINS];

extern size_t		map_bias; /* Number of arena chunk header pages. */
extern size_t		map_misc_offset;
extern size_t		arena_maxrun; /* Max run size for arenas. */
extern size_t		large_maxclass; /* Max large size class. */
extern unsigned		nlclasses; /* Number of large size classes. */
extern unsigned		nhclasses; /* Number of huge size classes. */

#ifdef JEMALLOC_JET
typedef size_t (run_quantize_t)(size_t);
extern run_quantize_t *run_quantize_floor;
extern run_quantize_t *run_quantize_ceil;
#endif
void	arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
    bool cache);
void	arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
    bool cache);
extent_node_t	*arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
void	arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
void	*arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, size_t *sn, bool *zero);
void	arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t usize, size_t sn);
void	arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
    void *chunk, size_t oldsize, size_t usize);
void	arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
    void *chunk, size_t oldsize, size_t usize, size_t sn);
bool	arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
    void *chunk, size_t oldsize, size_t usize, bool *zero);
ssize_t	arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
bool	arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t lg_dirty_mult);
ssize_t	arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
bool	arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
void	arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
void	arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
void	arena_reset(tsd_t *tsd, arena_t *arena);
void	arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
    tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
void	arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
    bool zero);
#ifdef JEMALLOC_JET
typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
    uint8_t);
extern arena_redzone_corruption_t *arena_redzone_corruption;
typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
#else
void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void	arena_quarantine_junk_small(void *ptr, size_t usize);
void	*arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
    bool zero);
void	*arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
    szind_t ind, bool zero);
void	*arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache);
void	arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
void	arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
void	arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
void	arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#else
void	arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
void	arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, void *ptr);
void	arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
bool	arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t size, size_t extra, bool zero);
void	*arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache);
dss_prec_t	arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
bool	arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
ssize_t	arena_lg_dirty_mult_default_get(void);
bool	arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
ssize_t	arena_decay_time_default_get(void);
bool	arena_decay_time_default_set(ssize_t decay_time);
void	arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
    unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
    ssize_t *decay_time, size_t *nactive, size_t *ndirty);
void	arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
    malloc_huge_stats_t *hstats);
unsigned	arena_nthreads_get(arena_t *arena, bool internal);
void	arena_nthreads_inc(arena_t *arena, bool internal);
void	arena_nthreads_dec(arena_t *arena, bool internal);
size_t	arena_extent_sn_next(arena_t *arena);
arena_t	*arena_new(tsdn_t *tsdn, unsigned ind);
void	arena_boot(void);
void	arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void	arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void	arena_prefork2(tsdn_t *tsdn, arena_t *arena);
void	arena_prefork3(tsdn_t *tsdn, arena_t *arena);
void	arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void	arena_postfork_child(tsdn_t *tsdn, arena_t *arena);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_bits_t	*arena_bitselm_get_mutable(arena_chunk_t *chunk,
    size_t pageind);
const arena_chunk_map_bits_t	*arena_bitselm_get_const(
    const arena_chunk_t *chunk, size_t pageind);
arena_chunk_map_misc_t	*arena_miscelm_get_mutable(arena_chunk_t *chunk,
    size_t pageind);
const arena_chunk_map_misc_t	*arena_miscelm_get_const(
    const arena_chunk_t *chunk, size_t pageind);
size_t	arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
void	*arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
arena_chunk_map_misc_t	*arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
arena_chunk_map_misc_t	*arena_run_to_miscelm(arena_run_t *run);
size_t	*arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
const size_t	*arena_mapbitsp_get_const(const arena_chunk_t *chunk,
    size_t pageind);
size_t	arena_mapbitsp_read(const size_t *mapbitsp);
size_t	arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_size_decode(size_t mapbits);
size_t	arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
    size_t pageind);
size_t	arena_mapbits_large_size_get(const arena_chunk_t *chunk,
    size_t pageind);
size_t	arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
    size_t pageind);
szind_t	arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_unzeroed_get(const arena_chunk_t *chunk,
    size_t pageind);
size_t	arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
    size_t pageind);
size_t	arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_allocated_get(const arena_chunk_t *chunk,
    size_t pageind);
void	arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
size_t	arena_mapbits_size_encode(size_t size);
void	arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void	arena_mapbits_unallocated_size_set(arena_chunk_t *chunk,
    size_t pageind, size_t size);
void	arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
    size_t flags);
void	arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void	arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    szind_t binind);
void	arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
    size_t runind, szind_t binind, size_t flags);
void	arena_metadata_allocated_add(arena_t *arena, size_t size);
void	arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t	arena_metadata_allocated_get(arena_t *arena);
bool	arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool	arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool	arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
szind_t	arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
szind_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
size_t	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
    const void *ptr);
prof_tctx_t	*arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void	arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void	arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *old_tctx);
void	arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
void	arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
void	*arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero, tcache_t *tcache, bool slow_path);
arena_t	*arena_aalloc(const void *ptr);
size_t	arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
void	arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    bool slow_path);
void	arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    bool slow_path);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
#  ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
{
	assert(pageind >= map_bias);
	assert(pageind < chunk_npages);

	return (&chunk->map_bits[pageind-map_bias]);
}

JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
{
	return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
{
	assert(pageind >= map_bias);
	assert(pageind < chunk_npages);

	return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
	    (uintptr_t)map_misc_offset) + pageind-map_bias);
}

JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
{
	return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
	    map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;

	assert(pageind >= map_bias);
	assert(pageind < chunk_npages);

	return (pageind);
}

JEMALLOC_ALWAYS_INLINE void *
arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	size_t pageind = arena_miscelm_to_pageind(miscelm);

	return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_rd_to_miscelm(arena_runs_dirty_link_t *rd)
{
	arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
	    *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));

	assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
	assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);

	return (miscelm);
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_run_to_miscelm(arena_run_t *run)
{
	arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
	    *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));

	assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
	assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);

	return (miscelm);
}

JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
{
	return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
}

JEMALLOC_ALWAYS_INLINE const size_t *
arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
{
	return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbitsp_read(const size_t *mapbitsp)
{
	return (*mapbitsp);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
{
	return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_decode(size_t mapbits)
{
	size_t size;

#if CHUNK_MAP_SIZE_SHIFT > 0
	size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
	size = mapbits & CHUNK_MAP_SIZE_MASK;
#else
	size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
#endif

	return (size);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
	return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
	    (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
	return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
	    CHUNK_MAP_ALLOCATED);
	return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
}

JEMALLOC_ALWAYS_INLINE szind_t
arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;
	szind_t binind;

	mapbits = arena_mapbits_get(chunk, pageind);
	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
	assert(binind < NBINS || binind == BININD_INVALID);
	return (binind);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
	return (mapbits & CHUNK_MAP_DIRTY);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
	return (mapbits & CHUNK_MAP_UNZEROED);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
	return (mapbits & CHUNK_MAP_DECOMMITTED);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	return (mapbits & CHUNK_MAP_LARGE);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	return (mapbits & CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
{
	*mapbitsp = mapbits;
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_encode(size_t size)
{
	size_t mapbits;

#if CHUNK_MAP_SIZE_SHIFT > 0
	mapbits = size << CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
	mapbits = size;
#else
	mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
#endif

	assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
	return (mapbits);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert((size & PAGE_MASK) == 0);
	assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
	assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
	arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
	    CHUNK_MAP_BININD_INVALID | flags);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert((size & PAGE_MASK) == 0);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
	arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
	    (mapbits & ~CHUNK_MAP_SIZE_MASK));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert((flags & CHUNK_MAP_UNZEROED) == flags);
	arena_mapbitsp_write(mapbitsp, flags);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert((size & PAGE_MASK) == 0);
	assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
	assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
	arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
	    CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
	    CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    szind_t binind)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert(binind <= BININD_INVALID);
	assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
	    large_pad);
	arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
	    (binind << CHUNK_MAP_BININD_SHIFT));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
    szind_t binind, size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert(binind < BININD_INVALID);
	assert(pageind - runind >= map_bias);
	assert((flags & CHUNK_MAP_UNZEROED) == flags);
	arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) |
	    (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED);
}

JEMALLOC_INLINE void
arena_metadata_allocated_add(arena_t *arena, size_t size)
{
	atomic_add_z(&arena->stats.metadata_allocated, size);
}

JEMALLOC_INLINE void
arena_metadata_allocated_sub(arena_t *arena, size_t size)
{
	atomic_sub_z(&arena->stats.metadata_allocated, size);
}

JEMALLOC_INLINE size_t
arena_metadata_allocated_get(arena_t *arena)
{
	return (atomic_read_z(&arena->stats.metadata_allocated));
}

JEMALLOC_INLINE bool
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{
	cassert(config_prof);
	assert(prof_interval != 0);

	arena->prof_accumbytes += accumbytes;
	if (arena->prof_accumbytes >= prof_interval) {
		arena->prof_accumbytes -= prof_interval;
		return (true);
	}
	return (false);
}

JEMALLOC_INLINE bool
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{
	cassert(config_prof);

	if (likely(prof_interval == 0))
		return (false);
	return (arena_prof_accum_impl(arena, accumbytes));
}

JEMALLOC_INLINE bool
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
{
	cassert(config_prof);

	if (likely(prof_interval == 0))
		return (false);

	{
		bool ret;

		malloc_mutex_lock(tsdn, &arena->lock);
		ret = arena_prof_accum_impl(arena, accumbytes);
		malloc_mutex_unlock(tsdn, &arena->lock);
		return (ret);
	}
}

JEMALLOC_ALWAYS_INLINE szind_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
	szind_t binind;

	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;

	if (config_debug) {
		arena_chunk_t *chunk;
		arena_t *arena;
		size_t pageind;
		size_t actual_mapbits;
		size_t rpages_ind;
		const arena_run_t *run;
		arena_bin_t *bin;
		szind_t run_binind, actual_binind;
		arena_bin_info_t *bin_info;
		const arena_chunk_map_misc_t *miscelm;
		const void *rpages;

		assert(binind != BININD_INVALID);
		assert(binind < NBINS);
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena = extent_node_arena_get(&chunk->node);
		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		actual_mapbits = arena_mapbits_get(chunk, pageind);
		assert(mapbits == actual_mapbits);
		assert(arena_mapbits_large_get(chunk, pageind) == 0);
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
		    pageind);
		miscelm = arena_miscelm_get_const(chunk, rpages_ind);
		run = &miscelm->run;
		run_binind = run->binind;
		bin = &arena->bins[run_binind];
		actual_binind = (szind_t)(bin - arena->bins);
		assert(run_binind == actual_binind);
		bin_info = &arena_bin_info[actual_binind];
		rpages = arena_miscelm_to_rpages(miscelm);
		assert(((uintptr_t)ptr - ((uintptr_t)rpages +
		    (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
		    == 0);
	}

	return (binind);
}
#  endif /* JEMALLOC_ARENA_INLINE_A */

#  ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
	szind_t binind = (szind_t)(bin - arena->bins);

	assert(binind < NBINS);
	return (binind);
}

JEMALLOC_INLINE size_t
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
	size_t diff, interval, shift, regind;
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
	void *rpages = arena_miscelm_to_rpages(miscelm);

	/*
	 * Freeing a pointer lower than region zero can cause assertion
	 * failure.
	 */
	assert((uintptr_t)ptr >= (uintptr_t)rpages +
	    (uintptr_t)bin_info->reg0_offset);

	/*
	 * Avoid doing division with a variable divisor if possible.  Using
	 * actual division here can reduce allocator throughput by over 20%!
	 */
	diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
	    bin_info->reg0_offset);

	/* Rescale (factor powers of 2 out of the numerator and denominator). */
	interval = bin_info->reg_interval;
	shift = ffs_zu(interval) - 1;
	diff >>= shift;
	interval >>= shift;

	if (interval == 1) {
		/* The divisor was a power of 2. */
		regind = diff;
	} else {
		/*
		 * To divide by a number D that is not a power of two we
		 * multiply by (2^21 / D) and then right shift by 21 positions.
		 *
		 *   X / D
		 *
		 * becomes
		 *
		 *   (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
		 *
		 * We can omit the first three elements, because we never
		 * divide by 0, and 1 and 2 are both powers of two, which are
		 * handled above.
		 */
#define	SIZE_INV_SHIFT	((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
#define	SIZE_INV(s)	(((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
		static const size_t interval_invs[] = {
		    SIZE_INV(3),
		    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
		    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
		    SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
		    SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
		    SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
		    SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
		    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
		};

		if (likely(interval <= ((sizeof(interval_invs) /
		    sizeof(size_t)) + 2))) {
			regind = (diff * interval_invs[interval - 3]) >>
			    SIZE_INV_SHIFT;
		} else
			regind = diff / interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
	}
	assert(diff == regind * interval);
	assert(regind < bin_info->nregs);

	return (regind);
}

JEMALLOC_INLINE prof_tctx_t *
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
	prof_tctx_t *ret;
	arena_chunk_t *chunk;

	cassert(config_prof);
	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (likely(chunk != ptr)) {
		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
		    LG_PAGE;
		size_t mapbits = arena_mapbits_get(chunk, pageind);
		assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
		if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
			ret = (prof_tctx_t *)(uintptr_t)1U;
		else {
			arena_chunk_map_misc_t *elm =
			    arena_miscelm_get_mutable(chunk, pageind);
			ret = atomic_read_p(&elm->prof_tctx_pun);
		}
	} else
		ret = huge_prof_tctx_get(tsdn, ptr);

	return (ret);
}

JEMALLOC_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx)
{
	arena_chunk_t *chunk;

	cassert(config_prof);
	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (likely(chunk != ptr)) {
		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
		    LG_PAGE;

		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);

		if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
		    (uintptr_t)1U)) {
			arena_chunk_map_misc_t *elm;

			assert(arena_mapbits_large_get(chunk, pageind) != 0);

			elm = arena_miscelm_get_mutable(chunk, pageind);
			atomic_write_p(&elm->prof_tctx_pun, tctx);
		} else {
			/*
			 * tctx must always be initialized for large runs.
			 * Assert that the surrounding conditional logic is
			 * equivalent to checking whether ptr refers to a
			 * large run.
			 */
			assert(arena_mapbits_large_get(chunk, pageind) == 0);
		}
	} else
		huge_prof_tctx_set(tsdn, ptr, tctx);
}

JEMALLOC_INLINE void
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *old_tctx)
{
	cassert(config_prof);
	assert(ptr != NULL);

	if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
	    (uintptr_t)old_tctx > (uintptr_t)1U))) {
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		if (likely(chunk != ptr)) {
			size_t pageind;
			arena_chunk_map_misc_t *elm;

			pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
			    LG_PAGE;
			assert(arena_mapbits_allocated_get(chunk, pageind) !=
			    0);
			assert(arena_mapbits_large_get(chunk, pageind) != 0);

			elm = arena_miscelm_get_mutable(chunk, pageind);
			atomic_write_p(&elm->prof_tctx_pun,
			    (prof_tctx_t *)(uintptr_t)1U);
		} else
			huge_prof_tctx_reset(tsdn, ptr);
	}
}

JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
{
	tsd_t *tsd;
	ticker_t *decay_ticker;

	if (unlikely(tsdn_null(tsdn)))
		return;
	tsd = tsdn_tsd(tsdn);
	decay_ticker = decay_ticker_get(tsd, arena->ind);
	if (unlikely(decay_ticker == NULL))
		return;
	if (unlikely(ticker_ticks(decay_ticker, nticks)))
		arena_purge(tsdn, arena, false);
}

JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
{
	arena_decay_ticks(tsdn, arena, 1);
}

JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero, tcache_t *tcache, bool slow_path)
{
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(size != 0);

	if (likely(tcache != NULL)) {
		if (likely(size <= SMALL_MAXCLASS)) {
			return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path));
		}
		if (likely(size <= tcache_maxclass)) {
			return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path));
		}
		/* (size > tcache_maxclass) case falls through. */
		assert(size > tcache_maxclass);
	}

	return (arena_malloc_hard(tsdn, arena, size, ind, zero));
}

JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(const void *ptr)
{
	arena_chunk_t *chunk;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (likely(chunk != ptr))
		return (extent_node_arena_get(&chunk->node));
	else
		return (huge_aalloc(ptr));
}

/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;
	size_t pageind;
	szind_t binind;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (likely(chunk != ptr)) {
		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		binind = arena_mapbits_binind_get(chunk, pageind);
		if (unlikely(binind == BININD_INVALID || (config_prof &&
		    !demote && arena_mapbits_large_get(chunk, pageind) !=
		    0))) {
			/*
			 * Large allocation.  In the common case (demote), and
			 * as this is an inline function, most callers will
			 * only end up looking at binind to determine that ptr
			 * is a small allocation.
			 */
			assert(config_cache_oblivious || ((uintptr_t)ptr &
			    PAGE_MASK) == 0);
			ret = arena_mapbits_large_size_get(chunk, pageind) -
			    large_pad;
			assert(ret != 0);
			assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
			    chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk,
			    pageind+((ret+large_pad)>>LG_PAGE)-1));
		} else {
			/*
			 * Small allocation (possibly promoted to a large
			 * object).
			 */
			assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
			    arena_ptr_small_binind_get(ptr,
			    arena_mapbits_get(chunk, pageind)) == binind);
			ret = index2size(binind);
		}
	} else
		ret = huge_salloc(tsdn, ptr);

	return (ret);
}

JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (likely(chunk != ptr)) {
		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		mapbits = arena_mapbits_get(chunk, pageind);
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
			/* Small allocation. */
			if (likely(tcache != NULL)) {
				szind_t binind =
				    arena_ptr_small_binind_get(ptr, mapbits);
				tcache_dalloc_small(tsdn_tsd(tsdn), tcache,
				    ptr, binind, slow_path);
			} else {
				arena_dalloc_small(tsdn,
				    extent_node_arena_get(&chunk->node),
				    chunk, ptr, pageind);
			}
		} else {
			size_t size = arena_mapbits_large_size_get(chunk,
			    pageind);

			assert(config_cache_oblivious || ((uintptr_t)ptr &
			    PAGE_MASK) == 0);

			if (likely(tcache != NULL) && size - large_pad <=
			    tcache_maxclass) {
				tcache_dalloc_large(tsdn_tsd(tsdn), tcache,
				    ptr, size - large_pad, slow_path);
			} else {
				arena_dalloc_large(tsdn,
				    extent_node_arena_get(&chunk->node),
				    chunk, ptr);
			}
		}
	} else
		huge_dalloc(tsdn, ptr);
}

JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    bool slow_path)
{
	arena_chunk_t *chunk;

	assert(!tsdn_null(tsdn) || tcache == NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (likely(chunk != ptr)) {
		if (config_prof && opt_prof) {
			size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk)
			    >> LG_PAGE;
			assert(arena_mapbits_allocated_get(chunk, pageind) !=
			    0);
			if (arena_mapbits_large_get(chunk, pageind) != 0) {
				/*
				 * Make sure to use promoted size, not request
				 * size.
				 */
				size = arena_mapbits_large_size_get(chunk,
				    pageind) - large_pad;
			}
		}
		assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false)));

		if (likely(size <= SMALL_MAXCLASS)) {
			/* Small allocation. */
			if (likely(tcache != NULL)) {
				szind_t binind = size2index(size);
				tcache_dalloc_small(tsdn_tsd(tsdn), tcache,
				    ptr, binind, slow_path);
			} else {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> LG_PAGE;
				arena_dalloc_small(tsdn,
				    extent_node_arena_get(&chunk->node),
				    chunk, ptr, pageind);
			}
		} else {
			assert(config_cache_oblivious || ((uintptr_t)ptr &
			    PAGE_MASK) == 0);

			if (likely(tcache != NULL) && size <= tcache_maxclass)
			{
				tcache_dalloc_large(tsdn_tsd(tsdn), tcache,
				    ptr, size, slow_path);
			} else {
				arena_dalloc_large(tsdn,
				    extent_node_arena_get(&chunk->node),
				    chunk, ptr);
			}
		}
	} else
		huge_dalloc(tsdn, ptr);
}
#  endif /* JEMALLOC_ARENA_INLINE_B */
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
49,079
31.120419
80
h
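The arena_run_regind() comment above replaces division by a non-power-of-two region interval with a multiply by a precomputed inverse plus a right shift. Below is a minimal standalone sketch of that trick (hypothetical LG_MAXREGS stands in for LG_RUN_MAXREGS; this is not jemalloc code), checking the fast path against plain division for every region index:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Mirror of the SIZE_INV scheme in arena_run_regind(): for a divisor D that
 * is not a power of two, precompute inv = ((1 << SHIFT) / D) + 1 and compute
 * X / D as (X * inv) >> SHIFT. The shift leaves enough headroom that the +1
 * rounding never perturbs the quotient for the offsets a run can produce.
 */
#define LG_MAXREGS 11 /* assumption standing in for LG_RUN_MAXREGS */
#define SHIFT ((sizeof(size_t) << 3) - LG_MAXREGS)
#define INV(d) ((((size_t)1 << SHIFT) / (d)) + 1)

int
main(void)
{
	size_t interval = 5; /* a non-power-of-two region interval */
	size_t inv = INV(interval);
	size_t regind;

	for (regind = 0; regind < ((size_t)1 << LG_MAXREGS); regind++) {
		size_t diff = regind * interval; /* byte offset of region */
		assert(((diff * inv) >> SHIFT) == regind);
	}
	printf("multiply-and-shift matched division for interval %zu\n",
	    interval);
	return (0);
}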
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/ql.h
/* List definitions. */ #define ql_head(a_type) \ struct { \ a_type *qlh_first; \ } #define ql_head_initializer(a_head) {NULL} #define ql_elm(a_type) qr(a_type) /* List functions. */ #define ql_new(a_head) do { \ (a_head)->qlh_first = NULL; \ } while (0) #define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) #define ql_first(a_head) ((a_head)->qlh_first) #define ql_last(a_head, a_field) \ ((ql_first(a_head) != NULL) \ ? qr_prev(ql_first(a_head), a_field) : NULL) #define ql_next(a_head, a_elm, a_field) \ ((ql_last(a_head, a_field) != (a_elm)) \ ? qr_next((a_elm), a_field) : NULL) #define ql_prev(a_head, a_elm, a_field) \ ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \ : NULL) #define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ qr_before_insert((a_qlelm), (a_elm), a_field); \ if (ql_first(a_head) == (a_qlelm)) { \ ql_first(a_head) = (a_elm); \ } \ } while (0) #define ql_after_insert(a_qlelm, a_elm, a_field) \ qr_after_insert((a_qlelm), (a_elm), a_field) #define ql_head_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = (a_elm); \ } while (0) #define ql_tail_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = qr_next((a_elm), a_field); \ } while (0) #define ql_remove(a_head, a_elm, a_field) do { \ if (ql_first(a_head) == (a_elm)) { \ ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ } \ if (ql_first(a_head) != (a_elm)) { \ qr_remove((a_elm), a_field); \ } else { \ ql_first(a_head) = NULL; \ } \ } while (0) #define ql_head_remove(a_head, a_type, a_field) do { \ a_type *t = ql_first(a_head); \ ql_remove((a_head), t, a_field); \ } while (0) #define ql_tail_remove(a_head, a_type, a_field) do { \ a_type *t = ql_last(a_head, a_field); \ ql_remove((a_head), t, a_field); \ } while (0) #define ql_foreach(a_var, a_head, a_field) \ qr_foreach((a_var), ql_first(a_head), a_field) #define ql_reverse_foreach(a_var, a_head, a_field) \ qr_reverse_foreach((a_var), ql_first(a_head), a_field)
2,369
27.902439
65
h
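A minimal usage sketch for the ql_ list macros above, assuming qr.h and ql.h are on the include path (widget_t and its field names are hypothetical). The linkage is embedded in the element type, so no per-node allocation is needed:

#include <stdio.h>
#include "qr.h"
#include "ql.h"

typedef struct widget_s widget_t;
struct widget_s {
	int id;
	ql_elm(widget_t) link; /* embedded list linkage */
};

int
main(void)
{
	ql_head(widget_t) head;
	widget_t a, b, c;
	widget_t *w;

	a.id = 1; b.id = 2; c.id = 3;
	ql_new(&head);
	ql_elm_new(&a, link);
	ql_elm_new(&b, link);
	ql_elm_new(&c, link);

	ql_tail_insert(&head, &a, link);
	ql_tail_insert(&head, &b, link);
	ql_head_insert(&head, &c, link);	/* list is now c, a, b */

	ql_foreach(w, &head, link)
		printf("%d ", w->id);		/* prints "3 1 2 " */
	printf("\n");

	ql_remove(&head, &a, link);
	printf("first=%d last=%d\n", ql_first(&head)->id,
	    ql_last(&head, link)->id);		/* first=3 last=2 */
	return (0);
}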
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/nstime.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct nstime_s nstime_t; /* Maximum supported number of seconds (~584 years). */ #define NSTIME_SEC_MAX KQU(18446744072) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct nstime_s { uint64_t ns; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void nstime_init(nstime_t *time, uint64_t ns); void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec); uint64_t nstime_ns(const nstime_t *time); uint64_t nstime_sec(const nstime_t *time); uint64_t nstime_nsec(const nstime_t *time); void nstime_copy(nstime_t *time, const nstime_t *source); int nstime_compare(const nstime_t *a, const nstime_t *b); void nstime_add(nstime_t *time, const nstime_t *addend); void nstime_subtract(nstime_t *time, const nstime_t *subtrahend); void nstime_imultiply(nstime_t *time, uint64_t multiplier); void nstime_idivide(nstime_t *time, uint64_t divisor); uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor); #ifdef JEMALLOC_JET typedef bool (nstime_monotonic_t)(void); extern nstime_monotonic_t *nstime_monotonic; typedef bool (nstime_update_t)(nstime_t *); extern nstime_update_t *nstime_update; #else bool nstime_monotonic(void); bool nstime_update(nstime_t *time); #endif #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
1,738
34.489796
80
h
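The struct carries a single uint64_t, so sec/nsec pairs presumably collapse into one flat nanosecond counter; that reading (an assumption here, since the definitions live in nstime.c) also explains the NSTIME_SEC_MAX bound: the largest seconds value whose sec*10^9 + nsec still fits in 64 bits. A sketch:

#include <assert.h>
#include <stdint.h>

#define BILLION UINT64_C(1000000000)

typedef struct { uint64_t ns; } nstime_sketch_t; /* mirrors struct nstime_s */

/* Assumed encoding of nstime_init2(): one flat nanosecond counter. */
static void
nstime_init2_sketch(nstime_sketch_t *time, uint64_t sec, uint64_t nsec)
{
	time->ns = sec * BILLION + nsec;
}

int
main(void)
{
	nstime_sketch_t t;

	nstime_init2_sketch(&t, 3, 250000000);	/* 3.25 s */
	assert(t.ns / BILLION == 3);		/* nstime_sec() analogue */
	assert(t.ns % BILLION == 250000000);	/* nstime_nsec() analogue */
	/* NSTIME_SEC_MAX: largest sec with sec*1e9 + (1e9-1) <= UINT64_MAX. */
	assert((UINT64_MAX - (BILLION - 1)) / BILLION == UINT64_C(18446744072));
	return (0);
}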
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/witness.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct witness_s witness_t; typedef unsigned witness_rank_t; typedef ql_head(witness_t) witness_list_t; typedef int witness_comp_t (const witness_t *, const witness_t *); /* * Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by * the witness machinery. */ #define WITNESS_RANK_OMIT 0U #define WITNESS_RANK_INIT 1U #define WITNESS_RANK_CTL 1U #define WITNESS_RANK_ARENAS 2U #define WITNESS_RANK_PROF_DUMP 3U #define WITNESS_RANK_PROF_BT2GCTX 4U #define WITNESS_RANK_PROF_TDATAS 5U #define WITNESS_RANK_PROF_TDATA 6U #define WITNESS_RANK_PROF_GCTX 7U #define WITNESS_RANK_ARENA 8U #define WITNESS_RANK_ARENA_CHUNKS 9U #define WITNESS_RANK_ARENA_NODE_CACHE 10 #define WITNESS_RANK_BASE 11U #define WITNESS_RANK_LEAF 0xffffffffU #define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF #define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF #define WITNESS_RANK_DSS WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF #define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}} #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct witness_s { /* Name, used for printing lock order reversal messages. */ const char *name; /* * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses * must be acquired in order of increasing rank. */ witness_rank_t rank; /* * If two witnesses are of equal rank and they have the same comp * function pointer, it is called as a last attempt to differentiate * between witnesses of equal rank. */ witness_comp_t *comp; /* Linkage for thread's currently owned locks. 
*/ ql_elm(witness_t) link; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void witness_init(witness_t *witness, const char *name, witness_rank_t rank, witness_comp_t *comp); #ifdef JEMALLOC_JET typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *); extern witness_lock_error_t *witness_lock_error; #else void witness_lock_error(const witness_list_t *witnesses, const witness_t *witness); #endif #ifdef JEMALLOC_JET typedef void (witness_owner_error_t)(const witness_t *); extern witness_owner_error_t *witness_owner_error; #else void witness_owner_error(const witness_t *witness); #endif #ifdef JEMALLOC_JET typedef void (witness_not_owner_error_t)(const witness_t *); extern witness_not_owner_error_t *witness_not_owner_error; #else void witness_not_owner_error(const witness_t *witness); #endif #ifdef JEMALLOC_JET typedef void (witness_lockless_error_t)(const witness_list_t *); extern witness_lockless_error_t *witness_lockless_error; #else void witness_lockless_error(const witness_list_t *witnesses); #endif void witnesses_cleanup(tsd_t *tsd); void witness_fork_cleanup(tsd_t *tsd); void witness_prefork(tsd_t *tsd); void witness_postfork_parent(tsd_t *tsd); void witness_postfork_child(tsd_t *tsd); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE bool witness_owner(tsd_t *tsd, const witness_t *witness); void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness); void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness); void witness_assert_lockless(tsdn_t *tsdn); void witness_lock(tsdn_t *tsdn, witness_t *witness); void witness_unlock(tsdn_t *tsdn, witness_t *witness); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) JEMALLOC_INLINE bool witness_owner(tsd_t *tsd, const witness_t *witness) { witness_list_t *witnesses; witness_t *w; witnesses = tsd_witnessesp_get(tsd); ql_foreach(w, witnesses, link) { if (w == witness) return (true); } return (false); } JEMALLOC_INLINE void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness) { tsd_t *tsd; if (!config_debug) return; if (tsdn_null(tsdn)) return; tsd = tsdn_tsd(tsdn); if (witness->rank == WITNESS_RANK_OMIT) return; if (witness_owner(tsd, witness)) return; witness_owner_error(witness); } JEMALLOC_INLINE void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness) { tsd_t *tsd; witness_list_t *witnesses; witness_t *w; if (!config_debug) return; if (tsdn_null(tsdn)) return; tsd = tsdn_tsd(tsdn); if (witness->rank == WITNESS_RANK_OMIT) return; witnesses = tsd_witnessesp_get(tsd); ql_foreach(w, witnesses, link) { if (w == witness) witness_not_owner_error(witness); } } JEMALLOC_INLINE void witness_assert_lockless(tsdn_t *tsdn) { tsd_t *tsd; witness_list_t *witnesses; witness_t *w; if (!config_debug) return; if (tsdn_null(tsdn)) return; tsd = tsdn_tsd(tsdn); witnesses = tsd_witnessesp_get(tsd); w = ql_last(witnesses, link); if (w != NULL) witness_lockless_error(witnesses); } JEMALLOC_INLINE void witness_lock(tsdn_t *tsdn, witness_t *witness) { tsd_t *tsd; witness_list_t *witnesses; witness_t *w; if (!config_debug) return; if (tsdn_null(tsdn)) return; tsd = tsdn_tsd(tsdn); if (witness->rank == WITNESS_RANK_OMIT) return; witness_assert_not_owner(tsdn, witness); witnesses = tsd_witnessesp_get(tsd); w = ql_last(witnesses, link); if (w == NULL) { /* No other locks; do 
nothing. */ } else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) { /* Forking, and relaxed ranking satisfied. */ } else if (w->rank > witness->rank) { /* Not forking, rank order reversal. */ witness_lock_error(witnesses, witness); } else if (w->rank == witness->rank && (w->comp == NULL || w->comp != witness->comp || w->comp(w, witness) > 0)) { /* * Missing/incompatible comparison function, or comparison * function indicates rank order reversal. */ witness_lock_error(witnesses, witness); } ql_elm_new(witness, link); ql_tail_insert(witnesses, witness, link); } JEMALLOC_INLINE void witness_unlock(tsdn_t *tsdn, witness_t *witness) { tsd_t *tsd; witness_list_t *witnesses; if (!config_debug) return; if (tsdn_null(tsdn)) return; tsd = tsdn_tsd(tsdn); if (witness->rank == WITNESS_RANK_OMIT) return; /* * Check whether the thread owns the witness before removal, rather * than relying on witness_assert_owner() to abort, so that unit tests * can test this function's failure mode without causing undefined * behavior. */ if (witness_owner(tsd, witness)) { witnesses = tsd_witnessesp_get(tsd); ql_remove(witnesses, witness, link); } else witness_assert_owner(tsdn, witness); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
7,051
25.411985
80
h
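A toy model of the rank discipline documented above (illustration only, not jemalloc code; the real machinery keeps a per-thread witness list and also consults the comp function for equal ranks): each acquisition must not fall below the highest rank already held.

#include <assert.h>
#include <stdio.h>

#define RANK_OMIT 0u	/* cf. WITNESS_RANK_OMIT: exempt from checking */

typedef struct {
	const char *name;
	unsigned rank;
} toy_witness_t;

static unsigned held_max = 0;	/* highest rank this "thread" holds */

static void
toy_lock(const toy_witness_t *w)
{
	if (w->rank == RANK_OMIT)
		return;
	/* A violation here is what witness_lock_error() reports. */
	assert(w->rank >= held_max && "lock order reversal");
	held_max = w->rank;
}

int
main(void)
{
	toy_witness_t arenas = {"arenas", 2};	/* cf. WITNESS_RANK_ARENAS */
	toy_witness_t arena = {"arena", 8};	/* cf. WITNESS_RANK_ARENA */

	toy_lock(&arenas);	/* rank 2: ok */
	toy_lock(&arena);	/* rank 8 >= 2: ok */
	/* toy_lock(&arenas) here would trip the assertion (2 < 8). */
	printf("acquired up to rank %u in order\n", held_max);
	return (0);
}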
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/qr.h
/* Ring definitions. */ #define qr(a_type) \ struct { \ a_type *qre_next; \ a_type *qre_prev; \ } /* Ring functions. */ #define qr_new(a_qr, a_field) do { \ (a_qr)->a_field.qre_next = (a_qr); \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) #define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) #define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) #define qr_before_insert(a_qrelm, a_qr, a_field) do { \ (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ (a_qr)->a_field.qre_next = (a_qrelm); \ (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ (a_qrelm)->a_field.qre_prev = (a_qr); \ } while (0) #define qr_after_insert(a_qrelm, a_qr, a_field) \ do \ { \ (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ (a_qr)->a_field.qre_prev = (a_qrelm); \ (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ (a_qrelm)->a_field.qre_next = (a_qr); \ } while (0) #define qr_meld(a_qr_a, a_qr_b, a_field) do { \ void *t; \ (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ t = (a_qr_a)->a_field.qre_prev; \ (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ (a_qr_b)->a_field.qre_prev = t; \ } while (0) /* * qr_meld() and qr_split() are functionally equivalent, so there's no need to * have two copies of the code. */ #define qr_split(a_qr_a, a_qr_b, a_field) \ qr_meld((a_qr_a), (a_qr_b), a_field) #define qr_remove(a_qr, a_field) do { \ (a_qr)->a_field.qre_prev->a_field.qre_next \ = (a_qr)->a_field.qre_next; \ (a_qr)->a_field.qre_next->a_field.qre_prev \ = (a_qr)->a_field.qre_prev; \ (a_qr)->a_field.qre_next = (a_qr); \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) #define qr_foreach(var, a_qr, a_field) \ for ((var) = (a_qr); \ (var) != NULL; \ (var) = (((var)->a_field.qre_next != (a_qr)) \ ? (var)->a_field.qre_next : NULL)) #define qr_reverse_foreach(var, a_qr, a_field) \ for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ (var) != NULL; \ (var) = (((var) != (a_qr)) \ ? (var)->a_field.qre_prev : NULL))
2,259
31.285714
78
h
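A minimal sketch of the qr_ ring macros above (node_t is a hypothetical example type; assumes qr.h is on the include path). A ring has no distinguished head, so any element serves as the iteration entry point:

#include <stdio.h>
#include "qr.h"

typedef struct node_s node_t;
struct node_s {
	int id;
	qr(node_t) link;	/* embedded ring linkage */
};

int
main(void)
{
	node_t a, b, c;
	node_t *n;

	a.id = 1; b.id = 2; c.id = 3;
	qr_new(&a, link);		/* each starts as a ring of one */
	qr_new(&b, link);
	qr_new(&c, link);
	qr_after_insert(&a, &b, link);	/* ring: a, b */
	qr_after_insert(&b, &c, link);	/* ring: a, b, c */

	qr_foreach(n, &a, link)		/* visits a, b, c */
		printf("%d ", n->id);
	printf("\n");

	qr_remove(&b, link);		/* b reverts to a ring of one */
	qr_foreach(n, &a, link)		/* visits a, c */
		printf("%d ", n->id);
	printf("\n");
	return (0);
}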
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/public_namespace.sh
#!/bin/sh for nm in `cat $1` ; do n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` echo "#define je_${n} JEMALLOC_N(${n})" done
129
17.571429
46
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/spin.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct spin_s spin_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct spin_s { unsigned iteration; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void spin_init(spin_t *spin); void spin_adaptive(spin_t *spin); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_)) JEMALLOC_INLINE void spin_init(spin_t *spin) { spin->iteration = 0; } JEMALLOC_INLINE void spin_adaptive(spin_t *spin) { volatile uint64_t i; for (i = 0; i < (KQU(1) << spin->iteration); i++) CPU_SPINWAIT; if (spin->iteration < 63) spin->iteration++; } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
1,154
21.211538
80
h
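A standalone sketch of the exponential backoff in spin_adaptive() above: the n-th call busy-waits 2^n pause iterations, with the exponent capped at 63 so the shift stays defined. CPU_SPINWAIT is jemalloc's configured pause hint; a no-op stands in here so the sketch compiles anywhere:

#include <stdint.h>
#include <stdio.h>

#ifndef CPU_SPINWAIT
#define CPU_SPINWAIT	/* e.g. __asm__ volatile("pause") on x86 */
#endif

typedef struct { unsigned iteration; } spin_sketch_t;

static void
spin_adaptive_sketch(spin_sketch_t *spin)
{
	volatile uint64_t i;	/* volatile keeps the loop from folding away */

	for (i = 0; i < (UINT64_C(1) << spin->iteration); i++)
		CPU_SPINWAIT;
	if (spin->iteration < 63)
		spin->iteration++;
}

int
main(void)
{
	spin_sketch_t spin = {0};
	int call;

	/* Calls 1..5 busy-wait 1, 2, 4, 8, 16 iterations respectively. */
	for (call = 0; call < 5; call++) {
		printf("call %d spins %llu times\n", call + 1,
		    (unsigned long long)(UINT64_C(1) << spin.iteration));
		spin_adaptive_sketch(&spin);
	}
	return (0);
}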
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/smoothstep.h
/* * This file was generated by the following command: * sh smoothstep.sh smoother 200 24 3 15 */ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * This header defines a precomputed table based on the smoothstep family of * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0 * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so * that floating point math can be avoided. * * 3 2 * smoothstep(x) = -2x + 3x * * 5 4 3 * smootherstep(x) = 6x - 15x + 10x * * 7 6 5 4 * smootheststep(x) = -20x + 70x - 84x + 35x */ #define SMOOTHSTEP_VARIANT "smoother" #define SMOOTHSTEP_NSTEPS 200 #define SMOOTHSTEP_BFP 24 #define SMOOTHSTEP \ /* STEP(step, h, x, y) */ \ STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \ STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \ STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \ STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \ STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \ STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \ STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \ STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \ STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \ STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \ STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \ STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \ STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \ STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \ STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \ STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \ STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \ STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \ STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \ STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \ STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \ STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \ STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \ STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \ STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \ STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \ STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \ STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \ STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \ STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \ STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \ STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \ STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \ STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \ STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \ STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \ STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \ STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \ STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \ STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \ STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \ STEP( 42, 
UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \ STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \ STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \ STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \ STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \ STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \ STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \ STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \ STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \ STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \ STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \ STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \ STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \ STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \ STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \ STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \ STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \ STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \ STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \ STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \ STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \ STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \ STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \ STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \ STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \ STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \ STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \ STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \ STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \ STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \ STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \ STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \ STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \ STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \ STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \ STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \ STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \ STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \ STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \ STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \ STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \ STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \ STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \ STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \ STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \ STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \ STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \ STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \ STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \ STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \ STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \ STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \ STEP( 94, 
UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \ STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \ STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \ STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \ STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \ STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \ STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \ STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \ STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \ STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \ STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \ STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \ STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \ STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \ STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \ STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \ STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \ STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \ STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \ STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \ STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \ STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \ STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \ STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \ STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \ STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \ STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \ STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \ STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \ STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \ STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \ STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \ STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \ STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \ STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \ STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \ STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \ STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \ STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \ STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \ STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \ STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \ STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \ STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \ STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \ STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \ STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \ STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \ STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \ STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \ STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \ STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 
0.868381777343750) \ STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \ STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \ STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \ STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \ STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \ STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \ STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \ STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \ STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \ STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \ STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \ STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \ STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \ STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \ STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \ STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \ STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \ STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \ STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \ STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \ STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \ STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \ STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \ STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \ STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \ STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \ STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \ STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \ STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \ STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \ STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \ STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \ STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \ STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \ STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \ STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \ STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \ STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \ STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \ STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \ STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \ STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \ STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \ STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \ STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \ STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \ STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \ STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \ STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \ STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \ STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \ STEP( 197, 
UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \ STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \ STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \ STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \ #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
16,061
64.02834
80
h
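The h column above appears to be smootherstep(x) = 6x^5 - 15x^4 + 10x^3 scaled by 2^SMOOTHSTEP_BFP and truncated to an integer (my reading of the generated table, cross-checked against the y column). A spot check of three entries (link with -lm):

#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define BFP 24	/* cf. SMOOTHSTEP_BFP */

/* Fixed-point smootherstep: truncate y * 2^BFP, matching the table's h. */
static uint64_t
smootherstep_fixed(double x)
{
	double y = 6 * pow(x, 5) - 15 * pow(x, 4) + 10 * pow(x, 3);

	return ((uint64_t)(y * (double)(UINT64_C(1) << BFP)));
}

int
main(void)
{
	assert(smootherstep_fixed(0.005) == UINT64_C(0x14));	  /* step 1 */
	assert(smootherstep_fixed(0.500) == UINT64_C(0x800000));  /* step 100 */
	assert(smootherstep_fixed(1.000) == UINT64_C(0x1000000)); /* step 200 */
	printf("table entries match 2^%d-scaled smootherstep\n", BFP);
	return (0);
}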
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit); bool chunk_dalloc_mmap(void *chunk, size_t size); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
789
34.909091
80
h
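chunk_alloc_mmap() must return memory aligned beyond page granularity. A common way to get that out of plain mmap(2) — an assumption about what the slow path here does, shown for illustration only — is to over-allocate by the alignment and munmap the leading and trailing slop. POSIX sketch (alignment must be a power of two and a page multiple):

#define _DEFAULT_SOURCE	/* for MAP_ANONYMOUS on some libcs */
#include <assert.h>
#include <stdint.h>
#include <sys/mman.h>

static void *
mmap_aligned(size_t size, size_t alignment)
{
	size_t over = size + alignment;	/* worst-case padding */
	char *addr, *ret;
	size_t lead, trail;

	addr = mmap(NULL, over, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return (NULL);
	ret = (char *)(((uintptr_t)addr + alignment - 1) & ~(alignment - 1));
	lead = (size_t)(ret - addr);
	trail = over - lead - size;
	if (lead != 0)
		munmap(addr, lead);		/* trim leading slop */
	if (trail != 0)
		munmap(ret + size, trail);	/* trim trailing slop */
	return (ret);
}

int
main(void)
{
	size_t chunk = (size_t)1 << 21;	/* LG_CHUNK_DEFAULT-sized request */
	void *p = mmap_aligned(chunk, chunk);

	assert(p != NULL && ((uintptr_t)p & (chunk - 1)) == 0);
	munmap(p, chunk);
	return (0);
}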
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/private_unnamespace.sh
#!/bin/sh for symbol in `cat $1` ; do echo "#undef ${symbol}" done
70
10.833333
27
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/chunk.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * Size and alignment of memory chunks that are allocated by the OS's virtual * memory system. */ #define LG_CHUNK_DEFAULT 21 /* Return the chunk address for allocation address a. */ #define CHUNK_ADDR2BASE(a) \ ((void *)((uintptr_t)(a) & ~chunksize_mask)) /* Return the chunk offset of address a. */ #define CHUNK_ADDR2OFFSET(a) \ ((size_t)((uintptr_t)(a) & chunksize_mask)) /* Return the smallest chunk multiple that is >= s. */ #define CHUNK_CEILING(s) \ (((s) + chunksize_mask) & ~chunksize_mask) #define CHUNK_HOOKS_INITIALIZER { \ NULL, \ NULL, \ NULL, \ NULL, \ NULL, \ NULL, \ NULL \ } #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern size_t opt_lg_chunk; extern const char *opt_dss; extern rtree_t chunks_rtree; extern size_t chunksize; extern size_t chunksize_mask; /* (chunksize - 1). */ extern size_t chunk_npages; extern const chunk_hooks_t chunk_hooks_default; chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena); chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks); bool chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node); void chunk_deregister(const void *chunk, const extent_node_t *node); void *chunk_alloc_base(size_t size); void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, bool *commit, bool dalloc_node); void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, bool *commit); void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, bool committed); void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, bool zeroed, bool committed); bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset, size_t length); bool chunk_boot(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE extent_node_t *chunk_lookup(const void *chunk, bool dependent); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_)) JEMALLOC_INLINE extent_node_t * chunk_lookup(const void *ptr, bool dependent) { return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent)); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ #include "jemalloc/internal/chunk_dss.h" #include "jemalloc/internal/chunk_mmap.h"
3,196
31.622449
80
h
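A worked example of the chunk address arithmetic above with the default 2^21-byte (2 MiB) chunks; the macro copies are local so the sketch stands alone:

#include <assert.h>
#include <stdint.h>

#define LG_CHUNK 21	/* cf. LG_CHUNK_DEFAULT */
static const uintptr_t chunksize = (uintptr_t)1 << LG_CHUNK;
static const uintptr_t chunksize_mask = ((uintptr_t)1 << LG_CHUNK) - 1;

#define ADDR2BASE(a)	((void *)((uintptr_t)(a) & ~chunksize_mask))
#define ADDR2OFFSET(a)	((size_t)((uintptr_t)(a) & chunksize_mask))
#define CEILING(s)	(((s) + chunksize_mask) & ~chunksize_mask)

int
main(void)
{
	uintptr_t a = (uintptr_t)0x40000000 + 0x1234; /* arbitrary address */

	assert((uintptr_t)ADDR2BASE(a) == (uintptr_t)0x40000000);
	assert(ADDR2OFFSET(a) == 0x1234);
	assert(CEILING((uintptr_t)1) == chunksize);	/* rounds up */
	assert(CEILING(chunksize) == chunksize);	/* exact multiple */
	assert(CEILING(chunksize + 1) == 2 * chunksize);
	return (0);
}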
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/ckh.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct ckh_s ckh_t; typedef struct ckhc_s ckhc_t; /* Typedefs to allow easy function pointer passing. */ typedef void ckh_hash_t (const void *, size_t[2]); typedef bool ckh_keycomp_t (const void *, const void *); /* Maintain counters used to get an idea of performance. */ /* #define CKH_COUNT */ /* Print counter values in ckh_delete() (requires CKH_COUNT). */ /* #define CKH_VERBOSE */ /* * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit * one bucket per L1 cache line. */ #define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS /* Hash table cell. */ struct ckhc_s { const void *key; const void *data; }; struct ckh_s { #ifdef CKH_COUNT /* Counters used to get an idea of performance. */ uint64_t ngrows; uint64_t nshrinks; uint64_t nshrinkfails; uint64_t ninserts; uint64_t nrelocs; #endif /* Used for pseudo-random number generation. */ uint64_t prng_state; /* Total number of items. */ size_t count; /* * Minimum and current number of hash table buckets. There are * 2^LG_CKH_BUCKET_CELLS cells per bucket. */ unsigned lg_minbuckets; unsigned lg_curbuckets; /* Hash and comparison functions. */ ckh_hash_t *hash; ckh_keycomp_t *keycomp; /* Hash table with 2^lg_curbuckets buckets. */ ckhc_t *tab; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp); void ckh_delete(tsd_t *tsd, ckh_t *ckh); size_t ckh_count(ckh_t *ckh); bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, void **data); bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); void ckh_string_hash(const void *key, size_t r_hash[2]); bool ckh_string_keycomp(const void *k1, const void *k2); void ckh_pointer_hash(const void *key, size_t r_hash[2]); bool ckh_pointer_keycomp(const void *k1, const void *k2); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
2,648
29.448276
80
h
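A worked check of the LG_CKH_BUCKET_CELLS derivation above, under the assumption of a 64-byte L1 line (LG_CACHELINE == 6) and 8-byte pointers (LG_SIZEOF_PTR == 3): 2^(6-3-1) = 4 two-pointer cells fill exactly one cache line per bucket.

#include <assert.h>
#include <stddef.h>

/* Local copy of the cell layout: one key pointer plus one data pointer. */
typedef struct {
	const void *key;
	const void *data;
} ckhc_sketch_t;

#define LG_CACHELINE 6	/* assumption: 64-byte L1 line */
#define LG_SIZEOF_PTR 3	/* assumption: 64-bit pointers */
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)

int
main(void)
{
	size_t cells = (size_t)1 << LG_CKH_BUCKET_CELLS;	/* 4 cells */

	/* 4 cells x 16 bytes/cell == 64 bytes: one bucket per L1 line. */
	assert(cells * sizeof(ckhc_sketch_t) == (size_t)1 << LG_CACHELINE);
	return (0);
}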
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/rb.h
/*- ******************************************************************************* * * cpp macro implementation of left-leaning 2-3 red-black trees. Parent * pointers are not used, and color bits are stored in the least significant * bit of right-child pointers (if RB_COMPACT is defined), thus making node * linkage as compact as is possible for red-black trees. * * Usage: * * #include <stdint.h> * #include <stdbool.h> * #define NDEBUG // (Optional, see assert(3).) * #include <assert.h> * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) * #include <rb.h> * ... * ******************************************************************************* */ #ifndef RB_H_ #define RB_H_ #ifdef RB_COMPACT /* Node structure. */ #define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right_red; \ } #else #define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right; \ bool rbn_red; \ } #endif /* Root structure. */ #define rb_tree(a_type) \ struct { \ a_type *rbt_root; \ } /* Left accessors. */ #define rbtn_left_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_left) #define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ (a_node)->a_field.rbn_left = a_left; \ } while (0) #ifdef RB_COMPACT /* Right accessors. */ #define rbtn_right_get(a_type, a_field, a_node) \ ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ & ((ssize_t)-2))) #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ } while (0) /* Color accessors. */ #define rbtn_red_get(a_type, a_field, a_node) \ ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ & ((size_t)1))) #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ | ((ssize_t)a_red)); \ } while (0) #define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ } while (0) #define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ } while (0) /* Node initializer. */ #define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ /* Bookkeeping bit cannot be used by node pointer. */ \ assert(((uintptr_t)(a_node) & 0x1) == 0); \ rbtn_left_set(a_type, a_field, (a_node), NULL); \ rbtn_right_set(a_type, a_field, (a_node), NULL); \ rbtn_red_set(a_type, a_field, (a_node)); \ } while (0) #else /* Right accessors. */ #define rbtn_right_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_right) #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right = a_right; \ } while (0) /* Color accessors. */ #define rbtn_red_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_red) #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_red = (a_red); \ } while (0) #define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = true; \ } while (0) #define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = false; \ } while (0) /* Node initializer. 
*/ #define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ rbtn_left_set(a_type, a_field, (a_node), NULL); \ rbtn_right_set(a_type, a_field, (a_node), NULL); \ rbtn_red_set(a_type, a_field, (a_node)); \ } while (0) #endif /* Tree initializer. */ #define rb_new(a_type, a_field, a_rbt) do { \ (a_rbt)->rbt_root = NULL; \ } while (0) /* Internal utility macros. */ #define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ if ((r_node) != NULL) { \ for (; \ rbtn_left_get(a_type, a_field, (r_node)) != NULL; \ (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) #define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ if ((r_node) != NULL) { \ for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \ (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) #define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ rbtn_right_set(a_type, a_field, (a_node), \ rbtn_left_get(a_type, a_field, (r_node))); \ rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ } while (0) #define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ rbtn_left_set(a_type, a_field, (a_node), \ rbtn_right_get(a_type, a_field, (r_node))); \ rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ } while (0) /* * The rb_proto() macro generates function prototypes that correspond to the * functions generated by an equivalently parameterized call to rb_gen(). */ #define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree); \ a_attr bool \ a_prefix##empty(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##last(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##next(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##search(a_rbt_type *rbtree, const a_type *key); \ a_attr a_type * \ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \ a_attr a_type * \ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ a_attr void \ a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ a_rbt_type *, a_type *, void *), void *arg); \ a_attr a_type * \ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \ a_attr void \ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ void *arg); /* * The rb_gen() macro generates a type-specific red-black tree implementation, * based on the above cpp macros. * * Arguments: * * a_attr : Function attribute for generated functions (ex: static). * a_prefix : Prefix for generated functions (ex: ex_). * a_rb_type : Type for red-black tree data structure (ex: ex_t). * a_type : Type for red-black tree node data structure (ex: ex_node_t). * a_field : Name of red-black tree node linkage (ex: ex_link). 
* a_cmp : Node comparison function name, with the following prototype: * int (a_cmp *)(a_type *a_node, a_type *a_other); * ^^^^^^ * or a_key * Interpretation of comparison function return values: * -1 : a_node < a_other * 0 : a_node == a_other * 1 : a_node > a_other * In all cases, the a_node or a_key macro argument is the first * argument to the comparison function, which makes it possible * to write comparison functions that treat the first argument * specially. * * Assuming the following setup: * * typedef struct ex_node_s ex_node_t; * struct ex_node_s { * rb_node(ex_node_t) ex_link; * }; * typedef rb_tree(ex_node_t) ex_t; * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) * * The following API is generated: * * static void * ex_new(ex_t *tree); * Description: Initialize a red-black tree structure. * Args: * tree: Pointer to an uninitialized red-black tree object. * * static bool * ex_empty(ex_t *tree); * Description: Determine whether tree is empty. * Args: * tree: Pointer to an initialized red-black tree object. * Ret: True if tree is empty, false otherwise. * * static ex_node_t * * ex_first(ex_t *tree); * static ex_node_t * * ex_last(ex_t *tree); * Description: Get the first/last node in tree. * Args: * tree: Pointer to an initialized red-black tree object. * Ret: First/last node in tree, or NULL if tree is empty. * * static ex_node_t * * ex_next(ex_t *tree, ex_node_t *node); * static ex_node_t * * ex_prev(ex_t *tree, ex_node_t *node); * Description: Get node's successor/predecessor. * Args: * tree: Pointer to an initialized red-black tree object. * node: A node in tree. * Ret: node's successor/predecessor in tree, or NULL if node is * last/first. * * static ex_node_t * * ex_search(ex_t *tree, const ex_node_t *key); * Description: Search for node that matches key. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. * Ret: Node in tree that matches key, or NULL if no match. * * static ex_node_t * * ex_nsearch(ex_t *tree, const ex_node_t *key); * static ex_node_t * * ex_psearch(ex_t *tree, const ex_node_t *key); * Description: Search for node that matches key. If no match is found, * return what would be key's successor/predecessor, were * key in tree. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. * Ret: Node in tree that matches key, or if no match, hypothetical node's * successor/predecessor (NULL if no successor/predecessor). * * static void * ex_insert(ex_t *tree, ex_node_t *node); * Description: Insert node into tree. * Args: * tree: Pointer to an initialized red-black tree object. * node: Node to be inserted into tree. * * static void * ex_remove(ex_t *tree, ex_node_t *node); * Description: Remove node from tree. * Args: * tree: Pointer to an initialized red-black tree object. * node: Node in tree to be removed. * * static ex_node_t * * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, * ex_node_t *, void *), void *arg); * static ex_node_t * * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, * ex_node_t *, void *), void *arg); * Description: Iterate forward/backward over tree, starting at node. If * tree is modified, iteration must be immediately * terminated by the callback function that causes the * modification. * Args: * tree : Pointer to an initialized red-black tree object. * start: Node at which to start iteration, or NULL to start at * first/last node. * cb : Callback function, which is called for each node during * iteration. 
Under normal circumstances the callback function * should return NULL, which causes iteration to continue. If a * callback function returns non-NULL, iteration is immediately * terminated and the non-NULL return value is returned by the * iterator. This is useful for re-starting iteration after * modifying tree. * arg : Opaque pointer passed to cb(). * Ret: NULL if iteration completed, or the non-NULL callback return value * that caused termination of the iteration. * * static void * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg); * Description: Iterate over the tree with post-order traversal, remove * each node, and run the callback if non-null. This is * used for destroying a tree without paying the cost to * rebalance it. The tree must not be otherwise altered * during traversal. * Args: * tree: Pointer to an initialized red-black tree object. * cb : Callback function, which, if non-null, is called for each node * during iteration. There is no way to stop iteration once it * has begun. * arg : Opaque pointer passed to cb(). */ #define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree) { \ rb_new(a_type, a_field, rbtree); \ } \ a_attr bool \ a_prefix##empty(a_rbt_type *rbtree) { \ return (rbtree->rbt_root == NULL); \ } \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ return (ret); \ } \ a_attr a_type * \ a_prefix##last(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ return (ret); \ } \ a_attr a_type * \ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ if (rbtn_right_get(a_type, a_field, node) != NULL) { \ rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ assert(tnode != NULL); \ ret = NULL; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ ret = tnode; \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ break; \ } \ assert(tnode != NULL); \ } \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ if (rbtn_left_get(a_type, a_field, node) != NULL) { \ rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ assert(tnode != NULL); \ ret = NULL; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ ret = tnode; \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ break; \ } \ assert(tnode != NULL); \ } \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ int cmp; \ ret = rbtree->rbt_root; \ while (ret != NULL \ && (cmp = (a_cmp)(key, ret)) != 0) { \ if (cmp < 0) { \ ret = rbtn_left_get(a_type, a_field, ret); \ } else { \ ret = rbtn_right_get(a_type, a_field, ret); \ } \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ ret = NULL; \ while (tnode != NULL) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ ret = tnode; \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ ret = tnode; \ break; \ } \ } 
\ return (ret); \ } \ a_attr a_type * \ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ ret = NULL; \ while (tnode != NULL) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ ret = tnode; \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ ret = tnode; \ break; \ } \ } \ return (ret); \ } \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ struct { \ a_type *node; \ int cmp; \ } path[sizeof(void *) << 4], *pathp; \ rbt_node_new(a_type, a_field, rbtree, node); \ /* Wind. */ \ path->node = rbtree->rbt_root; \ for (pathp = path; pathp->node != NULL; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ assert(cmp != 0); \ if (cmp < 0) { \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } else { \ pathp[1].node = rbtn_right_get(a_type, a_field, \ pathp->node); \ } \ } \ pathp->node = node; \ /* Unwind. */ \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ a_type *cnode = pathp->node; \ if (pathp->cmp < 0) { \ a_type *left = pathp[1].node; \ rbtn_left_set(a_type, a_field, cnode, left); \ if (rbtn_red_get(a_type, a_field, left)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ leftleft)) { \ /* Fix up 4-node. */ \ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, cnode, tnode); \ cnode = tnode; \ } \ } else { \ return; \ } \ } else { \ a_type *right = pathp[1].node; \ rbtn_right_set(a_type, a_field, cnode, right); \ if (rbtn_red_get(a_type, a_field, right)) { \ a_type *left = rbtn_left_get(a_type, a_field, cnode); \ if (left != NULL && rbtn_red_get(a_type, a_field, \ left)) { \ /* Split 4-node. */ \ rbtn_black_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, right); \ rbtn_red_set(a_type, a_field, cnode); \ } else { \ /* Lean left. */ \ a_type *tnode; \ bool tred = rbtn_red_get(a_type, a_field, cnode); \ rbtn_rotate_left(a_type, a_field, cnode, tnode); \ rbtn_color_set(a_type, a_field, tnode, tred); \ rbtn_red_set(a_type, a_field, cnode); \ cnode = tnode; \ } \ } else { \ return; \ } \ } \ pathp->node = cnode; \ } \ /* Set root, and make it black. */ \ rbtree->rbt_root = path->node; \ rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ } \ a_attr void \ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ struct { \ a_type *node; \ int cmp; \ } *pathp, *nodep, path[sizeof(void *) << 4]; \ /* Wind. */ \ nodep = NULL; /* Silence compiler warning. */ \ path->node = rbtree->rbt_root; \ for (pathp = path; pathp->node != NULL; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ if (cmp < 0) { \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } else { \ pathp[1].node = rbtn_right_get(a_type, a_field, \ pathp->node); \ if (cmp == 0) { \ /* Find node's successor, in preparation for swap. */ \ pathp->cmp = 1; \ nodep = pathp; \ for (pathp++; pathp->node != NULL; \ pathp++) { \ pathp->cmp = -1; \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } \ break; \ } \ } \ } \ assert(nodep->node == node); \ pathp--; \ if (pathp->node != node) { \ /* Swap node with its successor. 
*/ \ bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ rbtn_color_set(a_type, a_field, pathp->node, \ rbtn_red_get(a_type, a_field, node)); \ rbtn_left_set(a_type, a_field, pathp->node, \ rbtn_left_get(a_type, a_field, node)); \ /* If node's successor is its right child, the following code */\ /* will do the wrong thing for the right child pointer. */\ /* However, it doesn't matter, because the pointer will be */\ /* properly set when the successor is pruned. */\ rbtn_right_set(a_type, a_field, pathp->node, \ rbtn_right_get(a_type, a_field, node)); \ rbtn_color_set(a_type, a_field, node, tred); \ /* The pruned leaf node's child pointers are never accessed */\ /* again, so don't bother setting them to nil. */\ nodep->node = pathp->node; \ pathp->node = node; \ if (nodep == path) { \ rbtree->rbt_root = nodep->node; \ } else { \ if (nodep[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, nodep[-1].node, \ nodep->node); \ } else { \ rbtn_right_set(a_type, a_field, nodep[-1].node, \ nodep->node); \ } \ } \ } else { \ a_type *left = rbtn_left_get(a_type, a_field, node); \ if (left != NULL) { \ /* node has no successor, but it has a left child. */\ /* Splice node out, without losing the left child. */\ assert(!rbtn_red_get(a_type, a_field, node)); \ assert(rbtn_red_get(a_type, a_field, left)); \ rbtn_black_set(a_type, a_field, left); \ if (pathp == path) { \ rbtree->rbt_root = left; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ left); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ left); \ } \ } \ return; \ } else if (pathp == path) { \ /* The tree only contained one node. */ \ rbtree->rbt_root = NULL; \ return; \ } \ } \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ /* Prune red node, which requires no fixup. */ \ assert(pathp[-1].cmp < 0); \ rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \ return; \ } \ /* The node to be pruned is black, so unwind until balance is */\ /* restored. */\ pathp->node = NULL; \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ assert(pathp->cmp != 0); \ if (pathp->cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp->node, \ pathp[1].node); \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *right = rbtn_right_get(a_type, a_field, \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ a_type *tnode; \ if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ rightleft)) { \ /* In the following diagrams, ||, //, and \\ */\ /* indicate the path to the removed node. */\ /* */\ /* || */\ /* pathp(r) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (r) */\ /* */\ rbtn_black_set(a_type, a_field, pathp->node); \ rbtn_rotate_right(a_type, a_field, right, tnode); \ rbtn_right_set(a_type, a_field, pathp->node, tnode);\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ } else { \ /* || */\ /* pathp(r) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (b) */\ /* */\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ } \ /* Balance restored, but rotation modified subtree */\ /* root. 
*/\ assert((uintptr_t)pathp > (uintptr_t)path); \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ return; \ } else { \ a_type *right = rbtn_right_get(a_type, a_field, \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ rightleft)) { \ /* || */\ /* pathp(b) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, rightleft); \ rbtn_rotate_right(a_type, a_field, right, tnode); \ rbtn_right_set(a_type, a_field, pathp->node, tnode);\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ /* subtree root, which may actually be the tree */\ /* root. */\ if (pathp == path) { \ /* Set root. */ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, \ pathp[-1].node, tnode); \ } else { \ rbtn_right_set(a_type, a_field, \ pathp[-1].node, tnode); \ } \ } \ return; \ } else { \ /* || */\ /* pathp(b) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (b) */\ a_type *tnode; \ rbtn_red_set(a_type, a_field, pathp->node); \ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ pathp->node = tnode; \ } \ } \ } else { \ a_type *left; \ rbtn_right_set(a_type, a_field, pathp->node, \ pathp[1].node); \ left = rbtn_left_get(a_type, a_field, pathp->node); \ if (rbtn_red_get(a_type, a_field, left)) { \ a_type *tnode; \ a_type *leftright = rbtn_right_get(a_type, a_field, \ left); \ a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ leftright); \ if (leftrightleft != NULL && rbtn_red_get(a_type, \ a_field, leftrightleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (r) (b) */\ /* \ */\ /* (b) */\ /* / */\ /* (r) */\ a_type *unode; \ rbtn_black_set(a_type, a_field, leftrightleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ unode); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ rbtn_right_set(a_type, a_field, unode, tnode); \ rbtn_rotate_left(a_type, a_field, unode, tnode); \ } else { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (r) (b) */\ /* \ */\ /* (b) */\ /* / */\ /* (b) */\ assert(leftright != NULL); \ rbtn_red_set(a_type, a_field, leftright); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ rbtn_black_set(a_type, a_field, tnode); \ } \ /* Balance restored, but rotation modified subtree */\ /* root, which may actually be the tree root. */\ if (pathp == path) { \ /* Set root. */ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ } \ return; \ } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ leftleft)) { \ /* || */\ /* pathp(r) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, pathp->node); \ rbtn_red_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ /* subtree root. 
*/\ assert((uintptr_t)pathp > (uintptr_t)path); \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ return; \ } else { \ /* || */\ /* pathp(r) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (b) */\ rbtn_red_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, pathp->node); \ /* Balance restored. */ \ return; \ } \ } else { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ leftleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ /* subtree root, which may actually be the tree */\ /* root. */\ if (pathp == path) { \ /* Set root. */ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, \ pathp[-1].node, tnode); \ } else { \ rbtn_right_set(a_type, a_field, \ pathp[-1].node, tnode); \ } \ } \ return; \ } else { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (b) */\ rbtn_red_set(a_type, a_field, left); \ } \ } \ } \ } \ /* Set root. */ \ rbtree->rbt_root = path->node; \ assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \ } \ a_attr a_type * \ a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ if (node == NULL) { \ return (NULL); \ } else { \ a_type *ret; \ if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \ arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg)); \ } \ } \ a_attr a_type * \ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ int cmp = a_cmp(start, node); \ if (cmp < 0) { \ a_type *ret; \ if ((ret = a_prefix##iter_start(rbtree, start, \ rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \ (ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg)); \ } else if (cmp > 0) { \ return (a_prefix##iter_start(rbtree, start, \ rbtn_right_get(a_type, a_field, node), cb, arg)); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg)); \ } \ } \ a_attr a_type * \ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ a_rbt_type *, a_type *, void *), void *arg) { \ a_type *ret; \ if (start != NULL) { \ ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ cb, arg); \ } else { \ ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ if (node == NULL) { \ return (NULL); \ } else { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ (ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg)); \ } \ } \ a_attr a_type * \ 
a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ void *arg) { \ int cmp = a_cmp(start, node); \ if (cmp > 0) { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ (ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg)); \ } else if (cmp < 0) { \ return (a_prefix##reverse_iter_start(rbtree, start, \ rbtn_left_get(a_type, a_field, node), cb, arg)); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg)); \ } \ } \ a_attr a_type * \ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ a_type *ret; \ if (start != NULL) { \ ret = a_prefix##reverse_iter_start(rbtree, start, \ rbtree->rbt_root, cb, arg); \ } else { \ ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ cb, arg); \ } \ return (ret); \ } \ a_attr void \ a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \ a_type *, void *), void *arg) { \ if (node == NULL) { \ return; \ } \ a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \ node), cb, arg); \ rbtn_left_set(a_type, a_field, (node), NULL); \ a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \ node), cb, arg); \ rbtn_right_set(a_type, a_field, (node), NULL); \ if (cb) { \ cb(node, arg); \ } \ } \ a_attr void \ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ void *arg) { \ a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \ rbtree->rbt_root = NULL; \ } #endif /* RB_H_ */
38,311
37.159363
80
h
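For orientation, a hedged sketch of instantiating the API generated by rb_gen() above; widget_t, its link field, and widget_cmp() are illustrative assumptions, not anything defined by rb.h itself (jemalloc's own callers pass `static UNUSED` to silence warnings about unused generated functions).

/*
 * Minimal sketch, assuming rb.h's rb_node()/rb_tree() macros from earlier
 * in this header are in scope.
 */
#include <assert.h>

typedef struct widget_s widget_t;
struct widget_s {
	int	key;
	rb_node(widget_t)	link;	/* Embedded linkage; no allocation. */
};

typedef rb_tree(widget_t) widget_tree_t;

/* Must impose a total order; insert asserts keys never compare equal. */
static int
widget_cmp(const widget_t *a, const widget_t *b)
{
	return ((a->key > b->key) - (a->key < b->key));
}

/* Generates widget_tree_new(), _insert(), _search(), _iter(), etc. */
rb_gen(static, widget_tree_, widget_tree_t, widget_t, link, widget_cmp)

static void
widget_tree_example(void)
{
	widget_tree_t tree;
	widget_t a, b;

	a.key = 1;
	b.key = 2;
	widget_tree_new(&tree);
	widget_tree_insert(&tree, &a);
	widget_tree_insert(&tree, &b);
	assert(widget_tree_first(&tree) == &a);
	assert(widget_tree_next(&tree, &a) == &b);
	widget_tree_remove(&tree, &a);
	widget_tree_remove(&tree, &b);
	assert(widget_tree_empty(&tree));
}

Because nodes embed their own linkage, the tree never allocates; that is also why destroy() can simply unlink each node and hand it to the callback.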
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/rtree.h
/* * This radix tree implementation is tailored to the singular purpose of * associating metadata with chunks that are currently owned by jemalloc. * ******************************************************************************* */ #ifdef JEMALLOC_H_TYPES typedef struct rtree_node_elm_s rtree_node_elm_t; typedef struct rtree_level_s rtree_level_t; typedef struct rtree_s rtree_t; /* * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the * machine address width. */ #define LG_RTREE_BITS_PER_LEVEL 4 #define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL) /* Maximum rtree height. */ #define RTREE_HEIGHT_MAX \ ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL) /* Used for two-stage lock-free node initialization. */ #define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1) /* * The node allocation callback function's argument is the number of contiguous * rtree_node_elm_t structures to allocate, and the resulting memory must be * zeroed. */ typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t); typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *); #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct rtree_node_elm_s { union { void *pun; rtree_node_elm_t *child; extent_node_t *val; }; }; struct rtree_level_s { /* * A non-NULL subtree points to a subtree rooted along the hypothetical * path to the leaf node corresponding to key 0. Depending on what keys * have been used to store to the tree, an arbitrary combination of * subtree pointers may remain NULL. * * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4. * This results in a 3-level tree, and the leftmost leaf can be directly * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding * 0x00000000) can be accessed via subtrees[1], and the remainder of the * tree can be accessed via subtrees[0]. * * levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...] * * levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ... ] * * levels[2] : [val(0x000000000000) | val(0x000000000001) | ...] * * This has practical implications on x64, which currently uses only the * lower 47 bits of virtual address space in userland, thus leaving * subtrees[0] unused and avoiding a level of tree traversal. */ union { void *subtree_pun; rtree_node_elm_t *subtree; }; /* Number of key bits distinguished by this level. */ unsigned bits; /* * Cumulative number of key bits distinguished by traversing to * corresponding tree level. */ unsigned cumbits; }; struct rtree_s { rtree_node_alloc_t *alloc; rtree_node_dalloc_t *dalloc; unsigned height; /* * Precomputed table used to convert from the number of leading 0 key * bits to which subtree level to start at. 
*/ unsigned start_level[RTREE_HEIGHT_MAX]; rtree_level_t levels[RTREE_HEIGHT_MAX]; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, rtree_node_dalloc_t *dalloc); void rtree_delete(rtree_t *rtree); rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree, unsigned level); rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE unsigned rtree_start_level(rtree_t *rtree, uintptr_t key); uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level); bool rtree_node_valid(rtree_node_elm_t *node); rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm, bool dependent); rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level, bool dependent); extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent); void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val); rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent); rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent); extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent); bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) JEMALLOC_ALWAYS_INLINE unsigned rtree_start_level(rtree_t *rtree, uintptr_t key) { unsigned start_level; if (unlikely(key == 0)) return (rtree->height - 1); start_level = rtree->start_level[lg_floor(key) >> LG_RTREE_BITS_PER_LEVEL]; assert(start_level < rtree->height); return (start_level); } JEMALLOC_ALWAYS_INLINE uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level) { return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - rtree->levels[level].cumbits)) & ((ZU(1) << rtree->levels[level].bits) - 1)); } JEMALLOC_ALWAYS_INLINE bool rtree_node_valid(rtree_node_elm_t *node) { return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING); } JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * rtree_child_tryread(rtree_node_elm_t *elm, bool dependent) { rtree_node_elm_t *child; /* Double-checked read (first read may be stale. */ child = elm->child; if (!dependent && !rtree_node_valid(child)) child = atomic_read_p(&elm->pun); assert(!dependent || child != NULL); return (child); } JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level, bool dependent) { rtree_node_elm_t *child; child = rtree_child_tryread(elm, dependent); if (!dependent && unlikely(!rtree_node_valid(child))) child = rtree_child_read_hard(rtree, elm, level); assert(!dependent || child != NULL); return (child); } JEMALLOC_ALWAYS_INLINE extent_node_t * rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent) { if (dependent) { /* * Reading a val on behalf of a pointer to a valid allocation is * guaranteed to be a clean read even without synchronization, * because the rtree update became visible in memory before the * pointer came into existence. */ return (elm->val); } else { /* * An arbitrary read, e.g. on behalf of ivsalloc(), may not be * dependent on a previous rtree write, which means a stale read * could result if synchronization were omitted here. 
*/ return (atomic_read_p(&elm->pun)); } } JEMALLOC_INLINE void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val) { atomic_write_p(&elm->pun, val); } JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent) { rtree_node_elm_t *subtree; /* Double-checked read (first read may be stale. */ subtree = rtree->levels[level].subtree; if (!dependent && unlikely(!rtree_node_valid(subtree))) subtree = atomic_read_p(&rtree->levels[level].subtree_pun); assert(!dependent || subtree != NULL); return (subtree); } JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent) { rtree_node_elm_t *subtree; subtree = rtree_subtree_tryread(rtree, level, dependent); if (!dependent && unlikely(!rtree_node_valid(subtree))) subtree = rtree_subtree_read_hard(rtree, level); assert(!dependent || subtree != NULL); return (subtree); } JEMALLOC_ALWAYS_INLINE extent_node_t * rtree_get(rtree_t *rtree, uintptr_t key, bool dependent) { uintptr_t subkey; unsigned start_level; rtree_node_elm_t *node; start_level = rtree_start_level(rtree, key); node = rtree_subtree_tryread(rtree, start_level, dependent); #define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height) switch (start_level + RTREE_GET_BIAS) { #define RTREE_GET_SUBTREE(level) \ case level: \ assert(level < (RTREE_HEIGHT_MAX-1)); \ if (!dependent && unlikely(!rtree_node_valid(node))) \ return (NULL); \ subkey = rtree_subkey(rtree, key, level - \ RTREE_GET_BIAS); \ node = rtree_child_tryread(&node[subkey], dependent); \ /* Fall through. */ #define RTREE_GET_LEAF(level) \ case level: \ assert(level == (RTREE_HEIGHT_MAX-1)); \ if (!dependent && unlikely(!rtree_node_valid(node))) \ return (NULL); \ subkey = rtree_subkey(rtree, key, level - \ RTREE_GET_BIAS); \ /* \ * node is a leaf, so it contains values rather than \ * child pointers. \ */ \ return (rtree_val_read(rtree, &node[subkey], \ dependent)); #if RTREE_HEIGHT_MAX > 1 RTREE_GET_SUBTREE(0) #endif #if RTREE_HEIGHT_MAX > 2 RTREE_GET_SUBTREE(1) #endif #if RTREE_HEIGHT_MAX > 3 RTREE_GET_SUBTREE(2) #endif #if RTREE_HEIGHT_MAX > 4 RTREE_GET_SUBTREE(3) #endif #if RTREE_HEIGHT_MAX > 5 RTREE_GET_SUBTREE(4) #endif #if RTREE_HEIGHT_MAX > 6 RTREE_GET_SUBTREE(5) #endif #if RTREE_HEIGHT_MAX > 7 RTREE_GET_SUBTREE(6) #endif #if RTREE_HEIGHT_MAX > 8 RTREE_GET_SUBTREE(7) #endif #if RTREE_HEIGHT_MAX > 9 RTREE_GET_SUBTREE(8) #endif #if RTREE_HEIGHT_MAX > 10 RTREE_GET_SUBTREE(9) #endif #if RTREE_HEIGHT_MAX > 11 RTREE_GET_SUBTREE(10) #endif #if RTREE_HEIGHT_MAX > 12 RTREE_GET_SUBTREE(11) #endif #if RTREE_HEIGHT_MAX > 13 RTREE_GET_SUBTREE(12) #endif #if RTREE_HEIGHT_MAX > 14 RTREE_GET_SUBTREE(13) #endif #if RTREE_HEIGHT_MAX > 15 RTREE_GET_SUBTREE(14) #endif #if RTREE_HEIGHT_MAX > 16 # error Unsupported RTREE_HEIGHT_MAX #endif RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1) #undef RTREE_GET_SUBTREE #undef RTREE_GET_LEAF default: not_reached(); } #undef RTREE_GET_BIAS not_reached(); } JEMALLOC_INLINE bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val) { uintptr_t subkey; unsigned i, start_level; rtree_node_elm_t *node, *child; start_level = rtree_start_level(rtree, key); node = rtree_subtree_read(rtree, start_level, false); if (node == NULL) return (true); for (i = start_level; /**/; i++, node = child) { subkey = rtree_subkey(rtree, key, i); if (i == rtree->height - 1) { /* * node is a leaf, so it contains values rather than * child pointers. 
*/ rtree_val_write(rtree, &node[subkey], val); return (false); } assert(i + 1 < rtree->height); child = rtree_child_read(rtree, &node[subkey], i, false); if (child == NULL) return (true); } not_reached(); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
10,608
27.907357
80
h
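A hedged sketch of exercising the interface declared above; the calloc()/free()-backed callbacks and the 32-bit key width are illustrative stand-ins for jemalloc's internal base-allocator hooks and its address-derived bit count.

#include <assert.h>
#include <stdlib.h>

static rtree_node_elm_t *
example_node_alloc(size_t nelms)
{
	/* The callback contract requires zeroed memory. */
	return ((rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t)));
}

static void
example_node_dalloc(rtree_node_elm_t *node)
{
	free(node);
}

static void
rtree_example(extent_node_t *node, uintptr_t key)
{
	rtree_t rtree;

	/* true indicates failure, per jemalloc convention. */
	if (rtree_new(&rtree, 32, example_node_alloc, example_node_dalloc))
		return;
	if (!rtree_set(&rtree, key, node)) {
		/* Non-dependent read: may legitimately observe NULL. */
		assert(rtree_get(&rtree, key, false) == node);
		/* Clear the mapping by storing NULL. */
		rtree_set(&rtree, key, NULL);
	}
	rtree_delete(&rtree);
}

The dependent/non-dependent split matters: a dependent read (looking up a pointer known to be a live allocation) can skip the atomic read entirely, as the comments in rtree_val_read() explain.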
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/size_classes.sh
#!/bin/sh # # Usage: size_classes.sh <lg_qarr> <lg_tmin> <lg_parr> <lg_g> # The following limits are chosen such that they cover all supported platforms. # Pointer sizes. lg_zarr="2 3" # Quanta. lg_qarr=$1 # The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)]. lg_tmin=$2 # Maximum lookup size. lg_kmax=12 # Page sizes. lg_parr=`echo $3 | tr ',' ' '` # Size class group size (number of size classes for each size doubling). lg_g=$4 pow2() { e=$1 pow2_result=1 while [ ${e} -gt 0 ] ; do pow2_result=$((${pow2_result} + ${pow2_result})) e=$((${e} - 1)) done } lg() { x=$1 lg_result=0 while [ ${x} -gt 1 ] ; do lg_result=$((${lg_result} + 1)) x=$((${x} / 2)) done } size_class() { index=$1 lg_grp=$2 lg_delta=$3 ndelta=$4 lg_p=$5 lg_kmax=$6 if [ ${lg_delta} -ge ${lg_p} ] ; then psz="yes" else pow2 ${lg_p}; p=${pow2_result} pow2 ${lg_grp}; grp=${pow2_result} pow2 ${lg_delta}; delta=${pow2_result} sz=$((${grp} + ${delta} * ${ndelta})) npgs=$((${sz} / ${p})) if [ ${sz} -eq $((${npgs} * ${p})) ] ; then psz="yes" else psz="no" fi fi lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta} if [ ${pow2_result} -lt ${ndelta} ] ; then rem="yes" else rem="no" fi lg_size=${lg_grp} if [ $((${lg_delta} + ${lg_ndelta})) -eq ${lg_grp} ] ; then lg_size=$((${lg_grp} + 1)) else lg_size=${lg_grp} rem="yes" fi if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then bin="yes" else bin="no" fi if [ ${lg_size} -lt ${lg_kmax} \ -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then lg_delta_lookup=${lg_delta} else lg_delta_lookup="no" fi printf ' SC(%3d, %6d, %8d, %6d, %3s, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${lg_delta_lookup} # Defined upon return: # - psz ("yes" or "no") # - bin ("yes" or "no") # - lg_delta_lookup (${lg_delta} or "no") } sep_line() { echo " \\" } size_classes() { lg_z=$1 lg_q=$2 lg_t=$3 lg_p=$4 lg_g=$5 pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result} pow2 ${lg_g}; g=${pow2_result} echo "#define SIZE_CLASSES \\" echo " /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \\" ntbins=0 nlbins=0 lg_tiny_maxclass='"NA"' nbins=0 npsizes=0 # Tiny size classes. ndelta=0 index=0 lg_grp=${lg_t} lg_delta=${lg_grp} while [ ${lg_grp} -lt ${lg_q} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} if [ ${lg_delta_lookup} != "no" ] ; then nlbins=$((${index} + 1)) fi if [ ${psz} = "yes" ] ; then npsizes=$((${npsizes} + 1)) fi if [ ${bin} != "no" ] ; then nbins=$((${index} + 1)) fi ntbins=$((${ntbins} + 1)) lg_tiny_maxclass=${lg_grp} # Final written value is correct. index=$((${index} + 1)) lg_delta=${lg_grp} lg_grp=$((${lg_grp} + 1)) done # First non-tiny group. if [ ${ntbins} -gt 0 ] ; then sep_line # The first size class has an unusual encoding, because the size has to be # split between grp and delta*ndelta. lg_grp=$((${lg_grp} - 1)) ndelta=1 size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} index=$((${index} + 1)) lg_grp=$((${lg_grp} + 1)) lg_delta=$((${lg_delta} + 1)) if [ ${psz} = "yes" ] ; then npsizes=$((${npsizes} + 1)) fi fi while [ ${ndelta} -lt ${g} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} index=$((${index} + 1)) ndelta=$((${ndelta} + 1)) if [ ${psz} = "yes" ] ; then npsizes=$((${npsizes} + 1)) fi done # All remaining groups. 
lg_grp=$((${lg_grp} + ${lg_g})) while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do sep_line ndelta=1 if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then ndelta_limit=$((${g} - 1)) else ndelta_limit=${g} fi while [ ${ndelta} -le ${ndelta_limit} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} if [ ${lg_delta_lookup} != "no" ] ; then nlbins=$((${index} + 1)) # Final written value is correct: lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" fi if [ ${psz} = "yes" ] ; then npsizes=$((${npsizes} + 1)) fi if [ ${bin} != "no" ] ; then nbins=$((${index} + 1)) # Final written value is correct: small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" if [ ${lg_g} -gt 0 ] ; then lg_large_minclass=$((${lg_grp} + 1)) else lg_large_minclass=$((${lg_grp} + 2)) fi fi # Final written value is correct: huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" index=$((${index} + 1)) ndelta=$((${ndelta} + 1)) done lg_grp=$((${lg_grp} + 1)) lg_delta=$((${lg_delta} + 1)) done echo nsizes=${index} # Defined upon completion: # - ntbins # - nlbins # - nbins # - nsizes # - npsizes # - lg_tiny_maxclass # - lookup_maxclass # - small_maxclass # - lg_large_minclass # - huge_maxclass } cat <<EOF /* This file was automatically generated by size_classes.sh. */ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to * be defined prior to inclusion, and it in turn defines: * * LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling. * SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz, * bin, lg_delta_lookup) tuples. * index: Size class index. * lg_grp: Lg group base size (no deltas added). * lg_delta: Lg delta to previous size class. * ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta * psz: 'yes' if a multiple of the page size, 'no' otherwise. * bin: 'yes' if a small bin size class, 'no' otherwise. * lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no' * otherwise. * NTBINS: Number of tiny bins. * NLBINS: Number of bins supported by the lookup table. * NBINS: Number of small size class bins. * NSIZES: Number of size classes. * NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE). * LG_TINY_MAXCLASS: Lg of maximum tiny size class. * LOOKUP_MAXCLASS: Maximum size class included in lookup table. * SMALL_MAXCLASS: Maximum small size class. * LG_LARGE_MINCLASS: Lg of minimum large size class. * HUGE_MAXCLASS: Maximum (huge) size class. */ #define LG_SIZE_CLASS_GROUP ${lg_g} EOF for lg_z in ${lg_zarr} ; do for lg_q in ${lg_qarr} ; do lg_t=${lg_tmin} while [ ${lg_t} -le ${lg_q} ] ; do # Iterate through page sizes and compute how many bins there are. 
for lg_p in ${lg_parr} ; do echo "#if (LG_SIZEOF_PTR == ${lg_z} && LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})" size_classes ${lg_z} ${lg_q} ${lg_t} ${lg_p} ${lg_g} echo "#define SIZE_CLASSES_DEFINED" echo "#define NTBINS ${ntbins}" echo "#define NLBINS ${nlbins}" echo "#define NBINS ${nbins}" echo "#define NSIZES ${nsizes}" echo "#define NPSIZES ${npsizes}" echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}" echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}" echo "#define SMALL_MAXCLASS ${small_maxclass}" echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}" echo "#define HUGE_MAXCLASS ${huge_maxclass}" echo "#endif" echo done lg_t=$((${lg_t} + 1)) done done done cat <<EOF #ifndef SIZE_CLASSES_DEFINED # error "No size class definitions match configuration" #endif #undef SIZE_CLASSES_DEFINED /* * The size2index_tab lookup table uses uint8_t to encode each bin index, so we * cannot support more than 256 small size classes. Further constrain NBINS to * 255 since all small size classes, plus a "not small" size class must be * stored in 8 bits of arena_chunk_map_bits_t's bits field. */ #if (NBINS > 255) # error "Too many small size classes" #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ EOF
8,909
26.931034
131
sh
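To make the generated table concrete, a hedged decoding of one SC() tuple; the field values here are chosen for illustration (assuming 4 KiB pages), not copied from any particular configuration's output.

/*
 * SC(  8,      5,        3,      2,  no, yes,  3)
 *
 * size == (1 << lg_grp) + (ndelta << lg_delta)
 *      == (1 << 5) + (2 << 3) == 32 + 16 == 48 bytes,
 * which is not a multiple of the page size (psz == no), is served by a
 * small bin (bin == yes), and is reachable via the lookup table
 * (lg_delta_lookup == 3).  The same arithmetic, as a macro:
 */
#define SC_SIZE(lg_grp, lg_delta, ndelta) \
    ((((size_t)1) << (lg_grp)) + (((size_t)(ndelta)) << (lg_delta)))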
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/private_namespace.sh
#!/bin/sh

for symbol in `cat $1` ; do
  echo "#define ${symbol} JEMALLOC_N(${symbol})"
done
93
14.666667
48
sh
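A hedged illustration of the script's output for one input symbol (the symbol name is hypothetical, and the exact JEMALLOC_N() expansion is configure-dependent): each line in the symbol file becomes a mangling define, so internal symbols are renamed into the private namespace and cannot collide with application symbols.

/* For an input file containing the line "arena_init", the script emits: */
#define arena_init JEMALLOC_N(arena_init)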
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/stats.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct malloc_huge_stats_s malloc_huge_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct tcache_bin_stats_s {
	/*
	 * Number of allocation requests that corresponded to the size of this
	 * bin.
	 */
	uint64_t	nrequests;
};

struct malloc_bin_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the bin.  Note that tcache may allocate an object, then recycle it
	 * many times, resulting in many increments to nrequests, but only one
	 * each to nmalloc and ndalloc.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to the size of this
	 * bin.  This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t	nrequests;

	/*
	 * Current number of regions of this size class, including regions
	 * currently cached by tcache.
	 */
	size_t		curregs;

	/* Number of tcache fills from this bin. */
	uint64_t	nfills;

	/* Number of tcache flushes to this bin. */
	uint64_t	nflushes;

	/* Total number of runs created for this bin's size class. */
	uint64_t	nruns;

	/*
	 * Total number of runs reused by extracting them from the runs tree
	 * for this bin's size class.
	 */
	uint64_t	reruns;

	/* Current number of runs in this bin. */
	size_t		curruns;
};

struct malloc_large_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.  Note that tcache may allocate an object, then recycle
	 * it many times, resulting in many increments to nrequests, but only
	 * one each to nmalloc and ndalloc.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t	nrequests;

	/*
	 * Current number of runs of this size class, including runs currently
	 * cached by tcache.
	 */
	size_t		curruns;
};

struct malloc_huge_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;

	/* Current number of (multi-)chunk allocations of this size class. */
	size_t		curhchunks;
};

struct arena_stats_s {
	/* Number of bytes currently mapped. */
	size_t		mapped;

	/*
	 * Number of bytes currently retained as a side effect of munmap()
	 * being disabled/bypassed.  Retained bytes are technically mapped
	 * (though always decommitted or purged), but they are excluded from
	 * the mapped statistic (above).
	 */
	size_t		retained;

	/*
	 * Total number of purge sweeps, total number of madvise calls made,
	 * and total pages purged in order to keep dirty unused memory under
	 * control.
	 */
	uint64_t	npurge;
	uint64_t	nmadvise;
	uint64_t	purged;

	/*
	 * Number of bytes currently mapped purely for metadata purposes, and
	 * number of bytes currently allocated for internal metadata.
	 */
	size_t		metadata_mapped;
	size_t		metadata_allocated; /* Protected via atomic_*_z(). */

	/* Per-size-category statistics. */
	size_t		allocated_large;
	uint64_t	nmalloc_large;
	uint64_t	ndalloc_large;
	uint64_t	nrequests_large;

	size_t		allocated_huge;
	uint64_t	nmalloc_huge;
	uint64_t	ndalloc_huge;

	/* One element for each large size class. */
	malloc_large_stats_t	*lstats;

	/* One element for each huge size class. */
	malloc_huge_stats_t	*hstats;
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool	opt_stats_print;

extern size_t	stats_cactive;

void	stats_print(void (*write)(void *, const char *), void *cbopaque,
    const char *opts);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
size_t	stats_cactive_get(void);
void	stats_cactive_add(size_t size);
void	stats_cactive_sub(size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
JEMALLOC_INLINE size_t
stats_cactive_get(void)
{

	return (atomic_read_z(&stats_cactive));
}

JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{

	assert(size > 0);
	assert((size & chunksize_mask) == 0);

	atomic_add_z(&stats_cactive, size);
}

JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{

	assert(size > 0);
	assert((size & chunksize_mask) == 0);

	atomic_sub_z(&stats_cactive, size);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
5,028
24.39899
80
h
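A hedged sketch of the cactive discipline implied by the asserts above: updates are chunk-granular and atomic, so any thread can poll the counter without locking. track_chunk() is a hypothetical caller, not part of this header.

static void
track_chunk(size_t chunk_bytes)
{
	/* chunk_bytes must be a nonzero multiple of the chunk size. */
	stats_cactive_add(chunk_bytes);		/* On chunk allocation. */
	/* ... the chunk is in active use ... */
	stats_cactive_sub(chunk_bytes);		/* On chunk deallocation. */
}

static size_t
poll_cactive(void)
{
	/* Safe from any thread; backed by atomic_read_z(). */
	return (stats_cactive_get());
}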
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/util.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #ifdef _WIN32 # ifdef _WIN64 # define FMT64_PREFIX "ll" # define FMTPTR_PREFIX "ll" # else # define FMT64_PREFIX "ll" # define FMTPTR_PREFIX "" # endif # define FMTd32 "d" # define FMTu32 "u" # define FMTx32 "x" # define FMTd64 FMT64_PREFIX "d" # define FMTu64 FMT64_PREFIX "u" # define FMTx64 FMT64_PREFIX "x" # define FMTdPTR FMTPTR_PREFIX "d" # define FMTuPTR FMTPTR_PREFIX "u" # define FMTxPTR FMTPTR_PREFIX "x" #else # include <inttypes.h> # define FMTd32 PRId32 # define FMTu32 PRIu32 # define FMTx32 PRIx32 # define FMTd64 PRId64 # define FMTu64 PRIu64 # define FMTx64 PRIx64 # define FMTdPTR PRIdPTR # define FMTuPTR PRIuPTR # define FMTxPTR PRIxPTR #endif /* Size of stack-allocated buffer passed to buferror(). */ #define BUFERROR_BUF 64 /* * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be * large enough for all possible uses within jemalloc. */ #define MALLOC_PRINTF_BUFSIZE 4096 /* Junk fill patterns. */ #ifndef JEMALLOC_ALLOC_JUNK # define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) #endif #ifndef JEMALLOC_FREE_JUNK # define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) #endif /* * Wrap a cpp argument that contains commas such that it isn't broken up into * multiple arguments. */ #define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ /* * Silence compiler warnings due to uninitialized values. This is used * wherever the compiler fails to recognize that the variable is never used * uninitialized. */ #ifdef JEMALLOC_CC_SILENCE # define JEMALLOC_CC_SILENCE_INIT(v) = v #else # define JEMALLOC_CC_SILENCE_INIT(v) #endif #ifdef __GNUC__ # define likely(x) __builtin_expect(!!(x), 1) # define unlikely(x) __builtin_expect(!!(x), 0) #else # define likely(x) !!(x) # define unlikely(x) !!(x) #endif #if !defined(JEMALLOC_INTERNAL_UNREACHABLE) # error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure #endif #define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() #include "jemalloc/internal/assert.h" /* Use to assert a particular configuration, e.g., cassert(config_debug). */ #define cassert(c) do { \ if (unlikely(!(c))) \ not_reached(); \ } while (0) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS int buferror(int err, char *buf, size_t buflen); uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base); void malloc_write(const char *s); /* * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating * point math. */ size_t malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap); size_t malloc_snprintf(char *str, size_t size, const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, va_list ap); void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); void malloc_printf(const char *format, ...) 
JEMALLOC_FORMAT_PRINTF(1, 2); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE unsigned ffs_llu(unsigned long long bitmap); unsigned ffs_lu(unsigned long bitmap); unsigned ffs_u(unsigned bitmap); unsigned ffs_zu(size_t bitmap); unsigned ffs_u64(uint64_t bitmap); unsigned ffs_u32(uint32_t bitmap); uint64_t pow2_ceil_u64(uint64_t x); uint32_t pow2_ceil_u32(uint32_t x); size_t pow2_ceil_zu(size_t x); unsigned lg_floor(size_t x); void set_errno(int errnum); int get_errno(void); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) /* Sanity check. */ #if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \ || !defined(JEMALLOC_INTERNAL_FFS) # error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure #endif JEMALLOC_ALWAYS_INLINE unsigned ffs_llu(unsigned long long bitmap) { return (JEMALLOC_INTERNAL_FFSLL(bitmap)); } JEMALLOC_ALWAYS_INLINE unsigned ffs_lu(unsigned long bitmap) { return (JEMALLOC_INTERNAL_FFSL(bitmap)); } JEMALLOC_ALWAYS_INLINE unsigned ffs_u(unsigned bitmap) { return (JEMALLOC_INTERNAL_FFS(bitmap)); } JEMALLOC_ALWAYS_INLINE unsigned ffs_zu(size_t bitmap) { #if LG_SIZEOF_PTR == LG_SIZEOF_INT return (ffs_u(bitmap)); #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG return (ffs_lu(bitmap)); #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG return (ffs_llu(bitmap)); #else #error No implementation for size_t ffs() #endif } JEMALLOC_ALWAYS_INLINE unsigned ffs_u64(uint64_t bitmap) { #if LG_SIZEOF_LONG == 3 return (ffs_lu(bitmap)); #elif LG_SIZEOF_LONG_LONG == 3 return (ffs_llu(bitmap)); #else #error No implementation for 64-bit ffs() #endif } JEMALLOC_ALWAYS_INLINE unsigned ffs_u32(uint32_t bitmap) { #if LG_SIZEOF_INT == 2 return (ffs_u(bitmap)); #else #error No implementation for 32-bit ffs() #endif return (ffs_u(bitmap)); } JEMALLOC_INLINE uint64_t pow2_ceil_u64(uint64_t x) { x--; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; x |= x >> 32; x++; return (x); } JEMALLOC_INLINE uint32_t pow2_ceil_u32(uint32_t x) { x--; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; x++; return (x); } /* Compute the smallest power of 2 that is >= x. */ JEMALLOC_INLINE size_t pow2_ceil_zu(size_t x) { #if (LG_SIZEOF_PTR == 3) return (pow2_ceil_u64(x)); #else return (pow2_ceil_u32(x)); #endif } #if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE unsigned lg_floor(size_t x) { size_t ret; assert(x != 0); asm ("bsr %1, %0" : "=r"(ret) // Outputs. : "r"(x) // Inputs. 
); assert(ret < UINT_MAX); return ((unsigned)ret); } #elif (defined(_MSC_VER)) JEMALLOC_INLINE unsigned lg_floor(size_t x) { unsigned long ret; assert(x != 0); #if (LG_SIZEOF_PTR == 3) _BitScanReverse64(&ret, x); #elif (LG_SIZEOF_PTR == 2) _BitScanReverse(&ret, x); #else # error "Unsupported type size for lg_floor()" #endif assert(ret < UINT_MAX); return ((unsigned)ret); } #elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) JEMALLOC_INLINE unsigned lg_floor(size_t x) { assert(x != 0); #if (LG_SIZEOF_PTR == LG_SIZEOF_INT) return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x)); #elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x)); #else # error "Unsupported type size for lg_floor()" #endif } #else JEMALLOC_INLINE unsigned lg_floor(size_t x) { assert(x != 0); x |= (x >> 1); x |= (x >> 2); x |= (x >> 4); x |= (x >> 8); x |= (x >> 16); #if (LG_SIZEOF_PTR == 3) x |= (x >> 32); #endif if (x == SIZE_T_MAX) return ((8 << LG_SIZEOF_PTR) - 1); x++; return (ffs_zu(x) - 2); } #endif /* Set error code. */ JEMALLOC_INLINE void set_errno(int errnum) { #ifdef _WIN32 SetLastError(errnum); #else errno = errnum; #endif } /* Get last error code. */ JEMALLOC_INLINE int get_errno(void) { #ifdef _WIN32 return (GetLastError()); #else return (errno); #endif } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
7,458
20.746356
80
h
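A few hand-worked, hedged examples of the bit-manipulation helpers' contracts; note that the ffs_*() family is 1-based, like POSIX ffs().

#include <assert.h>

static void
util_examples(void)
{
	assert(pow2_ceil_u32(5) == 8);	/* Smallest power of 2 >= 5. */
	assert(pow2_ceil_u32(8) == 8);	/* Powers of 2 map to themselves. */
	assert(lg_floor(9) == 3);	/* floor(lg(9)) == 3. */
	assert(ffs_u32(0x8) == 4);	/* Lowest set bit is bit 3 --> 4. */
	assert(ffs_u32(0x1) == 1);
}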
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/tcache.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct tcache_bin_info_s tcache_bin_info_t; typedef struct tcache_bin_s tcache_bin_t; typedef struct tcache_s tcache_t; typedef struct tcaches_s tcaches_t; /* * tcache pointers close to NULL are used to encode state information that is * used for two purposes: preventing thread caching on a per thread basis and * cleaning up during thread shutdown. */ #define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) #define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) #define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) #define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY /* * Absolute minimum number of cache slots for each small bin. */ #define TCACHE_NSLOTS_SMALL_MIN 20 /* * Absolute maximum number of cache slots for each small bin in the thread * cache. This is an additional constraint beyond that imposed as: twice the * number of regions per run for this size class. * * This constant must be an even number. */ #define TCACHE_NSLOTS_SMALL_MAX 200 /* Number of cache slots for large size classes. */ #define TCACHE_NSLOTS_LARGE 20 /* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ #define LG_TCACHE_MAXCLASS_DEFAULT 15 /* * TCACHE_GC_SWEEP is the approximate number of allocation events between * full GC sweeps. Integer rounding may cause the actual number to be * slightly higher, since GC is performed incrementally. */ #define TCACHE_GC_SWEEP 8192 /* Number of tcache allocation/deallocation events between incremental GCs. */ #define TCACHE_GC_INCR \ ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS typedef enum { tcache_enabled_false = 0, /* Enable cast to/from bool. */ tcache_enabled_true = 1, tcache_enabled_default = 2 } tcache_enabled_t; /* * Read-only information associated with each element of tcache_t's tbins array * is stored separately, mainly to reduce memory usage. */ struct tcache_bin_info_s { unsigned ncached_max; /* Upper limit on ncached. */ }; struct tcache_bin_s { tcache_bin_stats_t tstats; int low_water; /* Min # cached since last GC. */ unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */ unsigned ncached; /* # of cached objects. */ /* * To make use of adjacent cacheline prefetch, the items in the avail * stack goes to higher address for newer allocations. avail points * just above the available space, which means that * avail[-ncached, ... -1] are available items and the lowest item will * be allocated first. */ void **avail; /* Stack of available objects. */ }; struct tcache_s { ql_elm(tcache_t) link; /* Used for aggregating stats. */ uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */ ticker_t gc_ticker; /* Drives incremental GC. */ szind_t next_gc_bin; /* Next bin to GC. */ tcache_bin_t tbins[1]; /* Dynamically sized. */ /* * The pointer stacks associated with tbins follow as a contiguous * array. During tcache initialization, the avail pointer in each * element of tbins is initialized to point to the proper offset within * this array. */ }; /* Linkage for list of available (previously used) explicit tcache IDs. 
*/ struct tcaches_s { union { tcache_t *tcache; tcaches_t *next; }; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern bool opt_tcache; extern ssize_t opt_lg_tcache_max; extern tcache_bin_info_t *tcache_bin_info; /* * Number of tcache bins. There are NBINS small-object bins, plus 0 or more * large-object bins. */ extern unsigned nhbins; /* Maximum cached size class. */ extern size_t tcache_maxclass; /* * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are * completely disjoint from this data structure. tcaches starts off as a sparse * array, so it has no physical memory footprint until individual pages are * touched. This allows the entire array to be allocated the first time an * explicit tcache is created without a disproportionate impact on memory usage. */ extern tcaches_t *tcaches; size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, tcache_bin_t *tbin, szind_t binind, bool *tcache_success); void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, szind_t binind, unsigned rem); void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, unsigned rem, tcache_t *tcache); void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena, arena_t *newarena); tcache_t *tcache_get_hard(tsd_t *tsd); tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena); void tcache_cleanup(tsd_t *tsd); void tcache_enabled_cleanup(tsd_t *tsd); void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); bool tcaches_create(tsd_t *tsd, unsigned *r_ind); void tcaches_flush(tsd_t *tsd, unsigned ind); void tcaches_destroy(tsd_t *tsd, unsigned ind); bool tcache_boot(tsdn_t *tsdn); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void tcache_event(tsd_t *tsd, tcache_t *tcache); void tcache_flush(void); bool tcache_enabled_get(void); tcache_t *tcache_get(tsd_t *tsd, bool create); void tcache_enabled_set(bool enabled); void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success); void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, szind_t ind, bool zero, bool slow_path); void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, szind_t ind, bool zero, bool slow_path); void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, bool slow_path); void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size, bool slow_path); tcache_t *tcaches_get(tsd_t *tsd, unsigned ind); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_)) JEMALLOC_INLINE void tcache_flush(void) { tsd_t *tsd; cassert(config_tcache); tsd = tsd_fetch(); tcache_cleanup(tsd); } JEMALLOC_INLINE bool tcache_enabled_get(void) { tsd_t *tsd; tcache_enabled_t tcache_enabled; cassert(config_tcache); tsd = tsd_fetch(); tcache_enabled = tsd_tcache_enabled_get(tsd); if (tcache_enabled == tcache_enabled_default) { tcache_enabled = (tcache_enabled_t)opt_tcache; tsd_tcache_enabled_set(tsd, tcache_enabled); } return ((bool)tcache_enabled); } JEMALLOC_INLINE void tcache_enabled_set(bool enabled) { tsd_t *tsd; tcache_enabled_t tcache_enabled; 
cassert(config_tcache); tsd = tsd_fetch(); tcache_enabled = (tcache_enabled_t)enabled; tsd_tcache_enabled_set(tsd, tcache_enabled); if (!enabled) tcache_cleanup(tsd); } JEMALLOC_ALWAYS_INLINE tcache_t * tcache_get(tsd_t *tsd, bool create) { tcache_t *tcache; if (!config_tcache) return (NULL); tcache = tsd_tcache_get(tsd); if (!create) return (tcache); if (unlikely(tcache == NULL) && tsd_nominal(tsd)) { tcache = tcache_get_hard(tsd); tsd_tcache_set(tsd, tcache); } return (tcache); } JEMALLOC_ALWAYS_INLINE void tcache_event(tsd_t *tsd, tcache_t *tcache) { if (TCACHE_GC_INCR == 0) return; if (unlikely(ticker_tick(&tcache->gc_ticker))) tcache_event_hard(tsd, tcache); } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) { void *ret; if (unlikely(tbin->ncached == 0)) { tbin->low_water = -1; *tcache_success = false; return (NULL); } /* * tcache_success (instead of ret) should be checked upon the return of * this function. We avoid checking (ret == NULL) because there is * never a null stored on the avail stack (which is unknown to the * compiler), and eagerly checking ret would cause pipeline stall * (waiting for the cacheline). */ *tcache_success = true; ret = *(tbin->avail - tbin->ncached); tbin->ncached--; if (unlikely((int)tbin->ncached < tbin->low_water)) tbin->low_water = tbin->ncached; return (ret); } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, szind_t binind, bool zero, bool slow_path) { void *ret; tcache_bin_t *tbin; bool tcache_success; size_t usize JEMALLOC_CC_SILENCE_INIT(0); assert(binind < NBINS); tbin = &tcache->tbins[binind]; ret = tcache_alloc_easy(tbin, &tcache_success); assert(tcache_success == (ret != NULL)); if (unlikely(!tcache_success)) { bool tcache_hard_success; arena = arena_choose(tsd, arena); if (unlikely(arena == NULL)) return (NULL); ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, tbin, binind, &tcache_hard_success); if (tcache_hard_success == false) return (NULL); } assert(ret); /* * Only compute usize if required. The checks in the following if * statement are all static. */ if (config_prof || (slow_path && config_fill) || unlikely(zero)) { usize = index2size(binind); assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize); } if (likely(!zero)) { if (slow_path && config_fill) { if (unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], false); } else if (unlikely(opt_zero)) memset(ret, 0, usize); } } else { if (slow_path && config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], true); } memset(ret, 0, usize); } if (config_stats) tbin->tstats.nrequests++; if (config_prof) tcache->prof_accumbytes += usize; tcache_event(tsd, tcache); return (ret); } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, szind_t binind, bool zero, bool slow_path) { void *ret; tcache_bin_t *tbin; bool tcache_success; assert(binind < nhbins); tbin = &tcache->tbins[binind]; ret = tcache_alloc_easy(tbin, &tcache_success); assert(tcache_success == (ret != NULL)); if (unlikely(!tcache_success)) { /* * Only allocate one large object at a time, because it's quite * expensive to create one and not use it. 
*/ arena = arena_choose(tsd, arena); if (unlikely(arena == NULL)) return (NULL); ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero); if (ret == NULL) return (NULL); } else { size_t usize JEMALLOC_CC_SILENCE_INIT(0); /* Only compute usize on demand */ if (config_prof || (slow_path && config_fill) || unlikely(zero)) { usize = index2size(binind); assert(usize <= tcache_maxclass); } if (config_prof && usize == LARGE_MINCLASS) { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret); size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> LG_PAGE); arena_mapbits_large_binind_set(chunk, pageind, BININD_INVALID); } if (likely(!zero)) { if (slow_path && config_fill) { if (unlikely(opt_junk_alloc)) { memset(ret, JEMALLOC_ALLOC_JUNK, usize); } else if (unlikely(opt_zero)) memset(ret, 0, usize); } } else memset(ret, 0, usize); if (config_stats) tbin->tstats.nrequests++; if (config_prof) tcache->prof_accumbytes += usize; } tcache_event(tsd, tcache); return (ret); } JEMALLOC_ALWAYS_INLINE void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, bool slow_path) { tcache_bin_t *tbin; tcache_bin_info_t *tbin_info; assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS); if (slow_path && config_fill && unlikely(opt_junk_free)) arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); tbin = &tcache->tbins[binind]; tbin_info = &tcache_bin_info[binind]; if (unlikely(tbin->ncached == tbin_info->ncached_max)) { tcache_bin_flush_small(tsd, tcache, tbin, binind, (tbin_info->ncached_max >> 1)); } assert(tbin->ncached < tbin_info->ncached_max); tbin->ncached++; *(tbin->avail - tbin->ncached) = ptr; tcache_event(tsd, tcache); } JEMALLOC_ALWAYS_INLINE void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size, bool slow_path) { szind_t binind; tcache_bin_t *tbin; tcache_bin_info_t *tbin_info; assert((size & PAGE_MASK) == 0); assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS); assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); binind = size2index(size); if (slow_path && config_fill && unlikely(opt_junk_free)) arena_dalloc_junk_large(ptr, size); tbin = &tcache->tbins[binind]; tbin_info = &tcache_bin_info[binind]; if (unlikely(tbin->ncached == tbin_info->ncached_max)) { tcache_bin_flush_large(tsd, tbin, binind, (tbin_info->ncached_max >> 1), tcache); } assert(tbin->ncached < tbin_info->ncached_max); tbin->ncached++; *(tbin->avail - tbin->ncached) = ptr; tcache_event(tsd, tcache); } JEMALLOC_ALWAYS_INLINE tcache_t * tcaches_get(tsd_t *tsd, unsigned ind) { tcaches_t *elm = &tcaches[ind]; if (unlikely(elm->tcache == NULL)) { elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd, NULL)); } return (elm->tcache); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
13,576
27.887234
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/base.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *base_alloc(tsdn_t *tsdn, size_t size); void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident, size_t *mapped); bool base_boot(void); void base_prefork(tsdn_t *tsdn); void base_postfork_parent(tsdn_t *tsdn); void base_postfork_child(tsdn_t *tsdn); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
911
34.076923
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/bitmap.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ #define LG_BITMAP_MAXBITS LG_RUN_MAXREGS #define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) typedef struct bitmap_level_s bitmap_level_t; typedef struct bitmap_info_s bitmap_info_t; typedef unsigned long bitmap_t; #define LG_SIZEOF_BITMAP LG_SIZEOF_LONG /* Number of bits per group. */ #define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) #define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) #define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) /* * Do some analysis on how big the bitmap is before we use a tree. For a brute * force linear search, if we would have to call ffs_lu() more than 2^3 times, * use a tree instead. */ #if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 # define USE_TREE #endif /* Number of groups required to store a given number of bits. */ #define BITMAP_BITS2GROUPS(nbits) \ ((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) /* * Number of groups required at a particular level for a given number of bits. */ #define BITMAP_GROUPS_L0(nbits) \ BITMAP_BITS2GROUPS(nbits) #define BITMAP_GROUPS_L1(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) #define BITMAP_GROUPS_L2(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) #define BITMAP_GROUPS_L3(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ BITMAP_BITS2GROUPS((nbits))))) /* * Assuming the number of levels, number of groups required for a given number * of bits. */ #define BITMAP_GROUPS_1_LEVEL(nbits) \ BITMAP_GROUPS_L0(nbits) #define BITMAP_GROUPS_2_LEVEL(nbits) \ (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) #define BITMAP_GROUPS_3_LEVEL(nbits) \ (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) #define BITMAP_GROUPS_4_LEVEL(nbits) \ (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) /* * Maximum number of groups required to support LG_BITMAP_MAXBITS. */ #ifdef USE_TREE #if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS # define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 # define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 # define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 # define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) #else # error "Unsupported bitmap size" #endif /* Maximum number of levels possible. */ #define BITMAP_MAX_LEVELS \ (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) #else /* USE_TREE */ #define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS) #endif /* USE_TREE */ #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct bitmap_level_s { /* Offset of this level's groups within the array of groups. */ size_t group_offset; }; struct bitmap_info_s { /* Logical number of bits in bitmap (stored at bottom level). */ size_t nbits; #ifdef USE_TREE /* Number of levels necessary for nbits. */ unsigned nlevels; /* * Only the first (nlevels+1) elements are used, and levels are ordered * bottom to top (e.g. the bottom level is stored in levels[0]). */ bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; #else /* USE_TREE */ /* Number of groups necessary for nbits. 
*/ size_t ngroups; #endif /* USE_TREE */ }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); size_t bitmap_size(const bitmap_info_t *binfo); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) JEMALLOC_INLINE bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) { #ifdef USE_TREE size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; bitmap_t rg = bitmap[rgoff]; /* The bitmap is full iff the root group is 0. */ return (rg == 0); #else size_t i; for (i = 0; i < binfo->ngroups; i++) { if (bitmap[i] != 0) return (false); } return (true); #endif } JEMALLOC_INLINE bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t g; assert(bit < binfo->nbits); goff = bit >> LG_BITMAP_GROUP_NBITS; g = bitmap[goff]; return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))); } JEMALLOC_INLINE void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; assert(bit < binfo->nbits); assert(!bitmap_get(bitmap, binfo, bit)); goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[goff]; g = *gp; assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; assert(bitmap_get(bitmap, binfo, bit)); #ifdef USE_TREE /* Propagate group state transitions up the tree. */ if (g == 0) { unsigned i; for (i = 1; i < binfo->nlevels; i++) { bit = goff; goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; if (g != 0) break; } } #endif } /* sfu: set first unset. */ JEMALLOC_INLINE size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { size_t bit; bitmap_t g; unsigned i; assert(!bitmap_full(bitmap, binfo)); #ifdef USE_TREE i = binfo->nlevels - 1; g = bitmap[binfo->levels[i].group_offset]; bit = ffs_lu(g) - 1; while (i > 0) { i--; g = bitmap[binfo->levels[i].group_offset + bit]; bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1); } #else i = 0; g = bitmap[0]; while ((bit = ffs_lu(g)) == 0) { i++; g = bitmap[i]; } bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); #endif bitmap_set(bitmap, binfo, bit); return (bit); } JEMALLOC_INLINE void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; UNUSED bool propagate; assert(bit < binfo->nbits); assert(bitmap_get(bitmap, binfo, bit)); goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[goff]; g = *gp; propagate = (g == 0); assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; assert(!bitmap_get(bitmap, binfo, bit)); #ifdef USE_TREE /* Propagate group state transitions up the tree. 
*/ if (propagate) { unsigned i; for (i = 1; i < binfo->nlevels; i++) { bit = goff; goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; propagate = (g == 0); assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; if (!propagate) break; } } #endif /* USE_TREE */ } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
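As a rough illustration of the non-USE_TREE branch of bitmap_sfu() above: the storage is inverted (a 1 bit means unset/free), so set-first-unset is an ffs() scan over the groups plus clearing the found bit. The sketch below assumes 32-bit unsigned groups and POSIX ffs(); the toy_ names are invented for the example.

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define TOY_NBITS	100
#define GROUP_NBITS	32	/* Assumes 32-bit unsigned. */
#define NGROUPS		((TOY_NBITS + GROUP_NBITS - 1) / GROUP_NBITS)

static unsigned toy_groups[NGROUPS];

static void
toy_init(void)
{
	unsigned i;

	for (i = 0; i < NGROUPS; i++)
		toy_groups[i] = ~0U;	/* 1 == unset (free), as above. */
}

/* Set-first-unset; caller must ensure the bitmap is not full. */
static unsigned
toy_sfu(void)
{
	unsigned i = 0;
	int bit;

	while ((bit = ffs((int)toy_groups[i])) == 0)
		i++;			/* Skip exhausted groups. */
	toy_groups[i] &= ~(1U << (bit - 1));	/* 0 == set (in use). */
	return (i * GROUP_NBITS + (unsigned)(bit - 1));
}

int
main(void)
{
	unsigned a, b;

	toy_init();
	a = toy_sfu();
	b = toy_sfu();
	printf("%u %u\n", a, b);	/* Prints: 0 1 */
	return (0);
}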
7,819
27.436364
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/ticker.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct ticker_s ticker_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct ticker_s { int32_t tick; int32_t nticks; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void ticker_init(ticker_t *ticker, int32_t nticks); void ticker_copy(ticker_t *ticker, const ticker_t *other); int32_t ticker_read(const ticker_t *ticker); bool ticker_ticks(ticker_t *ticker, int32_t nticks); bool ticker_tick(ticker_t *ticker); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_)) JEMALLOC_INLINE void ticker_init(ticker_t *ticker, int32_t nticks) { ticker->tick = nticks; ticker->nticks = nticks; } JEMALLOC_INLINE void ticker_copy(ticker_t *ticker, const ticker_t *other) { *ticker = *other; } JEMALLOC_INLINE int32_t ticker_read(const ticker_t *ticker) { return (ticker->tick); } JEMALLOC_INLINE bool ticker_ticks(ticker_t *ticker, int32_t nticks) { if (unlikely(ticker->tick < nticks)) { ticker->tick = ticker->nticks; return (true); } ticker->tick -= nticks; return(false); } JEMALLOC_INLINE bool ticker_tick(ticker_t *ticker) { return (ticker_ticks(ticker, 1)); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
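A usage-style sketch of the ticker contract above, e.g. how tcache_event() batches GC work: every call decrements the counter, and when it runs out the counter reloads and the caller is told the event fired, so with nticks == 3 the event fires on every fourth call. The toy_ re-implementation below is for illustration only.

#include <stdint.h>
#include <stdio.h>

typedef struct {
	int32_t tick;
	int32_t nticks;
} toy_ticker_t;

static void
toy_ticker_init(toy_ticker_t *t, int32_t nticks)
{
	t->tick = nticks;
	t->nticks = nticks;
}

/* Equivalent to ticker_ticks(t, 1): returns 1 when the event fires. */
static int
toy_ticker_tick(toy_ticker_t *t)
{
	if (t->tick < 1) {
		t->tick = t->nticks;	/* Reload for the next batch. */
		return (1);
	}
	t->tick--;
	return (0);
}

int
main(void)
{
	toy_ticker_t t;
	int i, fired = 0;

	toy_ticker_init(&t, 3);
	for (i = 0; i < 12; i++)
		fired += toy_ticker_tick(&t);
	printf("%d\n", fired);	/* 3: with nticks == 3, every 4th call. */
	return (0);
}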
1,698
21.355263
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/smoothstep.sh
#!/bin/sh # # Generate a discrete lookup table for a sigmoid function in the smoothstep # family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table # entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode # the entries using a binary fixed point representation. # # Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec> # # <variant> is in {smooth, smoother, smoothest}. # <nsteps> must be greater than zero. # <bfp> must be in [0..62]; reasonable values are roughly [10..30]. # <xprec> is x decimal precision. # <yprec> is y decimal precision. #set -x cmd="sh smoothstep.sh $*" variant=$1 nsteps=$2 bfp=$3 xprec=$4 yprec=$5 case "${variant}" in smooth) ;; smoother) ;; smoothest) ;; *) echo "Unsupported variant" exit 1 ;; esac smooth() { step=$1 y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` } smoother() { step=$1 y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` } smoothest() { step=$1 y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` } cat <<EOF /* * This file was generated by the following command: * $cmd */ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * This header defines a precomputed table based on the smoothstep family of * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0 * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so * that floating point math can be avoided. * * 3 2 * smoothstep(x) = -2x + 3x * * 5 4 3 * smootherstep(x) = 6x - 15x + 10x * * 7 6 5 4 * smootheststep(x) = -20x + 70x - 84x + 35x */ #define SMOOTHSTEP_VARIANT "${variant}" #define SMOOTHSTEP_NSTEPS ${nsteps} #define SMOOTHSTEP_BFP ${bfp} #define SMOOTHSTEP \\ /* STEP(step, h, x, y) */ \\ EOF s=1 while [ $s -le $nsteps ] ; do $variant ${s} x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` printf ' STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n' ${s} ${h} ${x} ${y} s=$((s+1)) done echo cat <<EOF #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ EOF
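For readers who do not speak dc: each generated STEP() entry encodes y = variant(s/nsteps) as the truncated fixed-point value y * 2^bfp. The C program below (compile with -lm) approximates the script's "smoother" variant in double precision; the script itself uses arbitrary-precision dc, so the low digits may differ, and nsteps = 200 with bfp = 24 are assumed example parameters rather than values taken from this repository.

#include <inttypes.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const int nsteps = 200, bfp = 24;	/* Assumed example parameters. */
	int s;

	for (s = 1; s <= nsteps; s += 50) {
		double x = (double)s / nsteps;
		/* smootherstep: 6x^5 - 15x^4 + 10x^3, in Horner form. */
		double y = ((6 * x - 15) * x + 10) * x * x * x;
		uint64_t h = (uint64_t)ldexp(y, bfp);	/* Truncate y * 2^bfp. */
		printf("STEP(%4d, UINT64_C(0x%016" PRIx64 "), %.3f, %.15f)\n",
		    s, h, x, y);
	}
	return (0);
}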
3,405
28.362069
154
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/prng.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * Simple linear congruential pseudo-random number generator: * * prng(y) = (a*x + c) % m * * where the following constants ensure maximal period: * * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. * c == Odd number (relatively prime to 2^n). * m == 2^32 * * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. * * This choice of m has the disadvantage that the quality of the bits is * proportional to bit position. For example, the lowest bit has a cycle of 2, * the next has a cycle of 4, etc. For this reason, we prefer to use the upper * bits. */ #define PRNG_A_32 UINT32_C(1103515241) #define PRNG_C_32 UINT32_C(12347) #define PRNG_A_64 UINT64_C(6364136223846793005) #define PRNG_C_64 UINT64_C(1442695040888963407) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE uint32_t prng_state_next_u32(uint32_t state); uint64_t prng_state_next_u64(uint64_t state); size_t prng_state_next_zu(size_t state); uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic); uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range); size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic); uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic); uint64_t prng_range_u64(uint64_t *state, uint64_t range); size_t prng_range_zu(size_t *state, size_t range, bool atomic); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_)) JEMALLOC_ALWAYS_INLINE uint32_t prng_state_next_u32(uint32_t state) { return ((state * PRNG_A_32) + PRNG_C_32); } JEMALLOC_ALWAYS_INLINE uint64_t prng_state_next_u64(uint64_t state) { return ((state * PRNG_A_64) + PRNG_C_64); } JEMALLOC_ALWAYS_INLINE size_t prng_state_next_zu(size_t state) { #if LG_SIZEOF_PTR == 2 return ((state * PRNG_A_32) + PRNG_C_32); #elif LG_SIZEOF_PTR == 3 return ((state * PRNG_A_64) + PRNG_C_64); #else #error Unsupported pointer size #endif } JEMALLOC_ALWAYS_INLINE uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic) { uint32_t ret, state1; assert(lg_range > 0); assert(lg_range <= 32); if (atomic) { uint32_t state0; do { state0 = atomic_read_uint32(state); state1 = prng_state_next_u32(state0); } while (atomic_cas_uint32(state, state0, state1)); } else { state1 = prng_state_next_u32(*state); *state = state1; } ret = state1 >> (32 - lg_range); return (ret); } /* 64-bit atomic operations cannot be supported on all relevant platforms. 
*/ JEMALLOC_ALWAYS_INLINE uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range) { uint64_t ret, state1; assert(lg_range > 0); assert(lg_range <= 64); state1 = prng_state_next_u64(*state); *state = state1; ret = state1 >> (64 - lg_range); return (ret); } JEMALLOC_ALWAYS_INLINE size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic) { size_t ret, state1; assert(lg_range > 0); assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR)); if (atomic) { size_t state0; do { state0 = atomic_read_z(state); state1 = prng_state_next_zu(state0); } while (atomic_cas_z(state, state0, state1)); } else { state1 = prng_state_next_zu(*state); *state = state1; } ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range); return (ret); } JEMALLOC_ALWAYS_INLINE uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic) { uint32_t ret; unsigned lg_range; assert(range > 1); /* Compute the ceiling of lg(range). */ lg_range = ffs_u32(pow2_ceil_u32(range)) - 1; /* Generate a result in [0..range) via repeated trial. */ do { ret = prng_lg_range_u32(state, lg_range, atomic); } while (ret >= range); return (ret); } JEMALLOC_ALWAYS_INLINE uint64_t prng_range_u64(uint64_t *state, uint64_t range) { uint64_t ret; unsigned lg_range; assert(range > 1); /* Compute the ceiling of lg(range). */ lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; /* Generate a result in [0..range) via repeated trial. */ do { ret = prng_lg_range_u64(state, lg_range); } while (ret >= range); return (ret); } JEMALLOC_ALWAYS_INLINE size_t prng_range_zu(size_t *state, size_t range, bool atomic) { size_t ret; unsigned lg_range; assert(range > 1); /* Compute the ceiling of lg(range). */ lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; /* Generate a result in [0..range) via repeated trial. */ do { ret = prng_lg_range_zu(state, lg_range, atomic); } while (ret >= range); return (ret); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
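The header's warning that low LCG bits are weak is easy to demonstrate: with m = 2^64 and odd a and c, the lowest output bit simply alternates with period 2. The standalone program below uses the same PRNG_A_64/PRNG_C_64 constants to show why prng_lg_range_u64() takes its result from the top bits.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_A	UINT64_C(6364136223846793005)	/* PRNG_A_64 */
#define TOY_C	UINT64_C(1442695040888963407)	/* PRNG_C_64 */

int
main(void)
{
	uint64_t state = 42;
	int i;

	for (i = 0; i < 8; i++) {
		state = state * TOY_A + TOY_C;	/* Implicitly mod 2^64. */
		/* Low bit alternates 0,1,0,1,...; top bits look random. */
		printf("low=%u top8=0x%02" PRIx64 "\n",
		    (unsigned)(state & 1), state >> 56);
	}
	return (0);
}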
5,087
23.461538
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/ph.h
/* * A Pairing Heap implementation. * * "The Pairing Heap: A New Form of Self-Adjusting Heap" * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf * * With auxiliary twopass list, described in a follow on paper. * * "Pairing Heaps: Experiments and Analysis" * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf * ******************************************************************************* */ #ifndef PH_H_ #define PH_H_ /* Node structure. */ #define phn(a_type) \ struct { \ a_type *phn_prev; \ a_type *phn_next; \ a_type *phn_lchild; \ } /* Root structure. */ #define ph(a_type) \ struct { \ a_type *ph_root; \ } /* Internal utility macros. */ #define phn_lchild_get(a_type, a_field, a_phn) \ (a_phn->a_field.phn_lchild) #define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \ a_phn->a_field.phn_lchild = a_lchild; \ } while (0) #define phn_next_get(a_type, a_field, a_phn) \ (a_phn->a_field.phn_next) #define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \ a_phn->a_field.phn_prev = a_prev; \ } while (0) #define phn_prev_get(a_type, a_field, a_phn) \ (a_phn->a_field.phn_prev) #define phn_next_set(a_type, a_field, a_phn, a_next) do { \ a_phn->a_field.phn_next = a_next; \ } while (0) #define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \ a_type *phn0child; \ \ assert(a_phn0 != NULL); \ assert(a_phn1 != NULL); \ assert(a_cmp(a_phn0, a_phn1) <= 0); \ \ phn_prev_set(a_type, a_field, a_phn1, a_phn0); \ phn0child = phn_lchild_get(a_type, a_field, a_phn0); \ phn_next_set(a_type, a_field, a_phn1, phn0child); \ if (phn0child != NULL) \ phn_prev_set(a_type, a_field, phn0child, a_phn1); \ phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \ } while (0) #define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \ if (a_phn0 == NULL) \ r_phn = a_phn1; \ else if (a_phn1 == NULL) \ r_phn = a_phn0; \ else if (a_cmp(a_phn0, a_phn1) < 0) { \ phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \ a_cmp); \ r_phn = a_phn0; \ } else { \ phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \ a_cmp); \ r_phn = a_phn1; \ } \ } while (0) #define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \ a_type *head = NULL; \ a_type *tail = NULL; \ a_type *phn0 = a_phn; \ a_type *phn1 = phn_next_get(a_type, a_field, phn0); \ \ /* \ * Multipass merge, wherein the first two elements of a FIFO \ * are repeatedly merged, and each result is appended to the \ * singly linked FIFO, until the FIFO contains only a single \ * element. We start with a sibling list but no reference to \ * its tail, so we do a single pass over the sibling list to \ * populate the FIFO. 
\ */ \ if (phn1 != NULL) { \ a_type *phnrest = phn_next_get(a_type, a_field, phn1); \ if (phnrest != NULL) \ phn_prev_set(a_type, a_field, phnrest, NULL); \ phn_prev_set(a_type, a_field, phn0, NULL); \ phn_next_set(a_type, a_field, phn0, NULL); \ phn_prev_set(a_type, a_field, phn1, NULL); \ phn_next_set(a_type, a_field, phn1, NULL); \ phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \ head = tail = phn0; \ phn0 = phnrest; \ while (phn0 != NULL) { \ phn1 = phn_next_get(a_type, a_field, phn0); \ if (phn1 != NULL) { \ phnrest = phn_next_get(a_type, a_field, \ phn1); \ if (phnrest != NULL) { \ phn_prev_set(a_type, a_field, \ phnrest, NULL); \ } \ phn_prev_set(a_type, a_field, phn0, \ NULL); \ phn_next_set(a_type, a_field, phn0, \ NULL); \ phn_prev_set(a_type, a_field, phn1, \ NULL); \ phn_next_set(a_type, a_field, phn1, \ NULL); \ phn_merge(a_type, a_field, phn0, phn1, \ a_cmp, phn0); \ phn_next_set(a_type, a_field, tail, \ phn0); \ tail = phn0; \ phn0 = phnrest; \ } else { \ phn_next_set(a_type, a_field, tail, \ phn0); \ tail = phn0; \ phn0 = NULL; \ } \ } \ phn0 = head; \ phn1 = phn_next_get(a_type, a_field, phn0); \ if (phn1 != NULL) { \ while (true) { \ head = phn_next_get(a_type, a_field, \ phn1); \ assert(phn_prev_get(a_type, a_field, \ phn0) == NULL); \ phn_next_set(a_type, a_field, phn0, \ NULL); \ assert(phn_prev_get(a_type, a_field, \ phn1) == NULL); \ phn_next_set(a_type, a_field, phn1, \ NULL); \ phn_merge(a_type, a_field, phn0, phn1, \ a_cmp, phn0); \ if (head == NULL) \ break; \ phn_next_set(a_type, a_field, tail, \ phn0); \ tail = phn0; \ phn0 = head; \ phn1 = phn_next_get(a_type, a_field, \ phn0); \ } \ } \ } \ r_phn = phn0; \ } while (0) #define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \ a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \ if (phn != NULL) { \ phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \ phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \ phn_prev_set(a_type, a_field, phn, NULL); \ ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \ assert(phn_next_get(a_type, a_field, phn) == NULL); \ phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \ a_ph->ph_root); \ } \ } while (0) #define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \ a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \ if (lchild == NULL) \ r_phn = NULL; \ else { \ ph_merge_siblings(a_type, a_field, lchild, a_cmp, \ r_phn); \ } \ } while (0) /* * The ph_proto() macro generates function prototypes that correspond to the * functions generated by an equivalently parameterized call to ph_gen(). */ #define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \ a_attr void a_prefix##new(a_ph_type *ph); \ a_attr bool a_prefix##empty(a_ph_type *ph); \ a_attr a_type *a_prefix##first(a_ph_type *ph); \ a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \ a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \ a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn); /* * The ph_gen() macro generates a type-specific pairing heap implementation, * based on the above cpp macros. 
*/ #define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \ a_attr void \ a_prefix##new(a_ph_type *ph) \ { \ \ memset(ph, 0, sizeof(ph(a_type))); \ } \ a_attr bool \ a_prefix##empty(a_ph_type *ph) \ { \ \ return (ph->ph_root == NULL); \ } \ a_attr a_type * \ a_prefix##first(a_ph_type *ph) \ { \ \ if (ph->ph_root == NULL) \ return (NULL); \ ph_merge_aux(a_type, a_field, ph, a_cmp); \ return (ph->ph_root); \ } \ a_attr void \ a_prefix##insert(a_ph_type *ph, a_type *phn) \ { \ \ memset(&phn->a_field, 0, sizeof(phn(a_type))); \ \ /* \ * Treat the root as an aux list during insertion, and lazily \ * merge during a_prefix##remove_first(). For elements that \ * are inserted, then removed via a_prefix##remove() before the \ * aux list is ever processed, this makes insert/remove \ * constant-time, whereas eager merging would make insert \ * O(log n). \ */ \ if (ph->ph_root == NULL) \ ph->ph_root = phn; \ else { \ phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \ a_field, ph->ph_root)); \ if (phn_next_get(a_type, a_field, ph->ph_root) != \ NULL) { \ phn_prev_set(a_type, a_field, \ phn_next_get(a_type, a_field, ph->ph_root), \ phn); \ } \ phn_prev_set(a_type, a_field, phn, ph->ph_root); \ phn_next_set(a_type, a_field, ph->ph_root, phn); \ } \ } \ a_attr a_type * \ a_prefix##remove_first(a_ph_type *ph) \ { \ a_type *ret; \ \ if (ph->ph_root == NULL) \ return (NULL); \ ph_merge_aux(a_type, a_field, ph, a_cmp); \ \ ret = ph->ph_root; \ \ ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ ph->ph_root); \ \ return (ret); \ } \ a_attr void \ a_prefix##remove(a_ph_type *ph, a_type *phn) \ { \ a_type *replace, *parent; \ \ /* \ * We can delete from aux list without merging it, but we need \ * to merge if we are dealing with the root node. \ */ \ if (ph->ph_root == phn) { \ ph_merge_aux(a_type, a_field, ph, a_cmp); \ if (ph->ph_root == phn) { \ ph_merge_children(a_type, a_field, ph->ph_root, \ a_cmp, ph->ph_root); \ return; \ } \ } \ \ /* Get parent (if phn is leftmost child) before mutating. */ \ if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \ if (phn_lchild_get(a_type, a_field, parent) != phn) \ parent = NULL; \ } \ /* Find a possible replacement node, and link to parent. */ \ ph_merge_children(a_type, a_field, phn, a_cmp, replace); \ /* Set next/prev for sibling linked list. */ \ if (replace != NULL) { \ if (parent != NULL) { \ phn_prev_set(a_type, a_field, replace, parent); \ phn_lchild_set(a_type, a_field, parent, \ replace); \ } else { \ phn_prev_set(a_type, a_field, replace, \ phn_prev_get(a_type, a_field, phn)); \ if (phn_prev_get(a_type, a_field, phn) != \ NULL) { \ phn_next_set(a_type, a_field, \ phn_prev_get(a_type, a_field, phn), \ replace); \ } \ } \ phn_next_set(a_type, a_field, replace, \ phn_next_get(a_type, a_field, phn)); \ if (phn_next_get(a_type, a_field, phn) != NULL) { \ phn_prev_set(a_type, a_field, \ phn_next_get(a_type, a_field, phn), \ replace); \ } \ } else { \ if (parent != NULL) { \ a_type *next = phn_next_get(a_type, a_field, \ phn); \ phn_lchild_set(a_type, a_field, parent, next); \ if (next != NULL) { \ phn_prev_set(a_type, a_field, next, \ parent); \ } \ } else { \ assert(phn_prev_get(a_type, a_field, phn) != \ NULL); \ phn_next_set(a_type, a_field, \ phn_prev_get(a_type, a_field, phn), \ phn_next_get(a_type, a_field, phn)); \ } \ if (phn_next_get(a_type, a_field, phn) != NULL) { \ phn_prev_set(a_type, a_field, \ phn_next_get(a_type, a_field, phn), \ phn_prev_get(a_type, a_field, phn)); \ } \ } \ } #endif /* PH_H_ */
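An instantiation sketch for ph_gen(): an intrusive min-heap over a toy node type. It assumes this header can be included standalone as "ph.h" (in-tree it is pulled in via jemalloc_internal.h); the toy_ names and comparator are invented for the example, and the unused generated functions (toy_empty(), toy_first(), toy_remove()) may trigger -Wunused-function.

#include <assert.h>
#include <stdio.h>
#include <string.h>

#include "ph.h"

typedef struct toy_node_s toy_node_t;
struct toy_node_s {
	int key;
	phn(toy_node_t) link;	/* Intrusive heap linkage. */
};

static int
toy_cmp(const toy_node_t *a, const toy_node_t *b)
{
	return ((a->key > b->key) - (a->key < b->key));
}

typedef ph(toy_node_t) toy_heap_t;
ph_gen(static, toy_, toy_heap_t, toy_node_t, link, toy_cmp)

int
main(void)
{
	toy_heap_t heap;
	toy_node_t a = {3, {NULL, NULL, NULL}};
	toy_node_t b = {1, {NULL, NULL, NULL}};
	toy_node_t c = {2, {NULL, NULL, NULL}};

	toy_new(&heap);
	toy_insert(&heap, &a);
	toy_insert(&heap, &b);
	toy_insert(&heap, &c);
	printf("%d\n", toy_remove_first(&heap)->key);	/* 1: the minimum. */
	return (0);
}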
10,965
30.693642
86
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/huge.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero); bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero); void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize, size_t alignment, bool zero, tcache_t *tcache); #ifdef JEMALLOC_JET typedef void (huge_dalloc_junk_t)(void *, size_t); extern huge_dalloc_junk_t *huge_dalloc_junk; #endif void huge_dalloc(tsdn_t *tsdn, void *ptr); arena_t *huge_aalloc(const void *ptr); size_t huge_salloc(tsdn_t *tsdn, const void *ptr); prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr); void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx); void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
1,518
41.194444
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/assert.h
/* * Define a custom assert() in order to reduce the chances of deadlock during * assertion failure. */ #ifndef assert #define assert(e) do { \ if (unlikely(config_debug && !(e))) { \ malloc_printf( \ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ __FILE__, __LINE__, #e); \ abort(); \ } \ } while (0) #endif #ifndef not_reached #define not_reached() do { \ if (config_debug) { \ malloc_printf( \ "<jemalloc>: %s:%d: Unreachable code reached\n", \ __FILE__, __LINE__); \ abort(); \ } \ unreachable(); \ } while (0) #endif #ifndef not_implemented #define not_implemented() do { \ if (config_debug) { \ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ __FILE__, __LINE__); \ abort(); \ } \ } while (0) #endif #ifndef assert_not_implemented #define assert_not_implemented(e) do { \ if (unlikely(config_debug && !(e))) \ not_implemented(); \ } while (0) #endif
1,029
21.391304
77
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/atomic.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #define atomic_read_uint64(p) atomic_add_uint64(p, 0) #define atomic_read_uint32(p) atomic_add_uint32(p, 0) #define atomic_read_p(p) atomic_add_p(p, NULL) #define atomic_read_z(p) atomic_add_z(p, 0) #define atomic_read_u(p) atomic_add_u(p, 0) #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES /* * All arithmetic functions return the arithmetic result of the atomic * operation. Some atomic operation APIs return the value prior to mutation, in * which case the following functions must redundantly compute the result so * that it can be returned. These functions are normally inlined, so the extra * operations can be optimized away if the return values aren't used by the * callers. * * <t> atomic_read_<t>(<t> *p) { return (*p); } * <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); } * <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); } * bool atomic_cas_<t>(<t> *p, <t> c, <t> s) * { * if (*p != c) * return (true); * *p = s; * return (false); * } * void atomic_write_<t>(<t> *p, <t> x) { *p = x; } */ #ifndef JEMALLOC_ENABLE_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x); uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x); bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s); void atomic_write_uint64(uint64_t *p, uint64_t x); uint32_t atomic_add_uint32(uint32_t *p, uint32_t x); uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x); bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s); void atomic_write_uint32(uint32_t *p, uint32_t x); void *atomic_add_p(void **p, void *x); void *atomic_sub_p(void **p, void *x); bool atomic_cas_p(void **p, void *c, void *s); void atomic_write_p(void **p, const void *x); size_t atomic_add_z(size_t *p, size_t x); size_t atomic_sub_z(size_t *p, size_t x); bool atomic_cas_z(size_t *p, size_t c, size_t s); void atomic_write_z(size_t *p, size_t x); unsigned atomic_add_u(unsigned *p, unsigned x); unsigned atomic_sub_u(unsigned *p, unsigned x); bool atomic_cas_u(unsigned *p, unsigned c, unsigned s); void atomic_write_u(unsigned *p, unsigned x); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) /******************************************************************************/ /* 64-bit operations. */ #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) # if (defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { uint64_t t = x; asm volatile ( "lock; xaddq %0, %1;" : "+r" (t), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (t + x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { uint64_t t; x = (uint64_t)(-(int64_t)x); t = x; asm volatile ( "lock; xaddq %0, %1;" : "+r" (t), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (t + x); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { uint8_t success; asm volatile ( "lock; cmpxchgq %4, %0;" "sete %1;" : "=m" (*p), "=a" (success) /* Outputs. */ : "m" (*p), "a" (c), "r" (s) /* Inputs. */ : "memory" /* Clobbers. 
*/ ); return (!(bool)success); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { asm volatile ( "xchgq %1, %0;" /* Lock is implied by xchgq. */ : "=m" (*p), "+r" (x) /* Outputs. */ : "m" (*p) /* Inputs. */ : "memory" /* Clobbers. */ ); } # elif (defined(JEMALLOC_C11ATOMICS)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; return (atomic_fetch_add(a, x) + x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; return (atomic_fetch_sub(a, x) - x); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; return (!atomic_compare_exchange_strong(a, &c, s)); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; atomic_store(a, x); } # elif (defined(JEMALLOC_ATOMIC9)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { /* * atomic_fetchadd_64() doesn't exist, but we only ever use this * function on LP64 systems, so atomic_fetchadd_long() will do. */ assert(sizeof(uint64_t) == sizeof(unsigned long)); return (atomic_fetchadd_long(p, (unsigned long)x) + x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { assert(sizeof(uint64_t) == sizeof(unsigned long)); return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { assert(sizeof(uint64_t) == sizeof(unsigned long)); return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s)); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { assert(sizeof(uint64_t) == sizeof(unsigned long)); atomic_store_rel_long(p, x); } # elif (defined(JEMALLOC_OSATOMIC)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p)); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { uint64_t o; /*The documented OSAtomic*() API does not expose an atomic exchange. 
*/ do { o = atomic_read_uint64(p); } while (atomic_cas_uint64(p, o, x)); } # elif (defined(_MSC_VER)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (InterlockedExchangeAdd64(p, x) + x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { uint64_t o; o = InterlockedCompareExchange64(p, s, c); return (o != c); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { InterlockedExchange64(p, x); } # elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \ defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (__sync_add_and_fetch(p, x)); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (__sync_sub_and_fetch(p, x)); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { return (!__sync_bool_compare_and_swap(p, c, s)); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { __sync_lock_test_and_set(p, x); } # else # error "Missing implementation for 64-bit atomic operations" # endif #endif /******************************************************************************/ /* 32-bit operations. */ #if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { uint32_t t = x; asm volatile ( "lock; xaddl %0, %1;" : "+r" (t), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (t + x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { uint32_t t; x = (uint32_t)(-(int32_t)x); t = x; asm volatile ( "lock; xaddl %0, %1;" : "+r" (t), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (t + x); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { uint8_t success; asm volatile ( "lock; cmpxchgl %4, %0;" "sete %1;" : "=m" (*p), "=a" (success) /* Outputs. */ : "m" (*p), "a" (c), "r" (s) /* Inputs. */ : "memory" ); return (!(bool)success); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { asm volatile ( "xchgl %1, %0;" /* Lock is implied by xchgl. */ : "=m" (*p), "+r" (x) /* Outputs. */ : "m" (*p) /* Inputs. */ : "memory" /* Clobbers. 
*/ ); } # elif (defined(JEMALLOC_C11ATOMICS)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; return (atomic_fetch_add(a, x) + x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; return (atomic_fetch_sub(a, x) - x); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; return (!atomic_compare_exchange_strong(a, &c, s)); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; atomic_store(a, x); } #elif (defined(JEMALLOC_ATOMIC9)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (atomic_fetchadd_32(p, x) + x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { return (!atomic_cmpset_32(p, c, s)); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { atomic_store_rel_32(p, x); } #elif (defined(JEMALLOC_OSATOMIC)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p)); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { uint32_t o; /*The documented OSAtomic*() API does not expose an atomic exchange. */ do { o = atomic_read_uint32(p); } while (atomic_cas_uint32(p, o, x)); } #elif (defined(_MSC_VER)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (InterlockedExchangeAdd(p, x) + x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (InterlockedExchangeAdd(p, -((int32_t)x)) - x); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { uint32_t o; o = InterlockedCompareExchange(p, s, c); return (o != c); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { InterlockedExchange(p, x); } #elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \ defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (__sync_add_and_fetch(p, x)); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (__sync_sub_and_fetch(p, x)); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { return (!__sync_bool_compare_and_swap(p, c, s)); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { __sync_lock_test_and_set(p, x); } #else # error "Missing implementation for 32-bit atomic operations" #endif /******************************************************************************/ /* Pointer operations. 
*/ JEMALLOC_INLINE void * atomic_add_p(void **p, void *x) { #if (LG_SIZEOF_PTR == 3) return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); #elif (LG_SIZEOF_PTR == 2) return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); #endif } JEMALLOC_INLINE void * atomic_sub_p(void **p, void *x) { #if (LG_SIZEOF_PTR == 3) return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x))); #elif (LG_SIZEOF_PTR == 2) return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x))); #endif } JEMALLOC_INLINE bool atomic_cas_p(void **p, void *c, void *s) { #if (LG_SIZEOF_PTR == 3) return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); #elif (LG_SIZEOF_PTR == 2) return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); #endif } JEMALLOC_INLINE void atomic_write_p(void **p, const void *x) { #if (LG_SIZEOF_PTR == 3) atomic_write_uint64((uint64_t *)p, (uint64_t)x); #elif (LG_SIZEOF_PTR == 2) atomic_write_uint32((uint32_t *)p, (uint32_t)x); #endif } /******************************************************************************/ /* size_t operations. */ JEMALLOC_INLINE size_t atomic_add_z(size_t *p, size_t x) { #if (LG_SIZEOF_PTR == 3) return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); #elif (LG_SIZEOF_PTR == 2) return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); #endif } JEMALLOC_INLINE size_t atomic_sub_z(size_t *p, size_t x) { #if (LG_SIZEOF_PTR == 3) return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x))); #elif (LG_SIZEOF_PTR == 2) return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x))); #endif } JEMALLOC_INLINE bool atomic_cas_z(size_t *p, size_t c, size_t s) { #if (LG_SIZEOF_PTR == 3) return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); #elif (LG_SIZEOF_PTR == 2) return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); #endif } JEMALLOC_INLINE void atomic_write_z(size_t *p, size_t x) { #if (LG_SIZEOF_PTR == 3) atomic_write_uint64((uint64_t *)p, (uint64_t)x); #elif (LG_SIZEOF_PTR == 2) atomic_write_uint32((uint32_t *)p, (uint32_t)x); #endif } /******************************************************************************/ /* unsigned operations. */ JEMALLOC_INLINE unsigned atomic_add_u(unsigned *p, unsigned x) { #if (LG_SIZEOF_INT == 3) return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); #elif (LG_SIZEOF_INT == 2) return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); #endif } JEMALLOC_INLINE unsigned atomic_sub_u(unsigned *p, unsigned x) { #if (LG_SIZEOF_INT == 3) return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x))); #elif (LG_SIZEOF_INT == 2) return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x))); #endif } JEMALLOC_INLINE bool atomic_cas_u(unsigned *p, unsigned c, unsigned s) { #if (LG_SIZEOF_INT == 3) return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); #elif (LG_SIZEOF_INT == 2) return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); #endif } JEMALLOC_INLINE void atomic_write_u(unsigned *p, unsigned x) { #if (LG_SIZEOF_INT == 3) atomic_write_uint64((uint64_t *)p, (uint64_t)x); #elif (LG_SIZEOF_INT == 2) atomic_write_uint32((uint32_t *)p, (uint32_t)x); #endif } /******************************************************************************/ #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
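One gotcha worth a worked example: per the comment block above, atomic_cas_*() returns false on success, so read-modify-write loops spin while it returns true. The sketch below keeps that contract but substitutes the GCC/Clang __sync builtin for the jemalloc wrapper so it compiles on its own; a saturating add is used because it cannot be phrased as a single fetch-add.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same contract as atomic_cas_uint32(): false iff the swap happened. */
static bool
toy_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
{
	return (!__sync_bool_compare_and_swap(p, c, s));
}

/* Saturating add: not expressible as a single fetch-add. */
static uint32_t
toy_add_saturate_u32(uint32_t *p, uint32_t x, uint32_t max)
{
	uint32_t o, n;

	do {
		o = *p;
		n = (max - o < x) ? max : o + x;
	} while (toy_cas_u32(p, o, n));	/* Retry while the CAS "fails". */
	return (n);
}

int
main(void)
{
	uint32_t counter = 0xfffffff0U;

	/* Saturates at UINT32_MAX: prints 0xffffffff. */
	printf("0x%08x\n",
	    (unsigned)toy_add_saturate_u32(&counter, 0x100, UINT32_MAX));
	return (0);
}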
15,441
22.684049
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
#ifndef JEMALLOC_INTERNAL_DECLS_H #define JEMALLOC_INTERNAL_DECLS_H #include <math.h> #ifdef _WIN32 # include <windows.h> # include "msvc_compat/windows_extra.h" #else # include <sys/param.h> # include <sys/mman.h> # if !defined(__pnacl__) && !defined(__native_client__) # include <sys/syscall.h> # if !defined(SYS_write) && defined(__NR_write) # define SYS_write __NR_write # endif # include <sys/uio.h> # endif # include <pthread.h> # ifdef JEMALLOC_OS_UNFAIR_LOCK # include <os/lock.h> # endif # ifdef JEMALLOC_GLIBC_MALLOC_HOOK # include <sched.h> # endif # include <errno.h> # include <sys/time.h> # include <time.h> # ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME # include <mach/mach_time.h> # endif #endif #include <sys/types.h> #include <limits.h> #ifndef SIZE_T_MAX # define SIZE_T_MAX SIZE_MAX #endif #include <stdarg.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <stddef.h> #ifndef offsetof # define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) #endif #include <string.h> #include <strings.h> #include <ctype.h> #ifdef _MSC_VER # include <io.h> typedef intptr_t ssize_t; # define PATH_MAX 1024 # define STDERR_FILENO 2 # define __func__ __FUNCTION__ # ifdef JEMALLOC_HAS_RESTRICT # define restrict __restrict # endif /* Disable warnings about deprecated system functions. */ # pragma warning(disable: 4996) #if _MSC_VER < 1800 static int isblank(int c) { return (c == '\t' || c == ' '); } #endif #else # include <unistd.h> #endif #include <fcntl.h> #endif /* JEMALLOC_INTERNAL_DECLS_H */
1,608
20.171053
68
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/mb.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void mb_write(void); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_)) #ifdef __i386__ /* * According to the Intel Architecture Software Developer's Manual, current * processors execute instructions in order from the perspective of other * processors in a multiprocessor system, but 1) Intel reserves the right to * change that, and 2) the compiler's optimizer could re-order instructions if * there weren't some form of barrier. Therefore, even if running on an * architecture that does not need memory barriers (everything through at least * i686), an "optimizer barrier" is necessary. */ JEMALLOC_INLINE void mb_write(void) { # if 0 /* This is a true memory barrier. */ asm volatile ("pusha;" "xor %%eax,%%eax;" "cpuid;" "popa;" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); # else /* * This is hopefully enough to keep the compiler from reordering * instructions around this one. */ asm volatile ("nop;" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); # endif } #elif (defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE void mb_write(void) { asm volatile ("sfence" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); } #elif defined(__powerpc__) JEMALLOC_INLINE void mb_write(void) { asm volatile ("eieio" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); } #elif defined(__sparc64__) JEMALLOC_INLINE void mb_write(void) { asm volatile ("membar #StoreStore" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); } #elif defined(__tile__) JEMALLOC_INLINE void mb_write(void) { __sync_synchronize(); } #else /* * This is much slower than a simple memory barrier, but the semantics of mutex * unlock make this work. */ JEMALLOC_INLINE void mb_write(void) { malloc_mutex_t mtx; malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT); malloc_mutex_lock(TSDN_NULL, &mtx); malloc_mutex_unlock(TSDN_NULL, &mtx); } #endif #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
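The pattern mb_write() serves, sketched below: publish a payload, fence the stores, then raise a ready flag, so a reader that observes the flag also observes the payload. The stand-in barrier here is only a compiler barrier, which (as the i386 comment above notes) suffices on x86 but not on weakly ordered CPUs, and the demo runs single-threaded purely to show the ordering discipline; the toy_ names are invented.

#include <stdio.h>

static int toy_payload;
static volatile int toy_ready;

/* Stand-in for mb_write(): a compiler barrier only (enough on x86). */
static void
toy_mb_write(void)
{
	__asm__ volatile ("" : : : "memory");
}

static void
toy_publish(void)
{
	toy_payload = 42;	/* 1: write the data... */
	toy_mb_write();		/* 2: ...order the stores... */
	toy_ready = 1;		/* 3: ...then raise the flag. */
}

static void
toy_consume(void)
{
	/* A reader that sees the flag must also see the payload. */
	if (toy_ready)
		printf("%d\n", toy_payload);
}

int
main(void)
{
	toy_publish();	/* In real use these run on different threads. */
	toy_consume();
	return (0);
}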
2,738
22.612069
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/quarantine.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct quarantine_obj_s quarantine_obj_t; typedef struct quarantine_s quarantine_t; /* Default per thread quarantine size if valgrind is enabled. */ #define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct quarantine_obj_s { void *ptr; size_t usize; }; struct quarantine_s { size_t curbytes; size_t curobjs; size_t first; #define LG_MAXOBJS_INIT 10 size_t lg_maxobjs; quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */ }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void quarantine_alloc_hook_work(tsd_t *tsd); void quarantine(tsd_t *tsd, void *ptr); void quarantine_cleanup(tsd_t *tsd); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void quarantine_alloc_hook(void); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_)) JEMALLOC_ALWAYS_INLINE void quarantine_alloc_hook(void) { tsd_t *tsd; assert(config_fill && opt_quarantine); tsd = tsd_fetch(); if (tsd_quarantine_get(tsd) == NULL) quarantine_alloc_hook_work(tsd); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
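The quarantine_s ring-buffer arithmetic, in miniature: objs holds 2^lg_maxobjs slots, first indexes the oldest entry, and a new entry lands at (first + curobjs) & (maxobjs - 1). The toy below evicts only on object count; the real code also evicts by curbytes against the opt.quarantine budget and grows the buffer, and the toy_ names are invented.

#include <stddef.h>
#include <stdio.h>

#define TOY_LG_MAXOBJS	2	/* 4 slots; tiny on purpose. */
#define TOY_MAXOBJS	((size_t)1 << TOY_LG_MAXOBJS)

static void *toy_objs[TOY_MAXOBJS];
static size_t toy_first, toy_curobjs;

/* Enqueue ptr; return the evicted (really-freed) oldest entry, if any. */
static void *
toy_quarantine(void *ptr)
{
	void *evicted = NULL;

	if (toy_curobjs == TOY_MAXOBJS) {
		evicted = toy_objs[toy_first];
		toy_first = (toy_first + 1) & (TOY_MAXOBJS - 1);
		toy_curobjs--;
	}
	toy_objs[(toy_first + toy_curobjs) & (TOY_MAXOBJS - 1)] = ptr;
	toy_curobjs++;
	return (evicted);
}

int
main(void)
{
	int vals[6];
	int i;

	for (i = 0; i < 6; i++) {
		void *e = toy_quarantine(&vals[i]);
		if (e != NULL)
			printf("evict vals[%d]\n", (int)((int *)e - vals));
	}
	return (0);	/* Prints: evict vals[0], then evict vals[1]. */
}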
1,593
25.131148
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/valgrind.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #ifdef JEMALLOC_VALGRIND #include <valgrind/valgrind.h> /* * The size that is reported to Valgrind must be consistent through a chain of * malloc..realloc..realloc calls. Request size isn't recorded anywhere in * jemalloc, so it is critical that all callers of these macros provide usize * rather than request size. As a result, buffer overflow detection is * technically weakened for the standard API, though it is generally accepted * practice to consider any extra bytes reported by malloc_usable_size() as * usable space. */ #define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \ if (unlikely(in_valgrind)) \ valgrind_make_mem_noaccess(ptr, usize); \ } while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \ if (unlikely(in_valgrind)) \ valgrind_make_mem_undefined(ptr, usize); \ } while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \ if (unlikely(in_valgrind)) \ valgrind_make_mem_defined(ptr, usize); \ } while (0) /* * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro * calls must be embedded in macros rather than in functions so that when * Valgrind reports errors, there are no extra stack frames in the backtraces. */ #define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \ if (unlikely(in_valgrind && cond)) { \ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \ zero); \ } \ } while (0) #define JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr) \ (false) #define JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr) \ ((ptr) != (old_ptr)) #define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr) \ (false) #define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr) \ (ptr == NULL) #define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr) \ (false) #define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr) \ (old_ptr == NULL) #define JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null, \ old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do { \ if (unlikely(in_valgrind)) { \ size_t rzsize = p2rz(tsdn, ptr); \ \ if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr, \ old_ptr)) { \ VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \ usize, rzsize); \ if (zero && old_usize < usize) { \ valgrind_make_mem_defined( \ (void *)((uintptr_t)ptr + \ old_usize), usize - old_usize); \ } \ } else { \ if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_## \ old_ptr_null(old_ptr)) { \ valgrind_freelike_block(old_ptr, \ old_rzsize); \ } \ if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_## \ ptr_null(ptr)) { \ size_t copy_size = (old_usize < usize) \ ? 
old_usize : usize; \ size_t tail_size = usize - copy_size; \ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \ rzsize, false); \ if (copy_size > 0) { \ valgrind_make_mem_defined(ptr, \ copy_size); \ } \ if (zero && tail_size > 0) { \ valgrind_make_mem_defined( \ (void *)((uintptr_t)ptr + \ copy_size), tail_size); \ } \ } \ } \ } \ } while (0) #define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \ if (unlikely(in_valgrind)) \ valgrind_freelike_block(ptr, rzsize); \ } while (0) #else #define RUNNING_ON_VALGRIND ((unsigned)0) #define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0) #define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ zero) do {} while (0) #define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0) #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #ifdef JEMALLOC_VALGRIND void valgrind_make_mem_noaccess(void *ptr, size_t usize); void valgrind_make_mem_undefined(void *ptr, size_t usize); void valgrind_make_mem_defined(void *ptr, size_t usize); void valgrind_freelike_block(void *ptr, size_t usize); #endif #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
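A minimal example of the client requests these macros wrap: VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_FREELIKE_BLOCK() teach Memcheck about a custom allocator's usable region and redzones. It requires the Valgrind headers to be installed, the requests compile to no-ops when not running under Valgrind, and the toy_ allocator is invented for illustration.

#include <stdlib.h>

#include <valgrind/valgrind.h>

#define TOY_RZ	16	/* Redzone bytes on each side, like rzsize. */

static void *
toy_alloc(size_t usize)
{
	char *raw = malloc(usize + 2 * TOY_RZ);

	if (raw == NULL)
		return (NULL);
	/* Report only the usable region; usize must stay consistent. */
	VALGRIND_MALLOCLIKE_BLOCK(raw + TOY_RZ, usize, TOY_RZ, 0);
	return (raw + TOY_RZ);
}

static void
toy_free(void *ptr)
{
	VALGRIND_FREELIKE_BLOCK(ptr, TOY_RZ);
	free((char *)ptr - TOY_RZ);
}

int
main(void)
{
	char *p = toy_alloc(32);

	if (p != NULL) {
		p[0] = 'x';	/* In bounds: no report. */
		toy_free(p);
	}
	return (0);
}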
4,841
36.534884
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/extent.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct extent_node_s extent_node_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS /* Tree of extents. Use accessor functions for en_* fields. */ struct extent_node_s { /* Arena from which this extent came, if any. */ arena_t *en_arena; /* Pointer to the extent that this tree node is responsible for. */ void *en_addr; /* Total region size. */ size_t en_size; /* * Serial number (potentially non-unique). * * In principle serial numbers can wrap around on 32-bit systems if * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall * back on address comparison for equal serial numbers, stable (if * imperfect) ordering is maintained. * * Serial numbers may not be unique even in the absence of wrap-around, * e.g. when splitting an extent and assigning the same serial number to * both resulting adjacent extents. */ size_t en_sn; /* * The zeroed flag is used by chunk recycling code to track whether * memory is zero-filled. */ bool en_zeroed; /* * True if physical memory is committed to the extent, whether * explicitly or implicitly as on a system that overcommits and * satisfies physical memory needs on demand via soft page faults. */ bool en_committed; /* * The achunk flag is used to validate that huge allocation lookups * don't return arena chunks. */ bool en_achunk; /* Profile counters, used for huge objects. */ prof_tctx_t *en_prof_tctx; /* Linkage for arena's runs_dirty and chunks_cache rings. */ arena_runs_dirty_link_t rd; qr(extent_node_t) cc_link; union { /* Linkage for the size/sn/address-ordered tree. */ rb_node(extent_node_t) szsnad_link; /* Linkage for arena's achunks, huge, and node_cache lists. */ ql_elm(extent_node_t) ql_link; }; /* Linkage for the address-ordered tree. 
*/ rb_node(extent_node_t) ad_link; }; typedef rb_tree(extent_node_t) extent_tree_t; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t) rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t) #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE arena_t *extent_node_arena_get(const extent_node_t *node); void *extent_node_addr_get(const extent_node_t *node); size_t extent_node_size_get(const extent_node_t *node); size_t extent_node_sn_get(const extent_node_t *node); bool extent_node_zeroed_get(const extent_node_t *node); bool extent_node_committed_get(const extent_node_t *node); bool extent_node_achunk_get(const extent_node_t *node); prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node); void extent_node_arena_set(extent_node_t *node, arena_t *arena); void extent_node_addr_set(extent_node_t *node, void *addr); void extent_node_size_set(extent_node_t *node, size_t size); void extent_node_sn_set(extent_node_t *node, size_t sn); void extent_node_zeroed_set(extent_node_t *node, bool zeroed); void extent_node_committed_set(extent_node_t *node, bool committed); void extent_node_achunk_set(extent_node_t *node, bool achunk); void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx); void extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size, size_t sn, bool zeroed, bool committed); void extent_node_dirty_linkage_init(extent_node_t *node); void extent_node_dirty_insert(extent_node_t *node, arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty); void extent_node_dirty_remove(extent_node_t *node); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_)) JEMALLOC_INLINE arena_t * extent_node_arena_get(const extent_node_t *node) { return (node->en_arena); } JEMALLOC_INLINE void * extent_node_addr_get(const extent_node_t *node) { return (node->en_addr); } JEMALLOC_INLINE size_t extent_node_size_get(const extent_node_t *node) { return (node->en_size); } JEMALLOC_INLINE size_t extent_node_sn_get(const extent_node_t *node) { return (node->en_sn); } JEMALLOC_INLINE bool extent_node_zeroed_get(const extent_node_t *node) { return (node->en_zeroed); } JEMALLOC_INLINE bool extent_node_committed_get(const extent_node_t *node) { assert(!node->en_achunk); return (node->en_committed); } JEMALLOC_INLINE bool extent_node_achunk_get(const extent_node_t *node) { return (node->en_achunk); } JEMALLOC_INLINE prof_tctx_t * extent_node_prof_tctx_get(const extent_node_t *node) { return (node->en_prof_tctx); } JEMALLOC_INLINE void extent_node_arena_set(extent_node_t *node, arena_t *arena) { node->en_arena = arena; } JEMALLOC_INLINE void extent_node_addr_set(extent_node_t *node, void *addr) { node->en_addr = addr; } JEMALLOC_INLINE void extent_node_size_set(extent_node_t *node, size_t size) { node->en_size = size; } JEMALLOC_INLINE void extent_node_sn_set(extent_node_t *node, size_t sn) { node->en_sn = sn; } JEMALLOC_INLINE void extent_node_zeroed_set(extent_node_t *node, bool zeroed) { node->en_zeroed = zeroed; } JEMALLOC_INLINE void extent_node_committed_set(extent_node_t *node, bool committed) { node->en_committed = committed; } JEMALLOC_INLINE void extent_node_achunk_set(extent_node_t *node, bool achunk) { node->en_achunk = achunk; } JEMALLOC_INLINE void 
extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx) { node->en_prof_tctx = tctx; } JEMALLOC_INLINE void extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size, size_t sn, bool zeroed, bool committed) { extent_node_arena_set(node, arena); extent_node_addr_set(node, addr); extent_node_size_set(node, size); extent_node_sn_set(node, sn); extent_node_zeroed_set(node, zeroed); extent_node_committed_set(node, committed); extent_node_achunk_set(node, false); if (config_prof) extent_node_prof_tctx_set(node, NULL); } JEMALLOC_INLINE void extent_node_dirty_linkage_init(extent_node_t *node) { qr_new(&node->rd, rd_link); qr_new(node, cc_link); } JEMALLOC_INLINE void extent_node_dirty_insert(extent_node_t *node, arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty) { qr_meld(runs_dirty, &node->rd, rd_link); qr_meld(chunks_dirty, node, cc_link); } JEMALLOC_INLINE void extent_node_dirty_remove(extent_node_t *node) { qr_remove(&node->rd, rd_link); qr_remove(node, cc_link); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
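/*
 * Editor's note: an illustrative sketch of the accessors above, not part of
 * the original header. It assumes the jemalloc internal build environment;
 * the NULL arena and fake address are placeholders -- real nodes come from
 * an arena's node cache and describe live chunks.
 */
static void
extent_node_sketch(void)
{
	extent_node_t node;

	/* Describe a hypothetical 2 MiB zeroed, committed extent (sn 0). */
	extent_node_init(&node, NULL, (void *)0x200000, ZU(2) << 20, 0, true,
	    true);
	assert(extent_node_size_get(&node) == (ZU(2) << 20));
	assert(!extent_node_achunk_get(&node)); /* init clears the achunk flag. */
}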
6,787
24.04797
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef enum { dss_prec_disabled = 0, dss_prec_primary = 1, dss_prec_secondary = 2, dss_prec_limit = 3 } dss_prec_t; #define DSS_PREC_DEFAULT dss_prec_secondary #define DSS_DEFAULT "secondary" #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS extern const char *dss_prec_names[]; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS dss_prec_t chunk_dss_prec_get(void); bool chunk_dss_prec_set(dss_prec_t dss_prec); void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit); bool chunk_in_dss(void *chunk); bool chunk_dss_mergeable(void *chunk_a, void *chunk_b); void chunk_dss_boot(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
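/*
 * Editor's note: illustrative sketch of the dss precedence API above, not
 * part of the original header. dss_prec_names[] maps each dss_prec_t to
 * "disabled"/"primary"/"secondary"; "secondary" (mmap preferred over sbrk)
 * is the compile-time default.
 */
static void
dss_prec_sketch(void)
{
	dss_prec_t prec = chunk_dss_prec_get();

	/* Restore the default precedence if something changed it. */
	if (prec != dss_prec_secondary)
		(void)chunk_dss_prec_set(dss_prec_secondary);
}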
1,211
30.894737
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
/* * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for * functions that are static inline functions if inlining is enabled, and * single-definition library-private functions if inlining is disabled. * * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in * which case the denoted functions are always static, regardless of whether * inlining is enabled. */ #if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE) /* Disable inlining to make debugging/profiling easier. */ # define JEMALLOC_ALWAYS_INLINE # define JEMALLOC_ALWAYS_INLINE_C static # define JEMALLOC_INLINE # define JEMALLOC_INLINE_C static # define inline #else # define JEMALLOC_ENABLE_INLINE # ifdef JEMALLOC_HAVE_ATTR # define JEMALLOC_ALWAYS_INLINE \ static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) # define JEMALLOC_ALWAYS_INLINE_C \ static inline JEMALLOC_ATTR(always_inline) # else # define JEMALLOC_ALWAYS_INLINE static inline # define JEMALLOC_ALWAYS_INLINE_C static inline # endif # define JEMALLOC_INLINE static inline # define JEMALLOC_INLINE_C static inline # ifdef _MSC_VER # define inline _inline # endif #endif #ifdef JEMALLOC_CC_SILENCE # define UNUSED JEMALLOC_ATTR(unused) #else # define UNUSED #endif #define ZU(z) ((size_t)z) #define ZI(z) ((ssize_t)z) #define QU(q) ((uint64_t)q) #define QI(q) ((int64_t)q) #define KZU(z) ZU(z##ULL) #define KZI(z) ZI(z##LL) #define KQU(q) QU(q##ULL) #define KQI(q) QI(q##LL) #ifndef __DECONST # define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) #endif #ifndef JEMALLOC_HAS_RESTRICT # define restrict #endif
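/*
 * Editor's note: a short sketch of the literal-cast helpers above, not part
 * of the original header. ZU/ZI/QU/QI cast to size_t/ssize_t/uint64_t/
 * int64_t; the K-prefixed forms additionally append the ULL/LL suffix so
 * 64-bit constants stay well-formed on 32-bit targets.
 */
static const size_t sketch_chunk_size = ZU(1) << 21;	/* 2 MiB as a size_t. */
static const uint64_t sketch_mix = KQU(0xff51afd7ed558ccd); /* 64-bit literal. */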
1,669
27.793103
78
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/pages.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *pages_map(void *addr, size_t size, bool *commit); void pages_unmap(void *addr, size_t size); void *pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, bool *commit); bool pages_commit(void *addr, size_t size); bool pages_decommit(void *addr, size_t size); bool pages_purge(void *addr, size_t size); bool pages_huge(void *addr, size_t size); bool pages_nohuge(void *addr, size_t size); void pages_boot(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
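/*
 * Editor's note: illustrative sketch of the pages API above, not part of the
 * original header. pages_map() may ignore the address hint; *commit reports
 * whether the returned mapping is committed, which callers must respect on
 * systems where overcommit is disabled.
 */
static void
pages_sketch(void)
{
	bool commit = true;
	size_t size = ZU(2) << 20; /* 2 MiB */
	void *addr = pages_map(NULL, size, &commit);

	if (addr != NULL)
		pages_unmap(addr, size);
}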
1,077
34.933333
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/prof.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct prof_bt_s prof_bt_t; typedef struct prof_cnt_s prof_cnt_t; typedef struct prof_tctx_s prof_tctx_t; typedef struct prof_gctx_s prof_gctx_t; typedef struct prof_tdata_s prof_tdata_t; /* Option defaults. */ #ifdef JEMALLOC_PROF # define PROF_PREFIX_DEFAULT "jeprof" #else # define PROF_PREFIX_DEFAULT "" #endif #define LG_PROF_SAMPLE_DEFAULT 19 #define LG_PROF_INTERVAL_DEFAULT -1 /* * Hard limit on stack backtrace depth. The version of prof_backtrace() that * is based on __builtin_return_address() necessarily has a hard-coded number * of backtrace frame handlers, and should be kept in sync with this setting. */ #define PROF_BT_MAX 128 /* Initial hash table size. */ #define PROF_CKH_MINITEMS 64 /* Size of memory buffer to use when writing dump files. */ #define PROF_DUMP_BUFSIZE 65536 /* Size of stack-allocated buffer used by prof_printf(). */ #define PROF_PRINTF_BUFSIZE 128 /* * Number of mutexes shared among all gctx's. No space is allocated for these * unless profiling is enabled, so it's okay to over-provision. */ #define PROF_NCTX_LOCKS 1024 /* * Number of mutexes shared among all tdata's. No space is allocated for these * unless profiling is enabled, so it's okay to over-provision. */ #define PROF_NTDATA_LOCKS 256 /* * prof_tdata pointers close to NULL are used to encode state information that * is used for cleaning up during thread shutdown. */ #define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) #define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) #define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct prof_bt_s { /* Backtrace, stored as len program counters. */ void **vec; unsigned len; }; #ifdef JEMALLOC_PROF_LIBGCC /* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ typedef struct { prof_bt_t *bt; unsigned max; } prof_unwind_data_t; #endif struct prof_cnt_s { /* Profiling counters. */ uint64_t curobjs; uint64_t curbytes; uint64_t accumobjs; uint64_t accumbytes; }; typedef enum { prof_tctx_state_initializing, prof_tctx_state_nominal, prof_tctx_state_dumping, prof_tctx_state_purgatory /* Dumper must finish destroying. */ } prof_tctx_state_t; struct prof_tctx_s { /* Thread data for thread that performed the allocation. */ prof_tdata_t *tdata; /* * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be * defunct during teardown. */ uint64_t thr_uid; uint64_t thr_discrim; /* Profiling counters, protected by tdata->lock. */ prof_cnt_t cnts; /* Associated global context. */ prof_gctx_t *gctx; /* * UID that distinguishes multiple tctx's created by the same thread, * but coexisting in gctx->tctxs. There are two ways that such * coexistence can occur: * - A dumper thread can cause a tctx to be retained in the purgatory * state. * - Although a single "producer" thread must create all tctx's which * share the same thr_uid, multiple "consumers" can each concurrently * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only * gets called once each time cnts.cur{objs,bytes} drop to 0, but this * threshold can be hit again before the first consumer finishes * executing prof_tctx_destroy(). */ uint64_t tctx_uid; /* Linkage into gctx's tctxs. 
*/ rb_node(prof_tctx_t) tctx_link; /* * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents * sample vs destroy race. */ bool prepared; /* Current dump-related state, protected by gctx->lock. */ prof_tctx_state_t state; /* * Copy of cnts snapshotted during early dump phase, protected by * dump_mtx. */ prof_cnt_t dump_cnts; }; typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; struct prof_gctx_s { /* Protects nlimbo, cnt_summed, and tctxs. */ malloc_mutex_t *lock; /* * Number of threads that currently cause this gctx to be in a state of * limbo due to one of: * - Initializing this gctx. * - Initializing per thread counters associated with this gctx. * - Preparing to destroy this gctx. * - Dumping a heap profile that includes this gctx. * nlimbo must be 1 (single destroyer) in order to safely destroy the * gctx. */ unsigned nlimbo; /* * Tree of profile counters, one for each thread that has allocated in * this context. */ prof_tctx_tree_t tctxs; /* Linkage for tree of contexts to be dumped. */ rb_node(prof_gctx_t) dump_link; /* Temporary storage for summation during dump. */ prof_cnt_t cnt_summed; /* Associated backtrace. */ prof_bt_t bt; /* Backtrace vector, variable size, referred to by bt. */ void *vec[1]; }; typedef rb_tree(prof_gctx_t) prof_gctx_tree_t; struct prof_tdata_s { malloc_mutex_t *lock; /* Monotonically increasing unique thread identifier. */ uint64_t thr_uid; /* * Monotonically increasing discriminator among tdata structures * associated with the same thr_uid. */ uint64_t thr_discrim; /* Included in heap profile dumps if non-NULL. */ char *thread_name; bool attached; bool expired; rb_node(prof_tdata_t) tdata_link; /* * Counter used to initialize prof_tctx_t's tctx_uid. No locking is * necessary when incrementing this field, because only one thread ever * does so. */ uint64_t tctx_uid_next; /* * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks * backtraces for which it has non-zero allocation/deallocation counters * associated with thread-specific prof_tctx_t objects. Other threads * may write to prof_tctx_t contents when freeing associated objects. */ ckh_t bt2tctx; /* Sampling state. */ uint64_t prng_state; uint64_t bytes_until_sample; /* State used to avoid dumping while operating on prof internals. */ bool enq; bool enq_idump; bool enq_gdump; /* * Set to true during an early dump phase for tdata's which are * currently being dumped. New threads' tdata's have this initialized * to false so that they aren't accidentally included in later dump * phases. */ bool dumping; /* * True if profiling is active for this tdata's thread * (thread.prof.active mallctl). */ bool active; /* Temporary storage for summation during dump. */ prof_cnt_t cnt_summed; /* Backtrace vector, used for calls to prof_backtrace(). */ void *vec[PROF_BT_MAX]; }; typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern bool opt_prof; extern bool opt_prof_active; extern bool opt_prof_thread_active_init; extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ extern bool opt_prof_gdump; /* High-water memory dumping. */ extern bool opt_prof_final; /* Final profile dumping. */ extern bool opt_prof_leak; /* Dump leak summary at exit. */ extern bool opt_prof_accum; /* Report cumulative bytes. */ extern char opt_prof_prefix[ /* Minimize memory bloat for non-prof builds. 
*/ #ifdef JEMALLOC_PROF PATH_MAX + #endif 1]; /* Accessed via prof_active_[gs]et{_unlocked,}(). */ extern bool prof_active; /* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ extern bool prof_gdump_val; /* * Profile dump interval, measured in bytes allocated. Each arena triggers a * profile dump when it reaches this threshold. The effect is that the * interval between profile dumps averages prof_interval, though the actual * interval between dumps will tend to be sporadic, and the interval will be a * maximum of approximately (prof_interval * narenas). */ extern uint64_t prof_interval; /* * Initialized as opt_lg_prof_sample, and potentially modified during profiling * resets. */ extern size_t lg_prof_sample; void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx); void bt_init(prof_bt_t *bt, void **vec); void prof_backtrace(prof_bt_t *bt); prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); #ifdef JEMALLOC_JET size_t prof_tdata_count(void); size_t prof_bt_count(void); const prof_cnt_t *prof_cnt_all(void); typedef int (prof_dump_open_t)(bool, const char *); extern prof_dump_open_t *prof_dump_open; typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *); extern prof_dump_header_t *prof_dump_header; #endif void prof_idump(tsdn_t *tsdn); bool prof_mdump(tsd_t *tsd, const char *filename); void prof_gdump(tsdn_t *tsdn); prof_tdata_t *prof_tdata_init(tsd_t *tsd); prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); void prof_reset(tsd_t *tsd, size_t lg_sample); void prof_tdata_cleanup(tsd_t *tsd); bool prof_active_get(tsdn_t *tsdn); bool prof_active_set(tsdn_t *tsdn, bool active); const char *prof_thread_name_get(tsd_t *tsd); int prof_thread_name_set(tsd_t *tsd, const char *thread_name); bool prof_thread_active_get(tsd_t *tsd); bool prof_thread_active_set(tsd_t *tsd, bool active); bool prof_thread_active_init_get(tsdn_t *tsdn); bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); bool prof_gdump_get(tsdn_t *tsdn); bool prof_gdump_set(tsdn_t *tsdn, bool active); void prof_boot0(void); void prof_boot1(void); bool prof_boot2(tsd_t *tsd); void prof_prefork0(tsdn_t *tsdn); void prof_prefork1(tsdn_t *tsdn); void prof_postfork_parent(tsdn_t *tsdn); void prof_postfork_child(tsdn_t *tsdn); void prof_sample_threshold_update(prof_tdata_t *tdata); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE bool prof_active_get_unlocked(void); bool prof_gdump_get_unlocked(void); prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create); prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr); void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr, prof_tctx_t *tctx); bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit, prof_tdata_t **tdata_out); prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update); void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx); void prof_free(tsd_t *tsd, const void *ptr, size_t usize); #endif #if 
(defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) JEMALLOC_ALWAYS_INLINE bool prof_active_get_unlocked(void) { /* * Even if opt_prof is true, sampling can be temporarily disabled by * setting prof_active to false. No locking is used when reading * prof_active in the fast path, so there are no guarantees regarding * how long it will take for all threads to notice state changes. */ return (prof_active); } JEMALLOC_ALWAYS_INLINE bool prof_gdump_get_unlocked(void) { /* * No locking is used when reading prof_gdump_val in the fast path, so * there are no guarantees regarding how long it will take for all * threads to notice state changes. */ return (prof_gdump_val); } JEMALLOC_ALWAYS_INLINE prof_tdata_t * prof_tdata_get(tsd_t *tsd, bool create) { prof_tdata_t *tdata; cassert(config_prof); tdata = tsd_prof_tdata_get(tsd); if (create) { if (unlikely(tdata == NULL)) { if (tsd_nominal(tsd)) { tdata = prof_tdata_init(tsd); tsd_prof_tdata_set(tsd, tdata); } } else if (unlikely(tdata->expired)) { tdata = prof_tdata_reinit(tsd, tdata); tsd_prof_tdata_set(tsd, tdata); } assert(tdata == NULL || tdata->attached); } return (tdata); } JEMALLOC_ALWAYS_INLINE prof_tctx_t * prof_tctx_get(tsdn_t *tsdn, const void *ptr) { cassert(config_prof); assert(ptr != NULL); return (arena_prof_tctx_get(tsdn, ptr)); } JEMALLOC_ALWAYS_INLINE void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); arena_prof_tctx_set(tsdn, ptr, usize, tctx); } JEMALLOC_ALWAYS_INLINE void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr, prof_tctx_t *old_tctx) { cassert(config_prof); assert(ptr != NULL); arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx); } JEMALLOC_ALWAYS_INLINE bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update, prof_tdata_t **tdata_out) { prof_tdata_t *tdata; cassert(config_prof); tdata = prof_tdata_get(tsd, true); if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) tdata = NULL; if (tdata_out != NULL) *tdata_out = tdata; if (unlikely(tdata == NULL)) return (true); if (likely(tdata->bytes_until_sample >= usize)) { if (update) tdata->bytes_until_sample -= usize; return (true); } else { /* Compute new sample threshold. 
*/ if (update) prof_sample_threshold_update(tdata); return (!tdata->active); } } JEMALLOC_ALWAYS_INLINE prof_tctx_t * prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) { prof_tctx_t *ret; prof_tdata_t *tdata; prof_bt_t bt; assert(usize == s2u(usize)); if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update, &tdata))) ret = (prof_tctx_t *)(uintptr_t)1U; else { bt_init(&bt, tdata->vec); prof_backtrace(&bt); ret = prof_lookup(tsd, &bt); } return (ret); } JEMALLOC_ALWAYS_INLINE void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); assert(usize == isalloc(tsdn, ptr, true)); if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) prof_malloc_sample_object(tsdn, ptr, usize, tctx); else prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U); } JEMALLOC_ALWAYS_INLINE void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx) { bool sampled, old_sampled; cassert(config_prof); assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); if (prof_active && !updated && ptr != NULL) { assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); if (prof_sample_accum_update(tsd, usize, true, NULL)) { /* * Don't sample. The usize passed to prof_alloc_prep() * was larger than what actually got allocated, so a * backtrace was captured for this allocation, even * though its actual usize was insufficient to cross the * sample threshold. */ prof_alloc_rollback(tsd, tctx, true); tctx = (prof_tctx_t *)(uintptr_t)1U; } } sampled = ((uintptr_t)tctx > (uintptr_t)1U); old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); if (unlikely(sampled)) prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx); else prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx); if (unlikely(old_sampled)) prof_free_sampled_object(tsd, old_usize, old_tctx); } JEMALLOC_ALWAYS_INLINE void prof_free(tsd_t *tsd, const void *ptr, size_t usize) { prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr); cassert(config_prof); assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) prof_free_sampled_object(tsd, usize, tctx); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
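/*
 * Editor's note: a sketch of the sampling protocol implied by the inlines
 * above, not part of the original header. "imalloc_stub" is a hypothetical
 * stand-in for jemalloc's real allocation path, declared here only so the
 * sketch is self-contained; usize must already satisfy usize == s2u(usize).
 * A tctx of (uintptr_t)1 means "not sampled"; larger values are real
 * prof_tctx_t pointers captured via backtrace.
 */
void	*imalloc_stub(tsd_t *tsd, size_t usize); /* hypothetical */

static void *
prof_malloc_sketch(tsd_t *tsd, size_t usize)
{
	prof_tctx_t *tctx;
	void *ptr;

	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
	ptr = imalloc_stub(tsd, usize);
	if (ptr == NULL) {
		/* Undo the sample accounting for the failed allocation. */
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), ptr, usize, tctx);
	return (ptr);
}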
15,844
27.914234
81
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/hash.h
/* * The following hash function is based on MurmurHash3, placed into the public * domain by Austin Appleby. See https://github.com/aappleby/smhasher for * details. */ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE uint32_t hash_x86_32(const void *key, int len, uint32_t seed); void hash_x86_128(const void *key, const int len, uint32_t seed, uint64_t r_out[2]); void hash_x64_128(const void *key, const int len, const uint32_t seed, uint64_t r_out[2]); void hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_)) /******************************************************************************/ /* Internal implementation. */ JEMALLOC_INLINE uint32_t hash_rotl_32(uint32_t x, int8_t r) { return ((x << r) | (x >> (32 - r))); } JEMALLOC_INLINE uint64_t hash_rotl_64(uint64_t x, int8_t r) { return ((x << r) | (x >> (64 - r))); } JEMALLOC_INLINE uint32_t hash_get_block_32(const uint32_t *p, int i) { /* Handle unaligned read. */ if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) { uint32_t ret; memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t)); return (ret); } return (p[i]); } JEMALLOC_INLINE uint64_t hash_get_block_64(const uint64_t *p, int i) { /* Handle unaligned read. 
*/ if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) { uint64_t ret; memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t)); return (ret); } return (p[i]); } JEMALLOC_INLINE uint32_t hash_fmix_32(uint32_t h) { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; return (h); } JEMALLOC_INLINE uint64_t hash_fmix_64(uint64_t k) { k ^= k >> 33; k *= KQU(0xff51afd7ed558ccd); k ^= k >> 33; k *= KQU(0xc4ceb9fe1a85ec53); k ^= k >> 33; return (k); } JEMALLOC_INLINE uint32_t hash_x86_32(const void *key, int len, uint32_t seed) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 4; uint32_t h1 = seed; const uint32_t c1 = 0xcc9e2d51; const uint32_t c2 = 0x1b873593; /* body */ { const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); int i; for (i = -nblocks; i; i++) { uint32_t k1 = hash_get_block_32(blocks, i); k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; h1 = hash_rotl_32(h1, 13); h1 = h1*5 + 0xe6546b64; } } /* tail */ { const uint8_t *tail = (const uint8_t *) (data + nblocks*4); uint32_t k1 = 0; switch (len & 3) { case 3: k1 ^= tail[2] << 16; case 2: k1 ^= tail[1] << 8; case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h1 = hash_fmix_32(h1); return (h1); } UNUSED JEMALLOC_INLINE void hash_x86_128(const void *key, const int len, uint32_t seed, uint64_t r_out[2]) { const uint8_t * data = (const uint8_t *) key; const int nblocks = len / 16; uint32_t h1 = seed; uint32_t h2 = seed; uint32_t h3 = seed; uint32_t h4 = seed; const uint32_t c1 = 0x239b961b; const uint32_t c2 = 0xab0e9789; const uint32_t c3 = 0x38b34ae5; const uint32_t c4 = 0xa1e38b93; /* body */ { const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); int i; for (i = -nblocks; i; i++) { uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; h1 = hash_rotl_32(h1, 19); h1 += h2; h1 = h1*5 + 0x561ccd1b; k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; h2 = hash_rotl_32(h2, 17); h2 += h3; h2 = h2*5 + 0x0bcaa747; k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; h3 = hash_rotl_32(h3, 15); h3 += h4; h3 = h3*5 + 0x96cd1c35; k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; h4 = hash_rotl_32(h4, 13); h4 += h1; h4 = h4*5 + 0x32ac3b17; } } /* tail */ { const uint8_t *tail = (const uint8_t *) (data + nblocks*16); uint32_t k1 = 0; uint32_t k2 = 0; uint32_t k3 = 0; uint32_t k4 = 0; switch (len & 15) { case 15: k4 ^= tail[14] << 16; case 14: k4 ^= tail[13] << 8; case 13: k4 ^= tail[12] << 0; k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; case 12: k3 ^= tail[11] << 24; case 11: k3 ^= tail[10] << 16; case 10: k3 ^= tail[ 9] << 8; case 9: k3 ^= tail[ 8] << 0; k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; case 8: k2 ^= tail[ 7] << 24; case 7: k2 ^= tail[ 6] << 16; case 6: k2 ^= tail[ 5] << 8; case 5: k2 ^= tail[ 4] << 0; k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; case 4: k1 ^= tail[ 3] << 24; case 3: k1 ^= tail[ 2] << 16; case 2: k1 ^= tail[ 1] << 8; case 1: k1 ^= tail[ 0] << 0; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; h1 += h2; h1 += h3; h1 += h4; h2 += h1; h3 += h1; h4 += h1; h1 = hash_fmix_32(h1); h2 = hash_fmix_32(h2); h3 = hash_fmix_32(h3); h4 = hash_fmix_32(h4); h1 += h2; h1 += h3; 
h1 += h4; h2 += h1; h3 += h1; h4 += h1; r_out[0] = (((uint64_t) h2) << 32) | h1; r_out[1] = (((uint64_t) h4) << 32) | h3; } UNUSED JEMALLOC_INLINE void hash_x64_128(const void *key, const int len, const uint32_t seed, uint64_t r_out[2]) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 16; uint64_t h1 = seed; uint64_t h2 = seed; const uint64_t c1 = KQU(0x87c37b91114253d5); const uint64_t c2 = KQU(0x4cf5ad432745937f); /* body */ { const uint64_t *blocks = (const uint64_t *) (data); int i; for (i = 0; i < nblocks; i++) { uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; h1 = hash_rotl_64(h1, 27); h1 += h2; h1 = h1*5 + 0x52dce729; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; h2 = hash_rotl_64(h2, 31); h2 += h1; h2 = h2*5 + 0x38495ab5; } } /* tail */ { const uint8_t *tail = (const uint8_t*)(data + nblocks*16); uint64_t k1 = 0; uint64_t k2 = 0; switch (len & 15) { case 15: k2 ^= ((uint64_t)(tail[14])) << 48; case 14: k2 ^= ((uint64_t)(tail[13])) << 40; case 13: k2 ^= ((uint64_t)(tail[12])) << 32; case 12: k2 ^= ((uint64_t)(tail[11])) << 24; case 11: k2 ^= ((uint64_t)(tail[10])) << 16; case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h2 ^= len; h1 += h2; h2 += h1; h1 = hash_fmix_64(h1); h2 = hash_fmix_64(h2); h1 += h2; h2 += h1; r_out[0] = h1; r_out[1] = h2; } /******************************************************************************/ /* API. */ JEMALLOC_INLINE void hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) { assert(len <= INT_MAX); /* Unfortunate implementation limitation. */ #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash); #else { uint64_t hashes[2]; hash_x86_128(key, (int)len, seed, hashes); r_hash[0] = (size_t)hashes[0]; r_hash[1] = (size_t)hashes[1]; } #endif } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
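/*
 * Editor's note: illustrative use of the public hash() wrapper above, not
 * part of the original header. The key length must fit in an int (asserted
 * inside hash()); the seed below is arbitrary. Both size_t outputs are
 * meaningful -- jemalloc's cuckoo hash (ckh) consumes the pair.
 */
static void
hash_sketch(void)
{
	const char key[] = "example-key";
	size_t h[2];

	hash(key, sizeof(key) - 1, 0x94122f33U, h);
	/* h[0] and h[1] are two independent hashes of the same key. */
}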
8,394
22.449721
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/jemalloc/include/jemalloc/internal/tsd.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* Maximum number of malloc_tsd users with cleanup functions. */ #define MALLOC_TSD_CLEANUPS_MAX 2 typedef bool (*malloc_tsd_cleanup_t)(void); #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) typedef struct tsd_init_block_s tsd_init_block_t; typedef struct tsd_init_head_s tsd_init_head_t; #endif typedef struct tsd_s tsd_t; typedef struct tsdn_s tsdn_t; #define TSDN_NULL ((tsdn_t *)0) typedef enum { tsd_state_uninitialized, tsd_state_nominal, tsd_state_purgatory, tsd_state_reincarnated } tsd_state_t; /* * TLS/TSD-agnostic macro-based implementation of thread-specific data. There * are five macros that support (at least) three use cases: file-private, * library-private, and library-private inlined. Following is an example * library-private tsd variable: * * In example.h: * typedef struct { * int x; * int y; * } example_t; * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) * malloc_tsd_types(example_, example_t) * malloc_tsd_protos(, example_, example_t) * malloc_tsd_externs(example_, example_t) * In example.c: * malloc_tsd_data(, example_, example_t, EX_INITIALIZER) * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER, * example_tsd_cleanup) * * The result is a set of generated functions, e.g.: * * bool example_tsd_boot(void) {...} * bool example_tsd_booted_get(void) {...} * example_t *example_tsd_get(bool init) {...} * void example_tsd_set(example_t *val) {...} * * Note that all of the functions deal in terms of (a_type *) rather than * (a_type) so that it is possible to support non-pointer types (unlike * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is * cast to (void *). This means that the cleanup function needs to cast the * function argument to (a_type *), then dereference the resulting pointer to * access fields, e.g. * * void * example_tsd_cleanup(void *arg) * { * example_t *example = (example_t *)arg; * * example->x = 42; * [...] * if ([want the cleanup function to be called again]) * example_tsd_set(example); * } * * If example_tsd_set() is called within example_tsd_cleanup(), it will be * called again. This is similar to how pthreads TSD destruction works, except * that pthreads only calls the cleanup function again if the value was set to * non-NULL. */ /* malloc_tsd_types(). */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_types(a_name, a_type) #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_types(a_name, a_type) #elif (defined(_WIN32)) #define malloc_tsd_types(a_name, a_type) \ typedef struct { \ bool initialized; \ a_type val; \ } a_name##tsd_wrapper_t; #else #define malloc_tsd_types(a_name, a_type) \ typedef struct { \ bool initialized; \ a_type val; \ } a_name##tsd_wrapper_t; #endif /* malloc_tsd_protos(). */ #define malloc_tsd_protos(a_attr, a_name, a_type) \ a_attr bool \ a_name##tsd_boot0(void); \ a_attr void \ a_name##tsd_boot1(void); \ a_attr bool \ a_name##tsd_boot(void); \ a_attr bool \ a_name##tsd_booted_get(void); \ a_attr a_type * \ a_name##tsd_get(bool init); \ a_attr void \ a_name##tsd_set(a_type *val); /* malloc_tsd_externs(). 
*/ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_externs(a_name, a_type) \ extern __thread a_type a_name##tsd_tls; \ extern __thread bool a_name##tsd_initialized; \ extern bool a_name##tsd_booted; #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_externs(a_name, a_type) \ extern __thread a_type a_name##tsd_tls; \ extern pthread_key_t a_name##tsd_tsd; \ extern bool a_name##tsd_booted; #elif (defined(_WIN32)) #define malloc_tsd_externs(a_name, a_type) \ extern DWORD a_name##tsd_tsd; \ extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ extern bool a_name##tsd_booted; #else #define malloc_tsd_externs(a_name, a_type) \ extern pthread_key_t a_name##tsd_tsd; \ extern tsd_init_head_t a_name##tsd_init_head; \ extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ extern bool a_name##tsd_booted; #endif /* malloc_tsd_data(). */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr __thread a_type JEMALLOC_TLS_MODEL \ a_name##tsd_tls = a_initializer; \ a_attr __thread bool JEMALLOC_TLS_MODEL \ a_name##tsd_initialized = false; \ a_attr bool a_name##tsd_booted = false; #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr __thread a_type JEMALLOC_TLS_MODEL \ a_name##tsd_tls = a_initializer; \ a_attr pthread_key_t a_name##tsd_tsd; \ a_attr bool a_name##tsd_booted = false; #elif (defined(_WIN32)) #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr DWORD a_name##tsd_tsd; \ a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ false, \ a_initializer \ }; \ a_attr bool a_name##tsd_booted = false; #else #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr pthread_key_t a_name##tsd_tsd; \ a_attr tsd_init_head_t a_name##tsd_init_head = { \ ql_head_initializer(blocks), \ MALLOC_MUTEX_INITIALIZER \ }; \ a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ false, \ a_initializer \ }; \ a_attr bool a_name##tsd_booted = false; #endif /* malloc_tsd_funcs(). */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. */ \ a_attr bool \ a_name##tsd_cleanup_wrapper(void) \ { \ \ if (a_name##tsd_initialized) { \ a_name##tsd_initialized = false; \ a_cleanup(&a_name##tsd_tls); \ } \ return (a_name##tsd_initialized); \ } \ a_attr bool \ a_name##tsd_boot0(void) \ { \ \ if (a_cleanup != malloc_tsd_no_cleanup) { \ malloc_tsd_cleanup_register( \ &a_name##tsd_cleanup_wrapper); \ } \ a_name##tsd_booted = true; \ return (false); \ } \ a_attr void \ a_name##tsd_boot1(void) \ { \ \ /* Do nothing. */ \ } \ a_attr bool \ a_name##tsd_boot(void) \ { \ \ return (a_name##tsd_boot0()); \ } \ a_attr bool \ a_name##tsd_booted_get(void) \ { \ \ return (a_name##tsd_booted); \ } \ a_attr bool \ a_name##tsd_get_allocates(void) \ { \ \ return (false); \ } \ /* Get/set. */ \ a_attr a_type * \ a_name##tsd_get(bool init) \ { \ \ assert(a_name##tsd_booted); \ return (&a_name##tsd_tls); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ \ assert(a_name##tsd_booted); \ a_name##tsd_tls = (*val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ a_name##tsd_initialized = true; \ } #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. 
*/ \ a_attr bool \ a_name##tsd_boot0(void) \ { \ \ if (a_cleanup != malloc_tsd_no_cleanup) { \ if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \ 0) \ return (true); \ } \ a_name##tsd_booted = true; \ return (false); \ } \ a_attr void \ a_name##tsd_boot1(void) \ { \ \ /* Do nothing. */ \ } \ a_attr bool \ a_name##tsd_boot(void) \ { \ \ return (a_name##tsd_boot0()); \ } \ a_attr bool \ a_name##tsd_booted_get(void) \ { \ \ return (a_name##tsd_booted); \ } \ a_attr bool \ a_name##tsd_get_allocates(void) \ { \ \ return (false); \ } \ /* Get/set. */ \ a_attr a_type * \ a_name##tsd_get(bool init) \ { \ \ assert(a_name##tsd_booted); \ return (&a_name##tsd_tls); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ \ assert(a_name##tsd_booted); \ a_name##tsd_tls = (*val); \ if (a_cleanup != malloc_tsd_no_cleanup) { \ if (pthread_setspecific(a_name##tsd_tsd, \ (void *)(&a_name##tsd_tls))) { \ malloc_write("<jemalloc>: Error" \ " setting TSD for "#a_name"\n"); \ if (opt_abort) \ abort(); \ } \ } \ } #elif (defined(_WIN32)) #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. */ \ a_attr bool \ a_name##tsd_cleanup_wrapper(void) \ { \ DWORD error = GetLastError(); \ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ TlsGetValue(a_name##tsd_tsd); \ SetLastError(error); \ \ if (wrapper == NULL) \ return (false); \ if (a_cleanup != malloc_tsd_no_cleanup && \ wrapper->initialized) { \ wrapper->initialized = false; \ a_cleanup(&wrapper->val); \ if (wrapper->initialized) { \ /* Trigger another cleanup round. */ \ return (true); \ } \ } \ malloc_tsd_dalloc(wrapper); \ return (false); \ } \ a_attr void \ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \ { \ \ if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \ malloc_write("<jemalloc>: Error setting" \ " TSD for "#a_name"\n"); \ abort(); \ } \ } \ a_attr a_name##tsd_wrapper_t * \ a_name##tsd_wrapper_get(bool init) \ { \ DWORD error = GetLastError(); \ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ TlsGetValue(a_name##tsd_tsd); \ SetLastError(error); \ \ if (init && unlikely(wrapper == NULL)) { \ wrapper = (a_name##tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ if (wrapper == NULL) { \ malloc_write("<jemalloc>: Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } else { \ wrapper->initialized = false; \ wrapper->val = a_initializer; \ } \ a_name##tsd_wrapper_set(wrapper); \ } \ return (wrapper); \ } \ a_attr bool \ a_name##tsd_boot0(void) \ { \ \ a_name##tsd_tsd = TlsAlloc(); \ if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \ return (true); \ if (a_cleanup != malloc_tsd_no_cleanup) { \ malloc_tsd_cleanup_register( \ &a_name##tsd_cleanup_wrapper); \ } \ a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \ a_name##tsd_booted = true; \ return (false); \ } \ a_attr void \ a_name##tsd_boot1(void) \ { \ a_name##tsd_wrapper_t *wrapper; \ wrapper = (a_name##tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ if (wrapper == NULL) { \ malloc_write("<jemalloc>: Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } \ memcpy(wrapper, &a_name##tsd_boot_wrapper, \ sizeof(a_name##tsd_wrapper_t)); \ a_name##tsd_wrapper_set(wrapper); \ } \ a_attr bool \ a_name##tsd_boot(void) \ { \ \ if (a_name##tsd_boot0()) \ return (true); \ a_name##tsd_boot1(); \ return (false); \ } \ a_attr bool \ a_name##tsd_booted_get(void) \ { \ \ return (a_name##tsd_booted); \ } \ a_attr bool \ a_name##tsd_get_allocates(void) \ { \ \ return (true); \ } \ /* 
Get/set. */ \ a_attr a_type * \ a_name##tsd_get(bool init) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ wrapper = a_name##tsd_wrapper_get(init); \ if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \ return (NULL); \ return (&wrapper->val); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ wrapper = a_name##tsd_wrapper_get(true); \ wrapper->val = *(val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ wrapper->initialized = true; \ } #else #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. */ \ a_attr void \ a_name##tsd_cleanup_wrapper(void *arg) \ { \ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \ \ if (a_cleanup != malloc_tsd_no_cleanup && \ wrapper->initialized) { \ wrapper->initialized = false; \ a_cleanup(&wrapper->val); \ if (wrapper->initialized) { \ /* Trigger another cleanup round. */ \ if (pthread_setspecific(a_name##tsd_tsd, \ (void *)wrapper)) { \ malloc_write("<jemalloc>: Error" \ " setting TSD for "#a_name"\n"); \ if (opt_abort) \ abort(); \ } \ return; \ } \ } \ malloc_tsd_dalloc(wrapper); \ } \ a_attr void \ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \ { \ \ if (pthread_setspecific(a_name##tsd_tsd, \ (void *)wrapper)) { \ malloc_write("<jemalloc>: Error setting" \ " TSD for "#a_name"\n"); \ abort(); \ } \ } \ a_attr a_name##tsd_wrapper_t * \ a_name##tsd_wrapper_get(bool init) \ { \ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ pthread_getspecific(a_name##tsd_tsd); \ \ if (init && unlikely(wrapper == NULL)) { \ tsd_init_block_t block; \ wrapper = tsd_init_check_recursion( \ &a_name##tsd_init_head, &block); \ if (wrapper) \ return (wrapper); \ wrapper = (a_name##tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ block.data = wrapper; \ if (wrapper == NULL) { \ malloc_write("<jemalloc>: Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } else { \ wrapper->initialized = false; \ wrapper->val = a_initializer; \ } \ a_name##tsd_wrapper_set(wrapper); \ tsd_init_finish(&a_name##tsd_init_head, &block); \ } \ return (wrapper); \ } \ a_attr bool \ a_name##tsd_boot0(void) \ { \ \ if (pthread_key_create(&a_name##tsd_tsd, \ a_name##tsd_cleanup_wrapper) != 0) \ return (true); \ a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \ a_name##tsd_booted = true; \ return (false); \ } \ a_attr void \ a_name##tsd_boot1(void) \ { \ a_name##tsd_wrapper_t *wrapper; \ wrapper = (a_name##tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ if (wrapper == NULL) { \ malloc_write("<jemalloc>: Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } \ memcpy(wrapper, &a_name##tsd_boot_wrapper, \ sizeof(a_name##tsd_wrapper_t)); \ a_name##tsd_wrapper_set(wrapper); \ } \ a_attr bool \ a_name##tsd_boot(void) \ { \ \ if (a_name##tsd_boot0()) \ return (true); \ a_name##tsd_boot1(); \ return (false); \ } \ a_attr bool \ a_name##tsd_booted_get(void) \ { \ \ return (a_name##tsd_booted); \ } \ a_attr bool \ a_name##tsd_get_allocates(void) \ { \ \ return (true); \ } \ /* Get/set. 
*/ \ a_attr a_type * \ a_name##tsd_get(bool init) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ wrapper = a_name##tsd_wrapper_get(init); \ if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \ return (NULL); \ return (&wrapper->val); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ wrapper = a_name##tsd_wrapper_get(true); \ wrapper->val = *(val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ wrapper->initialized = true; \ } #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) struct tsd_init_block_s { ql_elm(tsd_init_block_t) link; pthread_t thread; void *data; }; struct tsd_init_head_s { ql_head(tsd_init_block_t) blocks; malloc_mutex_t lock; }; #endif #define MALLOC_TSD \ /* O(name, type) */ \ O(tcache, tcache_t *) \ O(thread_allocated, uint64_t) \ O(thread_deallocated, uint64_t) \ O(prof_tdata, prof_tdata_t *) \ O(iarena, arena_t *) \ O(arena, arena_t *) \ O(arenas_tdata, arena_tdata_t *) \ O(narenas_tdata, unsigned) \ O(arenas_tdata_bypass, bool) \ O(tcache_enabled, tcache_enabled_t) \ O(quarantine, quarantine_t *) \ O(witnesses, witness_list_t) \ O(witness_fork, bool) \ #define TSD_INITIALIZER { \ tsd_state_uninitialized, \ NULL, \ 0, \ 0, \ NULL, \ NULL, \ NULL, \ NULL, \ 0, \ false, \ tcache_enabled_default, \ NULL, \ ql_head_initializer(witnesses), \ false \ } struct tsd_s { tsd_state_t state; #define O(n, t) \ t n; MALLOC_TSD #undef O }; /* * Wrapper around tsd_t that makes it possible to avoid implicit conversion * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be * explicitly converted to tsd_t, which is non-nullable. 
*/ struct tsdn_s { tsd_t tsd; }; static const tsd_t tsd_initializer = TSD_INITIALIZER; malloc_tsd_types(, tsd_t) #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *malloc_tsd_malloc(size_t size); void malloc_tsd_dalloc(void *wrapper); void malloc_tsd_no_cleanup(void *arg); void malloc_tsd_cleanup_register(bool (*f)(void)); tsd_t *malloc_tsd_boot0(void); void malloc_tsd_boot1(void); #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) void *tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block); void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); #endif void tsd_cleanup(void *arg); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t) tsd_t *tsd_fetch_impl(bool init); tsd_t *tsd_fetch(void); tsdn_t *tsd_tsdn(tsd_t *tsd); bool tsd_nominal(tsd_t *tsd); #define O(n, t) \ t *tsd_##n##p_get(tsd_t *tsd); \ t tsd_##n##_get(tsd_t *tsd); \ void tsd_##n##_set(tsd_t *tsd, t n); MALLOC_TSD #undef O tsdn_t *tsdn_fetch(void); bool tsdn_null(const tsdn_t *tsdn); tsd_t *tsdn_tsd(tsdn_t *tsdn); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_)) malloc_tsd_externs(, tsd_t) malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup) JEMALLOC_ALWAYS_INLINE tsd_t * tsd_fetch_impl(bool init) { tsd_t *tsd = tsd_get(init); if (!init && tsd_get_allocates() && tsd == NULL) return (NULL); assert(tsd != NULL); if (unlikely(tsd->state != tsd_state_nominal)) { if (tsd->state == tsd_state_uninitialized) { tsd->state = tsd_state_nominal; /* Trigger cleanup handler registration. */ tsd_set(tsd); } else if (tsd->state == tsd_state_purgatory) { tsd->state = tsd_state_reincarnated; tsd_set(tsd); } else assert(tsd->state == tsd_state_reincarnated); } return (tsd); } JEMALLOC_ALWAYS_INLINE tsd_t * tsd_fetch(void) { return (tsd_fetch_impl(true)); } JEMALLOC_ALWAYS_INLINE tsdn_t * tsd_tsdn(tsd_t *tsd) { return ((tsdn_t *)tsd); } JEMALLOC_INLINE bool tsd_nominal(tsd_t *tsd) { return (tsd->state == tsd_state_nominal); } #define O(n, t) \ JEMALLOC_ALWAYS_INLINE t * \ tsd_##n##p_get(tsd_t *tsd) \ { \ \ return (&tsd->n); \ } \ \ JEMALLOC_ALWAYS_INLINE t \ tsd_##n##_get(tsd_t *tsd) \ { \ \ return (*tsd_##n##p_get(tsd)); \ } \ \ JEMALLOC_ALWAYS_INLINE void \ tsd_##n##_set(tsd_t *tsd, t n) \ { \ \ assert(tsd->state == tsd_state_nominal); \ tsd->n = n; \ } MALLOC_TSD #undef O JEMALLOC_ALWAYS_INLINE tsdn_t * tsdn_fetch(void) { if (!tsd_booted_get()) return (NULL); return (tsd_tsdn(tsd_fetch_impl(false))); } JEMALLOC_ALWAYS_INLINE bool tsdn_null(const tsdn_t *tsdn) { return (tsdn == NULL); } JEMALLOC_ALWAYS_INLINE tsd_t * tsdn_tsd(tsdn_t *tsdn) { assert(!tsdn_null(tsdn)); return (&tsdn->tsd); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
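/*
 * Editor's note: sketch of the tsd/tsdn discipline described above, not part
 * of the original header. tsdn_fetch() is legal before TSD is booted and may
 * return NULL; tsd_fetch() asserts a usable tsd_t. Code holding a tsdn_t
 * must check tsdn_null() before converting back with tsdn_tsd().
 */
static void
tsdn_sketch(void)
{
	tsdn_t *tsdn = tsdn_fetch();

	if (!tsdn_null(tsdn)) {
		tsd_t *tsd = tsdn_tsd(tsdn);
		(void)tsd_nominal(tsd);
	}
}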
21,743
26.593909
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/geohash-int/geohash.h
/* * Copyright (c) 2013-2014, yinqiwen <[email protected]> * Copyright (c) 2014, Matt Stancliff <[email protected]>. * Copyright (c) 2015, Salvatore Sanfilippo <[email protected]>. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef GEOHASH_H_ #define GEOHASH_H_ #include <stddef.h> #include <stdint.h> #if defined(__cplusplus) extern "C" { #endif #define HASHISZERO(r) (!(r).bits && !(r).step) #define RANGEISZERO(r) (!(r).max && !(r).min) #define RANGEPISZERO(r) (r == NULL || RANGEISZERO(*r)) #define GEO_STEP_MAX 26 /* 26*2 = 52 bits. 
*/ /* Limits from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ #define GEO_LAT_MIN -85.05112878 #define GEO_LAT_MAX 85.05112878 #define GEO_LONG_MIN -180 #define GEO_LONG_MAX 180 typedef enum { GEOHASH_NORTH = 0, GEOHASH_EAST, GEOHASH_WEST, GEOHASH_SOUTH, GEOHASH_SOUTH_WEST, GEOHASH_SOUTH_EAST, GEOHASH_NORT_WEST, GEOHASH_NORT_EAST } GeoDirection; typedef struct { uint64_t bits; uint8_t step; } GeoHashBits; typedef struct { double min; double max; } GeoHashRange; typedef struct { GeoHashBits hash; GeoHashRange longitude; GeoHashRange latitude; } GeoHashArea; typedef struct { GeoHashBits north; GeoHashBits east; GeoHashBits west; GeoHashBits south; GeoHashBits north_east; GeoHashBits south_east; GeoHashBits north_west; GeoHashBits south_west; } GeoHashNeighbors; /* * 0:success * -1:failed */ void geohashGetCoordRange(GeoHashRange *long_range, GeoHashRange *lat_range); int geohashEncode(const GeoHashRange *long_range, const GeoHashRange *lat_range, double longitude, double latitude, uint8_t step, GeoHashBits *hash); int geohashEncodeType(double longitude, double latitude, uint8_t step, GeoHashBits *hash); int geohashEncodeWGS84(double longitude, double latitude, uint8_t step, GeoHashBits *hash); int geohashDecode(const GeoHashRange long_range, const GeoHashRange lat_range, const GeoHashBits hash, GeoHashArea *area); int geohashDecodeType(const GeoHashBits hash, GeoHashArea *area); int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area); int geohashDecodeAreaToLongLat(const GeoHashArea *area, double *xy); int geohashDecodeToLongLatType(const GeoHashBits hash, double *xy); int geohashDecodeToLongLatWGS84(const GeoHashBits hash, double *xy); int geohashDecodeToLongLatMercator(const GeoHashBits hash, double *xy); void geohashNeighbors(const GeoHashBits *hash, GeoHashNeighbors *neighbors); #if defined(__cplusplus) } #endif #endif /* GEOHASH_H_ */
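/*
 * Editor's note: a minimal encode/decode round trip for the API above, not
 * part of the original header. Arguments are (longitude, latitude) in WGS84;
 * GEO_STEP_MAX (26) yields the full 52-bit interleaved hash. Return values
 * are ignored here because the return-code comment above is ambiguous; the
 * coordinates (roughly Rome) are arbitrary sample values.
 */
static void
geohash_roundtrip_sketch(void)
{
	GeoHashBits hash = { 0, 0 };
	GeoHashArea area;

	geohashEncodeWGS84(12.496365, 41.902783, GEO_STEP_MAX, &hash);
	geohashDecodeWGS84(hash, &area);
	/*
	 * The decoded cell brackets the encoded point:
	 * area.longitude.min <= 12.496365 <= area.longitude.max,
	 * and likewise for latitude.
	 */
}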
4,124
33.663866
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/deps/geohash-int/geohash_helper.h
/* * Copyright (c) 2013-2014, yinqiwen <[email protected]> * Copyright (c) 2014, Matt Stancliff <[email protected]>. * Copyright (c) 2015, Salvatore Sanfilippo <[email protected]>. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef GEOHASH_HELPER_HPP_ #define GEOHASH_HELPER_HPP_ #include <math.h> #include "geohash.h" #define GZERO(s) s.bits = s.step = 0; #define GISZERO(s) (!s.bits && !s.step) #define GISNOTZERO(s) (s.bits || s.step) typedef uint64_t GeoHashFix52Bits; typedef uint64_t GeoHashVarBits; typedef struct { GeoHashBits hash; GeoHashArea area; GeoHashNeighbors neighbors; } GeoHashRadius; int GeoHashBitsComparator(const GeoHashBits *a, const GeoHashBits *b); uint8_t geohashEstimateStepsByRadius(double range_meters, double lat); int geohashBoundingBox(double longitude, double latitude, double radius_meters, double *bounds); GeoHashRadius geohashGetAreasByRadius(double longitude, double latitude, double radius_meters); GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude, double radius_meters); GeoHashRadius geohashGetAreasByRadiusMercator(double longitude, double latitude, double radius_meters); GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash); double geohashGetDistance(double lon1d, double lat1d, double lon2d, double lat2d); int geohashGetDistanceIfInRadius(double x1, double y1, double x2, double y2, double radius, double *distance); int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, double y2, double radius, double *distance); #endif /* GEOHASH_HELPER_HPP_ */
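/*
 * Editor's note: sketch of the distance helpers above, not part of the
 * original header. geohashGetDistance() returns meters (haversine in the
 * Redis implementation); the *IfInRadius variants return nonzero only when
 * the two points fall within radius_meters. The coordinates are arbitrary
 * sample values (roughly Rome and Milan, on the order of 475 km apart).
 */
static void
geo_distance_sketch(void)
{
	double dist;

	if (geohashGetDistanceIfInRadiusWGS84(12.496365, 41.902783,
	    9.190000, 45.464000, 500000.0, &dist)) {
		/* dist now holds the great-circle distance in meters. */
		(void)dist;
	}
}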
3,368
45.791667
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/utils/install_server.sh
#!/bin/sh

# Copyright 2011 Dvir Volk <dvirsk at gmail dot com>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL Dvir Volk OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
#
# Interactive service installer for redis server
# this generates a redis config file and an /etc/init.d script, and installs them
# this script should be run as root

die () {
	echo "ERROR: $1. Aborting!"
	exit 1
}

#Absolute path to this script
SCRIPT=$(readlink -f $0)
#Absolute path this script is in
SCRIPTPATH=$(dirname $SCRIPT)

#Initial defaults
_REDIS_PORT=6379

echo "Welcome to the redis service installer"
echo "This script will help you easily set up a running redis server"
echo

#check for root user
if [ "$(id -u)" -ne 0 ] ; then
	echo "You must run this script as root. Sorry!"
	exit 1
fi

#Read the redis port
read -p "Please select the redis port for this instance: [$_REDIS_PORT] " REDIS_PORT
if ! echo $REDIS_PORT | egrep -q '^[0-9]+$' ; then
	echo "Selecting default: $_REDIS_PORT"
	REDIS_PORT=$_REDIS_PORT
fi

#read the redis config file
_REDIS_CONFIG_FILE="/etc/redis/$REDIS_PORT.conf"
read -p "Please select the redis config file name [$_REDIS_CONFIG_FILE] " REDIS_CONFIG_FILE
if [ -z "$REDIS_CONFIG_FILE" ] ; then
	REDIS_CONFIG_FILE=$_REDIS_CONFIG_FILE
	echo "Selected default - $REDIS_CONFIG_FILE"
fi

#read the redis log file path
_REDIS_LOG_FILE="/var/log/redis_$REDIS_PORT.log"
read -p "Please select the redis log file name [$_REDIS_LOG_FILE] " REDIS_LOG_FILE
if [ -z "$REDIS_LOG_FILE" ] ; then
	REDIS_LOG_FILE=$_REDIS_LOG_FILE
	echo "Selected default - $REDIS_LOG_FILE"
fi

#get the redis data directory
_REDIS_DATA_DIR="/var/lib/redis/$REDIS_PORT"
read -p "Please select the data directory for this instance [$_REDIS_DATA_DIR] " REDIS_DATA_DIR
if [ -z "$REDIS_DATA_DIR" ] ; then
	REDIS_DATA_DIR=$_REDIS_DATA_DIR
	echo "Selected default - $REDIS_DATA_DIR"
fi

#get the redis executable path
_REDIS_EXECUTABLE=`command -v redis-server`
read -p "Please select the redis executable path [$_REDIS_EXECUTABLE] " REDIS_EXECUTABLE
if [ ! -x "$REDIS_EXECUTABLE" ] ; then
	REDIS_EXECUTABLE=$_REDIS_EXECUTABLE

	if [ ! -x "$REDIS_EXECUTABLE" ] ; then
		echo "Mmmmm... it seems like you don't have a redis executable. Did you run make install yet?"
exit 1 fi fi #check the default for redis cli CLI_EXEC=`command -v redis-cli` if [ -z "$CLI_EXEC" ] ; then CLI_EXEC=`dirname $REDIS_EXECUTABLE`"/redis-cli" fi echo "Selected config:" echo "Port : $REDIS_PORT" echo "Config file : $REDIS_CONFIG_FILE" echo "Log file : $REDIS_LOG_FILE" echo "Data dir : $REDIS_DATA_DIR" echo "Executable : $REDIS_EXECUTABLE" echo "Cli Executable : $CLI_EXEC" read -p "Is this ok? Then press ENTER to go on or Ctrl-C to abort." _UNUSED_ mkdir -p `dirname "$REDIS_CONFIG_FILE"` || die "Could not create redis config directory" mkdir -p `dirname "$REDIS_LOG_FILE"` || die "Could not create redis log dir" mkdir -p "$REDIS_DATA_DIR" || die "Could not create redis data directory" #render the templates TMP_FILE="/tmp/${REDIS_PORT}.conf" DEFAULT_CONFIG="${SCRIPTPATH}/../redis.conf" INIT_TPL_FILE="${SCRIPTPATH}/redis_init_script.tpl" INIT_SCRIPT_DEST="/etc/init.d/redis_${REDIS_PORT}" PIDFILE="/var/run/redis_${REDIS_PORT}.pid" if [ ! -f "$DEFAULT_CONFIG" ]; then echo "Mmmmm... the default config is missing. Did you switch to the utils directory?" exit 1 fi #Generate config file from the default config file as template #changing only the stuff we're controlling from this script echo "## Generated by install_server.sh ##" > $TMP_FILE read -r SED_EXPR <<-EOF s#^port [0-9]{4}\$#port ${REDIS_PORT}#; \ s#^logfile .+\$#logfile ${REDIS_LOG_FILE}#; \ s#^dir .+\$#dir ${REDIS_DATA_DIR}#; \ s#^pidfile .+\$#pidfile ${PIDFILE}#; \ s#^daemonize no\$#daemonize yes#; EOF sed -r "$SED_EXPR" $DEFAULT_CONFIG >> $TMP_FILE #cat $TPL_FILE | while read line; do eval "echo \"$line\"" >> $TMP_FILE; done cp $TMP_FILE $REDIS_CONFIG_FILE || die "Could not write redis config file $REDIS_CONFIG_FILE" #Generate sample script from template file rm -f $TMP_FILE #we hard code the configs here to avoid issues with templates containing env vars #kinda lame but works! REDIS_INIT_HEADER=\ "#!/bin/sh\n #Configurations injected by install_server below....\n\n EXEC=$REDIS_EXECUTABLE\n CLIEXEC=$CLI_EXEC\n PIDFILE=\"$PIDFILE\"\n CONF=\"$REDIS_CONFIG_FILE\"\n\n REDISPORT=\"$REDIS_PORT\"\n\n ###############\n\n" REDIS_CHKCONFIG_INFO=\ "# REDHAT chkconfig header\n\n # chkconfig: - 58 74\n # description: redis_${REDIS_PORT} is the redis daemon.\n ### BEGIN INIT INFO\n # Provides: redis_6379\n # Required-Start: \$network \$local_fs \$remote_fs\n # Required-Stop: \$network \$local_fs \$remote_fs\n # Default-Start: 2 3 4 5\n # Default-Stop: 0 1 6\n # Should-Start: \$syslog \$named\n # Should-Stop: \$syslog \$named\n # Short-Description: start and stop redis_${REDIS_PORT}\n # Description: Redis daemon\n ### END INIT INFO\n\n" if command -v chkconfig >/dev/null; then #if we're a box with chkconfig on it we want to include info for chkconfig echo "$REDIS_INIT_HEADER" "$REDIS_CHKCONFIG_INFO" > $TMP_FILE && cat $INIT_TPL_FILE >> $TMP_FILE || die "Could not write init script to $TMP_FILE" else #combine the header and the template (which is actually a static footer) echo "$REDIS_INIT_HEADER" > $TMP_FILE && cat $INIT_TPL_FILE >> $TMP_FILE || die "Could not write init script to $TMP_FILE" fi ### # Generate sample script from template file # - No need to check which system we are on. The init info are comments and # do not interfere with update_rc.d systems. Additionally: # Ubuntu/debian by default does not come with chkconfig, but does issue a # warning if init info is not available. cat > ${TMP_FILE} <<EOT #!/bin/sh #Configurations injected by install_server below.... 
EXEC=$REDIS_EXECUTABLE CLIEXEC=$CLI_EXEC PIDFILE=$PIDFILE CONF="$REDIS_CONFIG_FILE" REDISPORT="$REDIS_PORT" ############### # SysV Init Information # chkconfig: - 58 74 # description: redis_${REDIS_PORT} is the redis daemon. ### BEGIN INIT INFO # Provides: redis_${REDIS_PORT} # Required-Start: \$network \$local_fs \$remote_fs # Required-Stop: \$network \$local_fs \$remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Should-Start: \$syslog \$named # Should-Stop: \$syslog \$named # Short-Description: start and stop redis_${REDIS_PORT} # Description: Redis daemon ### END INIT INFO EOT cat ${INIT_TPL_FILE} >> ${TMP_FILE} #copy to /etc/init.d cp $TMP_FILE $INIT_SCRIPT_DEST && \ chmod +x $INIT_SCRIPT_DEST || die "Could not copy redis init script to $INIT_SCRIPT_DEST" echo "Copied $TMP_FILE => $INIT_SCRIPT_DEST" #Install the service echo "Installing service..." if command -v chkconfig >/dev/null 2>&1; then # we're chkconfig, so lets add to chkconfig and put in runlevel 345 chkconfig --add redis_${REDIS_PORT} && echo "Successfully added to chkconfig!" chkconfig --level 345 redis_${REDIS_PORT} on && echo "Successfully added to runlevels 345!" elif command -v update-rc.d >/dev/null 2>&1; then #if we're not a chkconfig box assume we're able to use update-rc.d update-rc.d redis_${REDIS_PORT} defaults && echo "Success!" else echo "No supported init tool found." fi /etc/init.d/redis_$REDIS_PORT start || die "Failed starting service..." #tada echo "Installation successful!" exit 0
8,545
33.739837
147
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/utils/whatisdoing.sh
#!/bin/bash

# This script is from http://poormansprofiler.org/
#
# NOTE: Instead of using this script, you should use the Redis
# Software Watchdog, which provides a similar functionality but in
# a more reliable / easy to use way.
#
# Check http://redis.io/topics/latency for more information.

nsamples=1
sleeptime=0
pid=$(ps auxww | grep '[r]edis-server' | awk '{print $2}')

for x in $(seq 1 $nsamples)
  do
    gdb -ex "set pagination 0" -ex "thread apply all bt" -batch -p $pid
    sleep $sleeptime
  done | \
awk '
BEGIN { s = ""; }
/Thread/ { print s; s = ""; }
/^\#/ { if (s != "" ) { s = s "," $4} else { s = $4 } }
END { print s }' | \
sort | uniq -c | sort -r -n -k 1,1
693
26.76
71
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/utils/releasetools/02_upload_tarball.sh
#!/bin/bash echo "Uploading..." scp /tmp/redis-${1}.tar.gz [email protected]:/var/virtual/download.redis.io/httpdocs/releases/ echo "Updating web site... (press any key if it is a stable release, or Ctrl+C)" read x ssh [email protected] "cd /var/virtual/download.redis.io/httpdocs; ./update.sh ${1}"
304
42.571429
96
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/utils/releasetools/04_release_hash.sh
#!/bin/bash
SHA=$(curl -s http://download.redis.io/releases/redis-${1}.tar.gz | shasum | cut -f 1 -d' ')
ENTRY="hash redis-${1}.tar.gz sha1 $SHA http://download.redis.io/releases/redis-${1}.tar.gz"
echo $ENTRY >> ~/hack/redis-hashes/README
vi ~/hack/redis-hashes/README
echo "Press any key to commit, Ctrl-C to abort."
read yes
(cd ~/hack/redis-hashes; git commit -a -m "${1} hash."; git push)
395
43
92
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/utils/releasetools/03_test_release.sh
#!/bin/sh if [ $# != "1" ] then echo "Usage: ${0} <git-ref>" exit 1 fi TAG=$1 TARNAME="redis-${TAG}.tar.gz" DOWNLOADURL="http://download.redis.io/releases/${TARNAME}" ssh antirez@metal "export TERM=xterm; cd /tmp; rm -rf test_release_tmp_dir; cd test_release_tmp_dir; wget $DOWNLOADURL; tar xvzf $TARNAME; cd redis-${TAG}; make; ./runtest; ./runtest-sentinel; if [ -x runtest-cluster ]; then ./runtest-cluster; fi"
657
25.32
58
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/redis-NDP-chekpoint/utils/releasetools/01_create_tarball.sh
#!/bin/sh
if [ $# != "1" ]
then
    echo "Usage: ./mkrelease.sh <git-ref>"
    exit 1
fi

TAG=$1
TARNAME="redis-${TAG}.tar"
echo "Generating /tmp/${TARNAME}"
cd ~/hack/redis
git archive $TAG --prefix redis-${TAG}/ > /tmp/$TARNAME || exit 1

echo "Gzipping the archive"
rm -f /tmp/$TARNAME.gz
gzip -9 /tmp/$TARNAME
314
18.6875
65
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/TATP_CP/tatp_db.h
/*
Author: Vaibhav Gogte <[email protected]>
Aasheesh Kolli <[email protected]>

This file declares the TATP database and the different transactions supported by the database.
*/

#include <cstdint>
//#include <atomic>
#include <pthread.h>

#include "tableEntries.h"
#include "../include/txopt.h"

class TATP_DB{

private:
  long total_subscribers; // Holds the number of subscribers
  int num_threads;
  subscriber_entry* subscriber_table; // Pointer to the subscriber table
  access_info_entry* access_info_table; // Pointer to the access info table
  special_facility_entry* special_facility_table; // Pointer to the special facility table
  call_forwarding_entry* call_forwarding_table; // Pointer to the call forwarding table
  pthread_mutex_t* lock_; // Lock per subscriber to protect the update
  //std::atomic<long>** txCounts; // Array of tx counts, success and fails
  unsigned long* subscriber_rndm_seeds;
  unsigned long* vlr_rndm_seeds;
  unsigned long* rndm_seeds;

public:
  TATP_DB(unsigned num_subscribers); // Constructs and sizes tables as per num_subscribers
  ~TATP_DB();

  void initialize(unsigned num_subscribers, int n);

  void populate_tables(unsigned num_subscribers); // Populates the various tables

  void fill_subscriber_entry(unsigned _s_id); // Fills subscriber table entry given subscriber id
  void fill_access_info_entry(unsigned _s_id, short _ai_type); // Fills access info table entry given subscriber id and ai_type
  void fill_special_facility_entry(unsigned _s_id, short _sf_type); // Fills special facility table entry given subscriber id and sf_type
  void fill_call_forwarding_entry(unsigned _s_id, short _sf_type, short _start_time); // Fills call forwarding table entry given subscriber id, sf_type and start time

  void convert_to_string(unsigned number, int num_digits, char* string_ptr);
  void make_upper_case_string(char* string_ptr, int num_chars);

  void update_subscriber_data(int threadId); // Tx: updates a random subscriber data
  void update_location(int threadId, int num_ops); // Tx: updates location for a random subscriber
  void insert_call_forwarding(int threadId); // Tx: Inserts into call forwarding table for a random user
  void delete_call_forwarding(int threadId); // Tx: Deletes call forwarding for a random user

  unsigned long get_random(int thread_id, int min, int max);
  unsigned long get_random(int thread_id);
  unsigned long get_random_s_id(int thread_id);
  unsigned long get_random_vlr(int thread_id);

  void print_results();
};

//DS for logging info to recover from a failed update_subscriber_data Tx
struct recovery_update_subscriber_data {
  char txType; // will be '0'
  unsigned s_id; // the subscriber id being updated
  short sf_type; // the sf_type being modified
  short bit_1; // the old bit_1 value
  short data_a; // the old data_a value
  char padding[5];
};

struct recovery_update_location {
  char txType; // will be '1'
  unsigned s_id; // the subscriber whose location is being updated
  unsigned vlr_location; // the old vlr location
  char padding[7];
};
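The two recovery structs above describe undo records, but the header declares no replay routine. The sketch below shows how such records could be applied after a crash; it is an assumption-laden illustration (apply_undo() is hypothetical, and the field usage follows only the comments in the header).

// Hypothetical recovery pass over a log of the records declared above.
#include <cstring>

static void apply_undo(const char* rec, subscriber_entry* subscribers) {
  switch (rec[0]) {                 // txType is the first byte
  case '0': {                       // failed update_subscriber_data
    recovery_update_subscriber_data log;
    std::memcpy(&log, rec, sizeof(log));
    subscribers[log.s_id].bit_1 = log.bit_1;   // restore the old bit_1
    // log.data_a belongs to the special facility row selected by
    // log.sf_type; restoring it would index that table the same way.
    break;
  }
  case '1': {                       // failed update_location
    recovery_update_location log;
    std::memcpy(&log, rec, sizeof(log));
    subscribers[log.s_id].vlr_location = log.vlr_location;
    break;
  }
  }
}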
3,135
39.727273
168
h
null
NearPMSW-main/nearpmMDsync/checkpointing/TATP_CP/tatp_db.cc
/* Author: Vaibhav Gogte <[email protected]> Aasheesh Kolli <[email protected]> This file defines the various transactions in TATP. */ #include "tatp_db.h" #include <cstdlib> // For rand #include <iostream> #include <libpmem.h> #include <sys/mman.h> //#include <queue> //#include <iostream> #define NUM_RNDM_SEEDS 1280 subscriber_entry * subscriber_table_entry_backup; uint64_t * subscriber_table_entry_backup_valid; extern void * device; static void setpage(void * addr){ uint64_t pageNo = ((uint64_t)addr)/4096; unsigned long * pageStart = (unsigned long *)(pageNo*4096); mprotect(pageStart, 4096, PROT_READ); return; } extern void * checkpoint_start; int getRand() { return rand(); } TATP_DB::TATP_DB(unsigned num_subscribers) {} //Korakit //this function is used in setup phase, no need to provide crash consistency void TATP_DB::initialize(unsigned num_subscribers, int n) { total_subscribers = num_subscribers; num_threads = n; size_t mapped_len; int is_pmem; void * pmemstart; int totsize = num_subscribers*sizeof(subscriber_entry) + 4*num_subscribers*sizeof(access_info_entry) + 4*num_subscribers*sizeof(special_facility_entry) + 3*4*num_subscribers*sizeof(call_forwarding_entry) + NUM_RNDM_SEEDS*sizeof(unsigned long) + NUM_RNDM_SEEDS*sizeof(unsigned long) + NUM_RNDM_SEEDS*sizeof(unsigned long) + sizeof(subscriber_entry) + sizeof(uint64_t); if ((pmemstart = pmem_map_file("/mnt/mem/tatp", totsize, PMEM_FILE_CREATE, 0666, &mapped_len, &is_pmem)) == NULL) { fprintf(stderr, "pmem_map_file failed\n"); exit(0); } if ((checkpoint_start = pmem_map_file("/mnt/mem/checkpoint", 4096*50, PMEM_FILE_CREATE, 0666, &mapped_len, &is_pmem)) == NULL) { fprintf(stderr, "pmem_map_file failed\n"); exit(0); } subscriber_table = (subscriber_entry*) pmemstart; // A max of 4 access info entries per subscriber access_info_table = (access_info_entry*) (pmemstart + num_subscribers*sizeof(subscriber_entry)); // A max of 4 access info entries per subscriber special_facility_table = (special_facility_entry*) (pmemstart + num_subscribers*sizeof(subscriber_entry) + 4*num_subscribers*sizeof(access_info_entry)); // A max of 3 call forwarding entries per "special facility entry" call_forwarding_table= (call_forwarding_entry*) (pmemstart + num_subscribers*sizeof(subscriber_entry) + 4*num_subscribers*sizeof(access_info_entry) + 4*num_subscribers*sizeof(special_facility_entry)); //Korakit //removed for single thread version /* lock_ = (pthread_mutex_t *)malloc(n*sizeof(pthread_mutex_t)); for(int i=0; i<num_threads; i++) { pthread_mutex_init(&lock_[i], NULL); } */ for(int i=0; i<4*num_subscribers; i++) { access_info_table[i].valid = false; special_facility_table[i].valid = false; for(int j=0; j<3; j++) { call_forwarding_table[3*i+j].valid = false; // printf("%d\n",j); } // printf("%d %d %d\n", i, 4*num_subscribers, totsize); } //printf("ab\n"); //rndm_seeds = new std::atomic<unsigned long>[NUM_RNDM_SEEDS]; //rndm_seeds = (std::atomic<unsigned long>*) malloc(NUM_RNDM_SEEDS*sizeof(std::atomic<unsigned long>)); subscriber_rndm_seeds = (unsigned long*) (pmemstart + num_subscribers*sizeof(subscriber_entry) + 4*num_subscribers*sizeof(access_info_entry) + 4*num_subscribers*sizeof(special_facility_entry) + 3*4*num_subscribers*sizeof(call_forwarding_entry)); vlr_rndm_seeds = (unsigned long*) (pmemstart + num_subscribers*sizeof(subscriber_entry) + 4*num_subscribers*sizeof(access_info_entry) + 4*num_subscribers*sizeof(special_facility_entry) + 3*4*num_subscribers*sizeof(call_forwarding_entry) + NUM_RNDM_SEEDS*sizeof(unsigned long)); rndm_seeds = 
(unsigned long*) (pmemstart + num_subscribers*sizeof(subscriber_entry) + 4*num_subscribers*sizeof(access_info_entry) + 4*num_subscribers*sizeof(special_facility_entry) + 3*4*num_subscribers*sizeof(call_forwarding_entry) + NUM_RNDM_SEEDS*sizeof(unsigned long) + NUM_RNDM_SEEDS*sizeof(unsigned long)); subscriber_table_entry_backup = (subscriber_entry*) (pmemstart + num_subscribers*sizeof(subscriber_entry) + 4*num_subscribers*sizeof(access_info_entry) + 4*num_subscribers*sizeof(special_facility_entry) + 3*4*num_subscribers*sizeof(call_forwarding_entry) + NUM_RNDM_SEEDS*sizeof(unsigned long) + NUM_RNDM_SEEDS*sizeof(unsigned long) + NUM_RNDM_SEEDS*sizeof(unsigned long)); subscriber_table_entry_backup_valid = (uint64_t*)(pmemstart + num_subscribers*sizeof(subscriber_entry) + 4*num_subscribers*sizeof(access_info_entry) + 4*num_subscribers*sizeof(special_facility_entry) + 3*4*num_subscribers*sizeof(call_forwarding_entry) + NUM_RNDM_SEEDS*sizeof(unsigned long) + NUM_RNDM_SEEDS*sizeof(unsigned long) + NUM_RNDM_SEEDS*sizeof(unsigned long) + sizeof(subscriber_entry) ); //sgetRand(); for(int i=0; i<NUM_RNDM_SEEDS; i++) { subscriber_rndm_seeds[i] = getRand()%(NUM_RNDM_SEEDS*10)+1; vlr_rndm_seeds[i] = getRand()%(NUM_RNDM_SEEDS*10)+1; rndm_seeds[i] = getRand()%(NUM_RNDM_SEEDS*10)+1; //std::cout<<i<<" "<<rndm_seeds[i]<<std::endl; } } TATP_DB::~TATP_DB(){ free(subscriber_rndm_seeds); free(vlr_rndm_seeds); free(rndm_seeds); } //Korakit //this function is used in setup phase, no need to provide crash consistency void TATP_DB::populate_tables(unsigned num_subscribers) { for(int i=0; i<num_subscribers; i++) { fill_subscriber_entry(i); int num_ai_types = getRand()%4 + 1; // num_ai_types varies from 1->4 for(int j=1; j<=num_ai_types; j++) { fill_access_info_entry(i,j); } int num_sf_types = getRand()%4 + 1; // num_sf_types varies from 1->4 for(int k=1; k<=num_sf_types; k++) { fill_special_facility_entry(i,k); int num_call_forwards = getRand()%4; // num_call_forwards varies from 0->3 for(int p=0; p<num_call_forwards; p++) { fill_call_forwarding_entry(i,k,8*p); } } } } //Korakit //this function is used in setup phase, no need to provide crash consistency void TATP_DB::fill_subscriber_entry(unsigned _s_id) { subscriber_table[_s_id].s_id = _s_id; convert_to_string(_s_id, 15, subscriber_table[_s_id].sub_nbr); subscriber_table[_s_id].bit_1 = (short) (getRand()%2); subscriber_table[_s_id].bit_2 = (short) (getRand()%2); subscriber_table[_s_id].bit_3 = (short) (getRand()%2); subscriber_table[_s_id].bit_4 = (short) (getRand()%2); subscriber_table[_s_id].bit_5 = (short) (getRand()%2); subscriber_table[_s_id].bit_6 = (short) (getRand()%2); subscriber_table[_s_id].bit_7 = (short) (getRand()%2); subscriber_table[_s_id].bit_8 = (short) (getRand()%2); subscriber_table[_s_id].bit_9 = (short) (getRand()%2); subscriber_table[_s_id].bit_10 = (short) (getRand()%2); subscriber_table[_s_id].hex_1 = (short) (getRand()%16); subscriber_table[_s_id].hex_2 = (short) (getRand()%16); subscriber_table[_s_id].hex_3 = (short) (getRand()%16); subscriber_table[_s_id].hex_4 = (short) (getRand()%16); subscriber_table[_s_id].hex_5 = (short) (getRand()%16); subscriber_table[_s_id].hex_6 = (short) (getRand()%16); subscriber_table[_s_id].hex_7 = (short) (getRand()%16); subscriber_table[_s_id].hex_8 = (short) (getRand()%16); subscriber_table[_s_id].hex_9 = (short) (getRand()%16); subscriber_table[_s_id].hex_10 = (short) (getRand()%16); subscriber_table[_s_id].byte2_1 = (short) (getRand()%256); subscriber_table[_s_id].byte2_2 = (short) (getRand()%256); 
subscriber_table[_s_id].byte2_3 = (short) (getRand()%256);
subscriber_table[_s_id].byte2_4 = (short) (getRand()%256);
subscriber_table[_s_id].byte2_5 = (short) (getRand()%256);
subscriber_table[_s_id].byte2_6 = (short) (getRand()%256);
subscriber_table[_s_id].byte2_7 = (short) (getRand()%256);
subscriber_table[_s_id].byte2_8 = (short) (getRand()%256);
subscriber_table[_s_id].byte2_9 = (short) (getRand()%256);
subscriber_table[_s_id].byte2_10 = (short) (getRand()%256);

subscriber_table[_s_id].msc_location = getRand()%0xFFFFFFFFu + 1; // 1 -> ((2^32)-1); written in hex because '^' is XOR in C
subscriber_table[_s_id].vlr_location = getRand()%0xFFFFFFFFu + 1; // 1 -> ((2^32)-1)
}

//Korakit
//this function is used in setup phase, no need to provide crash consistency
void TATP_DB::fill_access_info_entry(unsigned _s_id, short _ai_type) {
  int tab_indx = 4*_s_id + _ai_type - 1;
  access_info_table[tab_indx].s_id = _s_id;
  access_info_table[tab_indx].ai_type = _ai_type;
  access_info_table[tab_indx].data_1 = getRand()%256;
  access_info_table[tab_indx].data_2 = getRand()%256;
  make_upper_case_string(access_info_table[tab_indx].data_3, 3);
  make_upper_case_string(access_info_table[tab_indx].data_4, 5);
  access_info_table[tab_indx].valid = true;
}

//Korakit
//this function is used in setup phase, no need to provide crash consistency
void TATP_DB::fill_special_facility_entry(unsigned _s_id, short _sf_type) {
  int tab_indx = 4*_s_id + _sf_type - 1;
  special_facility_table[tab_indx].s_id = _s_id;
  special_facility_table[tab_indx].sf_type = _sf_type;
  special_facility_table[tab_indx].is_active = ((getRand()%100 < 85) ? 1 : 0);
  special_facility_table[tab_indx].error_cntrl = getRand()%256;
  special_facility_table[tab_indx].data_a = getRand()%256;
  make_upper_case_string(special_facility_table[tab_indx].data_b, 5);
  special_facility_table[tab_indx].valid = true;
}

//Korakit
//this function is used in setup phase, no need to provide crash consistency
void TATP_DB::fill_call_forwarding_entry(unsigned _s_id, short _sf_type, short _start_time) {
  if(_start_time == 0)
    return;
  int tab_indx = 12*_s_id + 3*(_sf_type-1) + (_start_time-8)/8;
  call_forwarding_table[tab_indx].s_id = _s_id;
  call_forwarding_table[tab_indx].sf_type = _sf_type;
  call_forwarding_table[tab_indx].start_time = _start_time - 8;
  call_forwarding_table[tab_indx].end_time = (_start_time - 8) + getRand()%8 + 1;
  convert_to_string(getRand()%1000, 15, call_forwarding_table[tab_indx].numberx);
}

void TATP_DB::convert_to_string(unsigned number, int num_digits, char* string_ptr) {
  char digits[10] = {'0','1','2','3','4','5','6','7','8','9'};
  unsigned quotient = number;
  int remainder = 0;
  int num_digits_converted = 0;

  // Emit the decimal digits right to left ('^' is XOR in C, so plain
  // division by 10 is used rather than powers of ten).
  while((quotient != 0) && (num_digits_converted < num_digits)) {
    remainder = quotient % 10;
    quotient = quotient / 10;
    string_ptr[num_digits-1 - num_digits_converted] = digits[remainder];
    num_digits_converted++;
  }

  // Zero-pad the remaining positions; sub_nbr is documented as a
  // 15 digit string with zeros padded.
  while(num_digits_converted < num_digits) {
    string_ptr[num_digits-1 - num_digits_converted] = digits[0];
    num_digits_converted++;
  }
  return;
}

void TATP_DB::make_upper_case_string(char* string_ptr, int num_chars) {
  char alphabets[26] = {'A','B','C','D','E','F','G','H','I','J','K','L','M','N',
                        'O','P','Q','R','S','T','U','V','W','X','Y','Z'};
  for(int i=0; i<num_chars; i++) {
    string_ptr[i] = alphabets[getRand()%26];
  }
  return;
}

void TATP_DB::update_subscriber_data(int threadId) {

unsigned rndm_s_id = getRand() % total_subscribers;
short rndm_sf_type = getRand() % 4 + 1;
unsigned special_facility_tab_indx = 4*rndm_s_id + rndm_sf_type -1;
if(special_facility_table[special_facility_tab_indx].valid) {

  //FIXME: There is a potential data race here, do not use this function yet
  //Korakit
  //removed for single thread version
  //pthread_mutex_lock(&lock_[rndm_s_id]);

  subscriber_table[rndm_s_id].bit_1 = getRand()%2;
  special_facility_table[special_facility_tab_indx].data_a = getRand()%256;

  //Korakit
  //removed for single thread version
  //pthread_mutex_unlock(&lock_[rndm_s_id]);
}
return;
}

//subscriber_entry subscriber_table_entry_backup;
//uint64_t subscriber_table_entry_backup_valid;

void TATP_DB::update_location(int threadId, int num_ops) {
  long rndm_s_id;
  rndm_s_id = get_random_s_id(threadId)-1;
  rndm_s_id /=total_subscribers;
  //Korakit
  //removed for single thread version
  //pthread_mutex_lock(&lock_[rndm_s_id]);

  //create backup
  //*subscriber_table_entry_backup = subscriber_table[rndm_s_id];
  //move_data(&subscriber_table_entry_backup, &subscriber_table[rndm_s_id],sizeof(subscriber_table_entry_backup));
  //s_fence();
  //*subscriber_table_entry_backup_valid = 1;
  //s_fence();
  setpage(&subscriber_table[rndm_s_id].vlr_location);
  subscriber_table[rndm_s_id].vlr_location = get_random_vlr(threadId);
  pmem_persist(&subscriber_table[rndm_s_id], sizeof(subscriber_table[rndm_s_id]));

  //*subscriber_table_entry_backup_valid = 0;
  //s_fence();

  //Korakit
  //removed for single thread version
  //pthread_mutex_unlock(&lock_[rndm_s_id]);

  return;
}

void TATP_DB::insert_call_forwarding(int threadId) {
  return;
}

void TATP_DB::delete_call_forwarding(int threadId) {
  return;
}

void TATP_DB::print_results() {
  //std::cout<<"TxType:0 successful txs = "<<txCounts[0][0]<<std::endl;
  //std::cout<<"TxType:0 failed txs = "<<txCounts[0][1]<<std::endl;
  //std::cout<<"TxType:1 successful txs = "<<txCounts[1][0]<<std::endl;
  //std::cout<<"TxType:1 failed txs = "<<txCounts[1][1]<<std::endl;
  //std::cout<<"TxType:2 successful txs = "<<txCounts[2][0]<<std::endl;
  //std::cout<<"TxType:2 failed txs = "<<txCounts[2][1]<<std::endl;
  //std::cout<<"TxType:3 successful txs = "<<txCounts[3][0]<<std::endl;
  //std::cout<<"TxType:3 failed txs = "<<txCounts[3][1]<<std::endl;
}

unsigned long TATP_DB::get_random(int thread_id) {
  //return (getRand()%65536 | min + getRand()%(max - min + 1)) % (max - min + 1) + min;
  unsigned long tmp;
  tmp = rndm_seeds[thread_id*10] = (rndm_seeds[thread_id*10] * 16807) % 2147483647;
  return tmp;
}

unsigned long TATP_DB::get_random(int thread_id, int min, int max) {
  //return (getRand()%65536 | min + getRand()%(max - min + 1)) % (max - min + 1) + min;
  unsigned long tmp;
  tmp = rndm_seeds[thread_id*10] = (rndm_seeds[thread_id*10] * 16807) % 2147483647;
  return (min+tmp%(max-min+1));
}

unsigned long TATP_DB::get_random_s_id(int thread_id) {
  unsigned long tmp;
  tmp = subscriber_rndm_seeds[thread_id*10] = (subscriber_rndm_seeds[thread_id*10] * 16807) % 2147483647;
  return (1 + tmp%(total_subscribers));
}

unsigned long TATP_DB::get_random_vlr(int thread_id) {
  unsigned long tmp;
  tmp = vlr_rndm_seeds[thread_id*10] = (vlr_rndm_seeds[thread_id*10] * 16807)%2147483647;
  return (1 + tmp%0xFFFFFFFFUL); // 1 -> ((2^32)-1), the documented vlr_location range; '^' is XOR in C
}
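All of the get_random* variants above advance a per-thread seed with x = (x * 16807) mod 2147483647, the Park-Miller "minimal standard" Lehmer generator. A self-contained sketch of the same recurrence (illustrative, not taken from the source):

// Stand-alone version of the per-thread generator used above: a Lehmer
// LCG with multiplier 16807 and modulus 2^31 - 1. A seed of 0 is a fixed
// point, so seeds must start in [1, 2^31 - 2].
#include <cstdint>
#include <cstdio>

struct MinStdRand {
    uint64_t state;
    explicit MinStdRand(uint64_t seed) : state(seed % 2147483647ULL) {
        if (state == 0) state = 1;  // avoid the degenerate seed
    }
    uint64_t next() {
        state = (state * 16807ULL) % 2147483647ULL;
        return state;
    }
    // Same mapping as get_random(thread_id, min, max) above.
    uint64_t next_in(uint64_t min, uint64_t max) {
        return min + next() % (max - min + 1);
    }
};

int main() {
    MinStdRand rng(42);
    std::printf("%llu\n", (unsigned long long)rng.next_in(1, 100000));
    return 0;
}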
14,363
39.235294
402
cc
null
NearPMSW-main/nearpmMDsync/checkpointing/TATP_CP/run.sh
#!/usr/bin/env bash sudo rm -rf /mnt/mem/* sudo ./tatp_nvm > out tot=$(grep "tottime" out) grep "cp" out > time cp=$(awk '{sum+= $2;} END{print sum;}' time) echo $1$tot echo $1'cp' $cp
187
16.090909
44
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/TATP_CP/tatp_nvm.cc
/* Author: Vaibhav Gogte <[email protected]> Aasheesh Kolli <[email protected]> This file is the TATP benchmark, performs various transactions as per the specifications. */ #include "tatp_db.h" #include <pthread.h> #include <stdlib.h> #include <stdio.h> #include <iostream> #include <cstdint> #include <assert.h> #include <sys/time.h> #include <string> #include <fstream> #include <libpmem.h> #include <cstring> //Korakit //might need to change parameters #define NUM_SUBSCRIBERS 100000 //100000 #define NUM_OPS_PER_CS 2 #define NUM_OPS 30000 //10000000 #define NUM_THREADS 1 TATP_DB* my_tatp_db; //#include "../DCT/rdtsc.h" void init_db() { unsigned num_subscribers = NUM_SUBSCRIBERS; my_tatp_db = (TATP_DB *)malloc(sizeof(TATP_DB)); my_tatp_db->initialize(num_subscribers,NUM_THREADS); fprintf(stderr, "Created tatp db at %p\n", (void *)my_tatp_db); } void* update_locations(void* args) { int thread_id = *((int*)args); for(int i=0; i<NUM_OPS/NUM_THREADS; i++) { my_tatp_db->update_location(thread_id,NUM_OPS_PER_CS); } return 0; } /////////////////Page fault handling///////////////// #include <bits/types/sig_atomic_t.h> #include <bits/types/sigset_t.h> #include <signal.h> #include <unistd.h> #include <sys/mman.h> #include <fcntl.h> #define SIGSTKSZ 8192 #define SA_SIGINFO 4 #define SA_ONSTACK 0x08000000 /* Use signal stack by using `sa_restorer'. */ #define SA_RESTART 0x10000000 /* Restart syscall on signal return. */ #define SA_NODEFER 0x40000000 /* Don't automatically block the signal when*/ stack_t _sigstk; int updated_page_count = 0; int all_updates = 0; void * checkpoint_start; void * page[50]; void * device; double totTimeCP = 0; static inline uint64_t getCycle(){ uint32_t cycles_high, cycles_low, pid; asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx "mov %%edx, %0\n\t" "mov %%eax, %1\n\t" "mov %%ecx, %2\n\t" :"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars :// no input :"%eax", "%edx", "%ecx" // clobbered by rdtscp ); return((uint64_t)cycles_high << 32) | cycles_low; } #define GRANULARITY 2048 void cmd_issue( uint32_t opcode, uint32_t TXID, uint32_t TID, uint32_t OID, uint64_t data_addr, uint32_t data_size, void * ptr){ //command with thread id encoded as first 8 bits of each word uint32_t issue_cmd[7]; issue_cmd[0] = (TID<<24)|(opcode<<16)|(TXID<<8)|TID; issue_cmd[1] = (TID<<24)|(OID<<16)|(data_addr>>48); issue_cmd[2] = (TID<<24)|((data_addr & 0x0000FFFFFFFFFFFF)>>24); issue_cmd[3] = (TID<<24)|(data_addr & 0x0000000000FFFFFF); issue_cmd[4] = (TID<<24)|(data_size<<8); issue_cmd[5] = (TID<<24)|(0X00FFFFFF>>16); issue_cmd[6] = (TID<<24)|((0X00FFFFFF & 0x0000FFFF)<<8); for(int i=0;i<7;i++){ // printf("%08x\n",issue_cmd[i]); *((u_int32_t *) ptr) = issue_cmd[i]; } } static void segvHandle(int signum, siginfo_t * siginfo, void * context) { #define CPTIME #ifdef CPTIME uint64_t endCycles, startCycles,totalCycles; startCycles = getCycle(); #endif void * addr = siginfo->si_addr; // address of access uint64_t pageNo = ((uint64_t)addr)/4096; unsigned long * pageStart = (unsigned long *)(pageNo*4096); // Check if this was a SEGV that we are supposed to trap. 
if (siginfo->si_code == SEGV_ACCERR) {
	mprotect(pageStart, 4096, PROT_READ|PROT_WRITE);
	if(all_updates >=5 || updated_page_count == 50){
		for(int i=0;i<updated_page_count;i++){
			//memcpy(checkpoint_start + 4096, pageStart, 4096);
			//pmem_persist(checkpoint_start + 4096, 4096);
			cmd_issue(2,0,0,0, (uint64_t)page[i],4096,device); // checkpoint each recorded dirty page
			/*
			*((uint64_t*)device) = (uint64_t)(checkpoint_start + 4096);
			*((uint64_t*)(device)+1) = 00;
			*((uint64_t*)(device)+2) = (uint64_t)0.320580;
			*((uint64_t*)(device)+3) = ((uint64_t)(((0) << 16)| 6) << 32) | 4096;
			*(((uint32_t*)(device))+255) = (uint32_t)(((0) << 16)| 6);
			*/
			page[i] = 0;
		}
		updated_page_count = 0;
		all_updates = 0;
	}
	all_updates ++;
	for(int i=0; i<updated_page_count; i++){
		if(page[i] == pageStart){
#ifdef CPTIME
			endCycles = getCycle();
			totalCycles = endCycles - startCycles;
			double totTime = ((double)totalCycles)/2000000000;
			printf("cp %f\n", totTime);
#endif
			return;}
	}
	page[updated_page_count] = pageStart;
	//printf("test1 %lx %d %d\n",page[updated_page_count],updated_page_count,all_updates);
	updated_page_count++;
#ifdef CPTIME
	endCycles = getCycle();
	totalCycles = endCycles - startCycles;
	double totTime = ((double)totalCycles)/2000000000;
	printf("cp %f\n", totTime);
#endif

	//*((int *)checkpoint_start) = 10;
	//test++;
	//printf("test1 %lx %d\n",updated_page_count);
}
else if (siginfo->si_code == SEGV_MAPERR) {
	fprintf (stderr, "%d : map error with addr %p!\n", getpid(), addr);
	abort();
}
else {
	fprintf (stderr, "%d : other access error with addr %p.\n", getpid(), addr);
	abort();
}
}

static void installSignalHandler(void) {

// Set up an alternate signal stack.
printf("page fault handler initialized!!\n");
_sigstk.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
_sigstk.ss_size = SIGSTKSZ;
_sigstk.ss_flags = 0;
sigaltstack(&_sigstk, (stack_t *) 0);

// Now set up a signal handler for SIGSEGV events.
struct sigaction siga;
sigemptyset(&siga.sa_mask);

// Set the following signals to a set
sigaddset(&siga.sa_mask, SIGSEGV);
sigaddset(&siga.sa_mask, SIGALRM);
sigprocmask(SIG_BLOCK, &siga.sa_mask, NULL);

// Point to the handler function.
siga.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART | SA_NODEFER;

siga.sa_sigaction = segvHandle;
if (sigaction(SIGSEGV, &siga, NULL) == -1) {
	perror("sigaction(SIGSEGV)");
	exit(-1);
}

sigprocmask(SIG_UNBLOCK, &siga.sa_mask, NULL);
return;
}

void* open_device(const char* pathname) {
	//int fd = os_open("/sys/devices/pci0000:00/0000:00:00.2/iommu/ivhd0/devices/0000:0a:00.0/resource0",O_RDWR|O_SYNC);
	int fd = open(pathname,O_RDWR|O_SYNC);
	if(fd == -1) {
		printf("Couldn't open file!!\n");
		exit(0);
	}
	void * ptr = mmap(0,4096,PROT_READ|PROT_WRITE, MAP_SHARED,fd,0);
	if(ptr == (void *)-1) {
		printf("Could not map memory!!\n");
		exit(0);
	}
	printf("opened device without error!!\n");
	return ptr;
}
///////////////////////////////////////////////////////////////////

void installSignalHandler (void) __attribute__ ((constructor));

int main(int argc, char* argv[]) {
	//printf("in main\n");
	//struct timeval tv_start;
	//struct timeval tv_end;
	//std::ofstream fexec;
	//fexec.open("exec.csv",std::ios_base::app);

	// Korakit: move to the init
	// LIU
	device = open_device("/sys/devices/pci0000:00/0000:00:00.2/iommu/ivhd0/devices/0000:0a:00.0/resource0");
	uint64_t* tmp = (uint64_t*)device;
	*tmp = 0xdeadbeefdeadbeef;
	pmem_persist(tmp,64);
	*tmp = (uint64_t)tmp;
	pmem_persist(tmp,64);
	uint32_t tid;
	tid = 0;//gettid();
	tid = tid & 0x3f;
	tid = (tid<< 4)| 0;//pop->run_id;
	//printf("%d %d\n",tid, pop->run_id);
	*tmp = tid;
	pmem_persist(tmp,64);

	init_db();
	// LIU: remove output
	//std::cout<<"done with initialization"<<std::endl;

	my_tatp_db->populate_tables(NUM_SUBSCRIBERS);
	// LIU: remove output
	//std::cout<<"done with populating tables"<<std::endl;

	pthread_t threads[NUM_THREADS];
	int id[NUM_THREADS];

	//Korakit
	//exit to count instructions after initialization
	//we use memory trace from the beginning to this to test the compression ratio
	//as update locations(the actual test) only do one update

	// LIU
	// gettimeofday(&tv_start, NULL);
	//CounterAtomic::initCounterCache();

	uint64_t endCycles, startCycles,totalCycles;
	startCycles = getCycle();
	for(int i=0; i<NUM_THREADS; i++) {
		id[i] = i;
		update_locations((void*)&id[i]);
	}
	endCycles = getCycle();
	totalCycles = endCycles - startCycles;
	double totTime = ((double)totalCycles)/2000000000;
	printf("tottime %f\ndata movement time %f\n", totTime, totTimeCP);

	//Korakit
	//Not necessary for single threaded version
	/*
	for(int i=0; i<NUM_THREADS; i++) {
		pthread_join(threads[i], NULL);
	}
	*/

	// LIU: remove the output
	/*
	gettimeofday(&tv_end, NULL);

	fprintf(stderr, "time elapsed %ld us\n",
			tv_end.tv_usec - tv_start.tv_usec +
			(tv_end.tv_sec - tv_start.tv_sec) * 1000000);

	fexec << "TATP" << ", " << std::to_string((tv_end.tv_usec - tv_start.tv_usec) + (tv_end.tv_sec - tv_start.tv_sec) * 1000000) << std::endl;

	fexec.close();

	free(my_tatp_db);

	std::cout<<"done with threads"<<std::endl;
	*/

	return 0;
}
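The file above drives its checkpointing off write-protection faults: setpage() makes a page read-only, segvHandle() records the first write to it, and the page is re-enabled so execution continues. Below is a stripped-down sketch of just that mechanism with the NDP command path omitted; everything in it is illustrative rather than taken from the source.

// Minimal dirty-page tracking via mprotect + SIGSEGV, the mechanism
// used by segvHandle()/setpage() above (device checkpointing omitted).
#include <csignal>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <sys/mman.h>

static unsigned char *region;
static const size_t REGION_BYTES = 16 * 4096;

static void handler(int, siginfo_t *si, void *) {
    if (si->si_code != SEGV_ACCERR) abort();   // a real fault: give up
    void *page = (void *)((uintptr_t)si->si_addr & ~(uintptr_t)4095);
    // A real checkpointer would queue `page` for copying here.
    mprotect(page, 4096, PROT_READ | PROT_WRITE);
}

int main() {
    region = (unsigned char *)mmap(NULL, REGION_BYTES,
                                   PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    struct sigaction sa;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = SA_SIGINFO;
    sa.sa_sigaction = handler;
    sigaction(SIGSEGV, &sa, NULL);

    mprotect(region, REGION_BYTES, PROT_READ);  // arm the write traps
    region[0] = 1;   // first write to the page faults once, then succeeds
    region[0] = 2;   // page already writable again: no second fault
    std::puts("done");
    return 0;
}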
8,797
25.5
140
cc
null
NearPMSW-main/nearpmMDsync/checkpointing/TATP_CP/tableEntries.h
/* Author: Vaibhav Gogte <[email protected]> Aasheesh Kolli <[email protected]> This file defines the table entries used by TATP. */ struct subscriber_entry { unsigned s_id; // Subscriber id char sub_nbr[15]; // Subscriber number, s_id in 15 digit string, zeros padded short bit_1, bit_2, bit_3, bit_4, bit_5, bit_6, bit_7, bit_8, bit_9, bit_10; // randomly generated values 0/1 short hex_1, hex_2, hex_3, hex_4, hex_5, hex_6, hex_7, hex_8, hex_9, hex_10; // randomly generated values 0->15 short byte2_1, byte2_2, byte2_3, byte2_4, byte2_5, byte2_6, byte2_7, byte2_8, byte2_9, byte2_10; // randomly generated values 0->255 unsigned msc_location; // Randomly generated value 1->((2^32)-1) unsigned vlr_location; // Randomly generated value 1->((2^32)-1) char padding[40]; }; struct access_info_entry { unsigned s_id; //Subscriber id short ai_type; // Random value 1->4. A subscriber can have a max of 4 and all unique short data_1, data_2; // Randomly generated values 0->255 char data_3[3]; // random 3 char string. All upper case alphabets char data_4[5]; // random 5 char string. All upper case alphabets bool valid; bool padding_1[7]; char padding_2[4+32]; }; struct special_facility_entry { unsigned s_id; //Subscriber id short sf_type; // Random value 1->4. A subscriber can have a max of 4 and all unique short is_active; // 0(15%)/1(85%) short error_cntrl; // Randomly generated values 0->255 short data_a; // Randomly generated values 0->255 char data_b[5]; // random 5 char string. All upper case alphabets char padding_1[7]; bool valid; bool padding_2[4+32]; }; struct call_forwarding_entry { unsigned s_id; // Subscriber id from special facility short sf_type; // sf_type from special facility table int start_time; // 0 or 8 or 16 int end_time; // start_time+N, N randomly generated 1->8 char numberx[15]; // randomly generated 15 digit string char padding_1[7]; bool valid; bool padding_2[24]; };
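The generous trailing padding in each struct above suggests the rows are meant to occupy whole 64-byte cache lines, which matters for the per-entry persist calls; the source never states this, so the sizes below are an assumption based on the usual x86-64 layout rules.

// Compile-time check of the presumed layout (an assumption, not from the source).
#include "tableEntries.h"

static_assert(sizeof(subscriber_entry) == 128, "two cache lines expected");
static_assert(sizeof(access_info_entry) == 64, "one cache line expected");
static_assert(sizeof(special_facility_entry) == 64, "one cache line expected");
static_assert(sizeof(call_forwarding_entry) == 64, "one cache line expected");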
1,993
35.254545
134
h
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/run.sh
sudo mount -o dax /dev/pmem0 /mnt/pmem sudo rm -rf /mnt/mem/* sudo chown oem /mnt/pmem #./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 #sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --num=10000 --engine=cmap --benchmarks=fillseq,fillrandom,overwrite > out make bench sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --num=10000 --engine=cmap --benchmarks=fillrandom > out #grep "timecp" out > time #awk '{sum+= $3;} END{print sum;}' time grep "cp" out > time log=$(awk '{sum+= $2;} END{print sum;}' time) echo "" echo $1'cp' $log tottime=$(tail -1 out | awk '{print $NF}' | cut -d "," -f10|awk '{print $1/100}') echo $1'tottime' $tottime
674
41.1875
135
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/runtest.sh
sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillrandom sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,overwrite sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,readseq sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,readrandom sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,readmissing sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,deleteseq sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,deleterandom sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,readwhilewriting sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,readrandomwriterandom sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,txfillrandom
1,176
89.538462
117
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/setup.sh
sudo mkfs.ext4 /dev/pmem0 sudo mount -o dax /dev/pmem0 /mnt/pmem sudo chown oem /mnt/pmem #cd Research/Research/pmdk/src/examples/libpmemobj/map/ #cat run.sh
159
25.666667
55
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/entrypoint.sh
#!/bin/sh -l # # SPDX-License-Identifier: Apache-2.0 # Copyright 2021, Intel Corporation set -e set -x echo "$1" project_dir=${WORKDIR:-/pmemkv-bench} echo "run basic test" pytest-3 -v ${project_dir}/tests/test.py
219
12.75
40
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/db_bench.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. // This source code is licensed under the Apache 2.0 License // (found in the LICENSE file in the root directory). // SPDX-License-Identifier: Apache-2.0 // Copyright 2017-2021, Intel Corporation #include <chrono> #include <cstdio> #include <cstdlib> #include <ctime> #include <inttypes.h> #include <iomanip> #include <iostream> #include <memory> #include <sstream> #include <string> #include <sys/types.h> #include <sys/stat.h> #include <vector> #include "csv.h" #include "histogram.h" #include "leveldb/env.h" #include "libpmemkv.hpp" #include "libpmempool.h" #include "mutexlock.h" #include "port/port_posix.h" #include "random.h" #include "testutil.h" static const std::string USAGE = "pmemkv_bench\n" "--engine=<name> (storage engine name, default: cmap)\n" "--db=<location> (path to persistent pool, default: /dev/shm/pmemkv)\n" " (note: file on DAX filesystem, DAX device, or poolset file)\n" "--db_size_in_gb=<integer> (size of persistent pool to create in GB, default: 0)\n" " (note: for existing poolset or device DAX configs use 0 or leave default value)\n" " (note: when pool path is non-existing, value should be > 0)\n" "--histogram=<0|1> (show histograms when reporting latencies)\n" "--num=<integer> (number of keys to place in database, default: 1000000)\n" "--reads=<integer> (number of read operations, default: 1000000)\n" "--threads=<integer> (number of concurrent threads, default: 1)\n" "--key_size=<integer> (size of keys in bytes, default: 16)\n" "--value_size=<integer> (size of values in bytes, default: 100)\n" "--readwritepercent=<integer> (Ratio of reads to reads/writes (expressed " "as percentage) for the ReadRandomWriteRandom workload. The default value " "90 means 90% operations out of all reads and writes operations are reads. " "In other words, 9 gets for every 1 put.) type: int32 default: 90\n" "--tx_size=<integer> (number of elements to insert in a single tx, there will be" "num/tx_size transactions per thread in total, the last tx might be smaller, default: 10)\n" "--disjoint=<0|1> (specifies whether each thread works on disjoint set of keys. " "0 means that all threads read/write to the db using any key between 0 and `num`, so that " "number of ops is `threads` * `num`. 1 means that each thread performs reads/writes using " "only [`thread_id` * `num` / `threads`, (`thread_id` + 1) * `num` / `threads`) subset of keys, " "so that total number of ops is `num`. 
The default value is 0.)\n" "--benchmarks=<name>, (comma-separated list of benchmarks to run)\n" " fillseq (load N values in sequential key order)\n" " fillrandom (load N values in random key order)\n" " overwrite (replace N values in random key order)\n" " readseq (read N values in sequential key order)\n" " readrandom (read N values in random key order)\n" " readmissing (read N missing values in random key order)\n" " deleteseq (delete N values in sequential key order)\n" " deleterandom (delete N values in random key order)\n" " readwhilewriting (1 writer, N threads doing random reads)\n" " readrandomwriterandom (N threads doing random-read, random-write)\n" " txfillrandom (load N values in random key order transactionally)\n"; // Number of key/values to place in database static int FLAGS_num = 1000000; static bool FLAGS_disjoint = false; // Number of read operations to do. If negative, do FLAGS_num reads. static int FLAGS_reads = -1; // Number of concurrent threads to run. static int FLAGS_threads = 1; static int FLAGS_key_size = 16; // Size of each value static int FLAGS_value_size = 100; // Print histogram of operation timings static bool FLAGS_histogram = false; // Use the db with the following name. static const char *FLAGS_db = "/dev/shm/pmemkv"; // Use following size when opening the database. static int FLAGS_db_size_in_gb = 0; static const double FLAGS_compression_ratio = 1.0; static const int FLAGS_ops_between_duration_checks = 1000; static const int FLAGS_duration = 0; static int FLAGS_readwritepercent = 90; static int FLAGS_tx_size = 10; using namespace leveldb; leveldb::Env *g_env = NULL; #if defined(__linux) static Slice TrimSpace(Slice s) { size_t start = 0; while (start < s.size() && isspace(s[start])) { start++; } size_t limit = s.size(); while (limit > start && isspace(s[limit - 1])) { limit--; } return Slice(s.data() + start, limit - start); } #endif // Helper for quickly generating random data. class RandomGenerator { private: std::string data_; unsigned int pos_; public: RandomGenerator() { // We use a limited amount of data over and over again and ensure // that it is larger than the compression window (32KB), and also // large enough to serve all typical value sizes we want to write. Random rnd(301); std::string piece; while (data_.size() < (unsigned)std::max(1048576, FLAGS_value_size)) { // Add a short fragment that is as compressible as specified // by FLAGS_compression_ratio. 
test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
			data_.append(piece);
		}
		pos_ = 0;
	}

	Slice Generate(unsigned int len)
	{
		assert(len <= data_.size());
		if (pos_ + len > data_.size()) {
			pos_ = 0;
		}
		pos_ += len;
		return Slice(data_.data() + pos_ - len, len);
	}

	Slice GenerateWithTTL(unsigned int len)
	{
		assert(len <= data_.size());
		if (pos_ + len > data_.size()) {
			pos_ = 0;
		}
		pos_ += len;
		return Slice(data_.data() + pos_ - len, len);
	}
};

static void AppendWithSpace(std::string *str, Slice msg)
{
	if (msg.empty())
		return;
	if (!str->empty()) {
		str->push_back(' ');
	}
	str->append(msg.data(), msg.size());
}

enum OperationType : unsigned char {
	kRead = 0,
	kWrite,
	kDelete,
	kSeek,
	kMerge,
	kUpdate,
};

class BenchmarkLogger {
private:
	struct hist {
		int id;
		std::string name;
		std::string histogram;
	};
	int id = 0;
	std::vector<hist> histograms;
	CSV<int> csv = CSV<int>("id");

public:
	void insert(std::string name, Histogram histogram)
	{
		histograms.push_back({id, name, histogram.ToString()});
		std::vector<double> percentiles = {50, 75, 90, 99.9, 99.99};
		for (double &percentile : percentiles) {
			csv.insert(id, "Percentile P" + std::to_string(percentile) + " [micros/op]",
				   histogram.Percentile(percentile));
		}
		csv.insert(id, "Median [micros/op]", histogram.Median());
	}

	template <typename T>
	void insert(std::string column, T data)
	{
		csv.insert(id, column, data);
	}

	void insert(std::string column, std::time_t time)
	{
		std::ostringstream time_stream;
		time_stream << std::put_time(std::localtime(&time), "%D %T");
		insert(column, time_stream.str());
	}

	void print_histogram()
	{
		std::cout << "------------------------------------------------" << std::endl;
		for (auto &histogram : histograms) {
			std::cout << "benchmark: " << histogram.id << ", " << histogram.name << std::endl
				  << histogram.histogram << std::endl;
		}
	}

	void print()
	{
		csv.print();
	}

	void next_benchmark()
	{
		id++;
	}
};

class Stats {
private:
	double start_;
	double finish_;
	double seconds_;
	int done_;
	int next_report_;
	int64_t bytes_;
	double last_op_finish_;
	Histogram hist_;
	std::string message_;
	bool exclude_from_merge_;

public:
	Stats()
	{
		Start();
	}

	void Start()
	{
		next_report_ = 100;
		last_op_finish_ = start_;
		hist_.Clear();
		done_ = 0;
		bytes_ = 0;
		seconds_ = 0;
		start_ = g_env->NowMicros();
		finish_ = start_;
		message_.clear();
		// When set, stats from this thread won't be merged with others.
exclude_from_merge_ = false; } void Merge(const Stats &other) { if (other.exclude_from_merge_) return; hist_.Merge(other.hist_); done_ += other.done_; bytes_ += other.bytes_; seconds_ += other.seconds_; if (other.start_ < start_) start_ = other.start_; if (other.finish_ > finish_) finish_ = other.finish_; // Just keep the messages from one thread if (message_.empty()) message_ = other.message_; } void Stop() { finish_ = g_env->NowMicros(); seconds_ = (finish_ - start_) * 1e-6; } void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); } void SetExcludeFromMerge() { exclude_from_merge_ = true; } void FinishedSingleOp() { if (FLAGS_histogram) { double now = g_env->NowMicros(); double micros = now - last_op_finish_; hist_.Add(micros); if (micros > 20000) { fprintf(stderr, "long op: %.1f micros%30s\r", micros, ""); fflush(stderr); } last_op_finish_ = now; } done_++; if (done_ >= next_report_) { if (next_report_ < 1000) next_report_ += 100; else if (next_report_ < 5000) next_report_ += 500; else if (next_report_ < 10000) next_report_ += 1000; else if (next_report_ < 50000) next_report_ += 5000; else if (next_report_ < 100000) next_report_ += 10000; else if (next_report_ < 500000) next_report_ += 50000; else next_report_ += 100000; fprintf(stderr, "... finished %d ops%30s\r", done_, ""); fflush(stderr); } } void AddBytes(int64_t n) { bytes_ += n; } float get_micros_per_op() { // Pretend at least one op was done in case we are running a benchmark // that does not call FinishedSingleOp(). if (done_ < 1) done_ = 1; return seconds_ * 1e6 / done_; } float get_ops_per_sec() { // Pretend at least one op was done in case we are running a benchmark // that does not call FinishedSingleOp(). if (done_ < 1) done_ = 1; double elapsed = (finish_ - start_) * 1e-6; return done_ / elapsed; } float get_throughput() { // Rate and ops/sec is computed on actual elapsed time, not the sum of per-thread // elapsed times. double elapsed = (finish_ - start_) * 1e-6; return (bytes_ / 1048576.0) / elapsed; } std::string get_extra_data() { return message_; } Histogram &get_histogram() { return hist_; } }; // State shared by all concurrent executions of the same benchmark. struct SharedState { port::Mutex mu; port::CondVar cv; int total; // Each thread goes through the following states: // (1) initializing // (2) waiting for others to be initialized // (3) running // (4) done int num_initialized; int num_done; bool start; SharedState() : cv(&mu) { } }; // Per-thread state for concurrent executions of the same benchmark. struct ThreadState { int tid; // 0..n-1 when running in n threads Random rand; // Has different seeds for different threads Stats stats; SharedState *shared; ThreadState(int index) : tid(index), rand(1000 + index) { } }; class Duration { typedef std::chrono::high_resolution_clock::time_point time_point; public: Duration(uint64_t max_seconds, int64_t max_ops, int64_t ops_per_stage = 0) { max_seconds_ = max_seconds; max_ops_ = max_ops; ops_per_stage_ = (ops_per_stage > 0) ? 
ops_per_stage : max_ops; ops_ = 0; start_at_ = std::chrono::high_resolution_clock::now(); } int64_t GetStage() { return std::min(ops_, max_ops_ - 1) / ops_per_stage_; } bool Done(int64_t increment) { if (increment <= 0) increment = 1; // avoid Done(0) and infinite loops ops_ += increment; if (max_seconds_) { // Recheck every appx 1000 ops (exact iff increment is factor of 1000) auto granularity = FLAGS_ops_between_duration_checks; if ((ops_ / granularity) != ((ops_ - increment) / granularity)) { time_point now = std::chrono::high_resolution_clock::now(); return std::chrono::duration_cast<std::chrono::milliseconds>(now - start_at_) .count() >= max_seconds_; } else { return false; } } else { return ops_ > max_ops_; } } private: uint64_t max_seconds_; int64_t max_ops_; int64_t ops_per_stage_; int64_t ops_; time_point start_at_; }; class Benchmark { private: pmem::kv::db *kv_; int num_; int tx_size_; int value_size_; int key_size_; int reads_; int64_t readwrites_; BenchmarkLogger &logger; Slice name; int n; const char *engine; void (Benchmark::*method)(ThreadState *) = NULL; void PrintHeader() { PrintEnvironment(); logger.insert("Path", FLAGS_db); logger.insert("Engine", engine); logger.insert("Keys [bytes each]", FLAGS_key_size); logger.insert("Values [bytes each]", FLAGS_value_size); logger.insert("Entries", num_); logger.insert("RawSize [MB (estimated)]", ((static_cast<int64_t>(FLAGS_key_size + FLAGS_value_size) * num_) / 1048576.0)); PrintWarnings(); } void PrintWarnings() { #if defined(__GNUC__) && !defined(__OPTIMIZE__) fprintf(stdout, "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); #endif #ifndef NDEBUG fprintf(stdout, "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); #endif } void PrintEnvironment() { #if defined(__linux) auto now = std::time(NULL); logger.insert("Date:", now); FILE *cpuinfo = fopen("/proc/cpuinfo", "r"); if (cpuinfo != NULL) { char line[1000]; int num_cpus = 0; std::string cpu_type; std::string cache_size; while (fgets(line, sizeof(line), cpuinfo) != NULL) { const char *sep = strchr(line, ':'); if (sep == NULL) { continue; } Slice key = TrimSpace(Slice(line, sep - 1 - line)); Slice val = TrimSpace(Slice(sep + 1)); if (key == "model name") { ++num_cpus; cpu_type = val.ToString(); } else if (key == "cache size") { cache_size = val.ToString(); } } fclose(cpuinfo); logger.insert("CPU", std::to_string(num_cpus)); logger.insert("CPU model", cpu_type); logger.insert("CPUCache", cache_size); } #endif } public: Benchmark(Slice name, pmem::kv::db *&kv, int num_threads, const char *engine, BenchmarkLogger &logger) : kv_(kv), num_(FLAGS_num), tx_size_(FLAGS_tx_size), value_size_(FLAGS_value_size), key_size_(FLAGS_key_size), reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads), readwrites_(FLAGS_reads < 0 ? 
FLAGS_num : FLAGS_reads),
	      logger(logger), n(num_threads), name(name), engine(engine)
	{
		fprintf(stderr, "running %s \n", name.ToString().c_str());
		bool fresh_db = false;

		if (name == Slice("fillseq")) {
			fresh_db = true;
			method = &Benchmark::WriteSeq;
		} else if (name == Slice("fillrandom")) {
			fresh_db = true;
			method = &Benchmark::WriteRandom;
		} else if (name == Slice("txfillrandom")) {
			fresh_db = true;
			method = &Benchmark::TxFillRandom;
		} else if (name == Slice("overwrite")) {
			method = &Benchmark::WriteRandom;
		} else if (name == Slice("readseq")) {
			method = &Benchmark::ReadSeq;
		} else if (name == Slice("readrandom")) {
			method = &Benchmark::ReadRandom;
		} else if (name == Slice("readmissing")) {
			method = &Benchmark::ReadMissing;
		} else if (name == Slice("deleteseq")) {
			method = &Benchmark::DeleteSeq;
		} else if (name == Slice("deleterandom")) {
			method = &Benchmark::DeleteRandom;
		} else if (name == Slice("readwhilewriting")) {
			++num_threads;
			method = &Benchmark::ReadWhileWriting;
		} else if (name == Slice("readrandomwriterandom")) {
			method = &Benchmark::ReadRandomWriteRandom;
		} else {
			throw std::runtime_error("unknown benchmark: " + name.ToString());
		}

		logger.next_benchmark();
		logger.insert("Benchmark", name.ToString());

		PrintHeader();

		if (fresh_db) {
			if (kv_ != nullptr) {
				kv_->close();
				delete kv_;
				kv_ = nullptr;
			}
			Create(name.ToString());
			kv = kv_;
		}
	}

	Slice AllocateKey(std::unique_ptr<const char[]> &key_guard)
	{
		const char *tmp = new char[key_size_];
		key_guard.reset(tmp);
		return Slice(key_guard.get(), key_size_);
	}

	void GenerateKeyFromInt(uint64_t v, int64_t num_keys, Slice *key, bool missing = false)
	{
		char *start = const_cast<char *>(key->data());
		char *pos = start;

		int bytes_to_fill = std::min(key_size_, 8);
		if (missing) {
			int64_t v1 = -v;
			memcpy(pos, static_cast<void *>(&v1), bytes_to_fill);
		} else
			memcpy(pos, static_cast<void *>(&v), bytes_to_fill);
		pos += bytes_to_fill;
		if (key_size_ > pos - start) {
			memset(pos, '0', key_size_ - (pos - start));
		}
	}

	void Run()
	{
		SharedState shared;
		shared.total = n;
		shared.num_initialized = 0;
		shared.num_done = 0;
		shared.start = false;

		ThreadArg *arg = new ThreadArg[n];
		for (int i = 0; i < n; i++) {
			arg[i].bm = this;
			arg[i].method = method;
			arg[i].shared = &shared;
			arg[i].thread = new ThreadState(i);
			arg[i].thread->shared = &shared;
			g_env->StartThread(ThreadBody, &arg[i]);
		}

		shared.mu.Lock();
		while (shared.num_initialized < n) {
			shared.cv.Wait();
		}

		shared.start = true;
		shared.cv.SignalAll();

		while (shared.num_done < n) {
			shared.cv.Wait();
		}
		shared.mu.Unlock();

		for (int i = 1; i < n; i++) {
			arg[0].thread->stats.Merge(arg[i].thread->stats);
		}

		auto thread_stats = arg[0].thread->stats;
		logger.insert("micros/op (average)", thread_stats.get_micros_per_op());
		logger.insert("ops/sec", thread_stats.get_ops_per_sec());
		logger.insert("throughput [MB/s]", thread_stats.get_throughput());
		logger.insert("extra_data", thread_stats.get_extra_data());

		if (FLAGS_histogram) {
			logger.insert(name.ToString(), thread_stats.get_histogram());
		}

		for (int i = 0; i < n; i++) {
			delete arg[i].thread;
		}
		delete[] arg;
	}

private:
	struct ThreadArg {
		Benchmark *bm;
		SharedState *shared;
		ThreadState *thread;
		void (Benchmark::*method)(ThreadState *);
	};

	struct DbInserter {
		DbInserter(pmem::kv::db *db) : db(db)
		{
		}

		pmem::kv::status put(pmem::kv::string_view key, pmem::kv::string_view value)
		{
			return db->put(key, value);
		}

		pmem::kv::status commit()
		{
			return pmem::kv::status::OK;
		}

	private:
		pmem::kv::db *db;
	};

	struct TxInserter {
		TxInserter(pmem::kv::db *db) : tx(db->tx_begin().get_value())
		{
		}
pmem::kv::status put(pmem::kv::string_view key, pmem::kv::string_view value) { return tx.put(key, value); } pmem::kv::status commit() { return tx.commit(); } private: pmem::kv::tx tx; }; static void ThreadBody(void *v) { ThreadArg *arg = reinterpret_cast<ThreadArg *>(v); SharedState *shared = arg->shared; ThreadState *thread = arg->thread; { MutexLock l(&shared->mu); shared->num_initialized++; if (shared->num_initialized >= shared->total) { shared->cv.SignalAll(); } while (!shared->start) { shared->cv.Wait(); } } thread->stats.Start(); (arg->bm->*(arg->method))(thread); thread->stats.Stop(); { MutexLock l(&shared->mu); shared->num_done++; if (shared->num_done >= shared->total) { shared->cv.SignalAll(); } } } void Create(std::string name) { assert(kv_ == nullptr); auto start = g_env->NowMicros(); auto size = 512ULL * 1024ULL * 1024ULL * FLAGS_db_size_in_gb; pmem::kv::config cfg; auto cfg_s = cfg.put_string("path", FLAGS_db); if (cfg_s != pmem::kv::status::OK) throw std::runtime_error("putting 'path' to config failed"); cfg_s = cfg.put_uint64("force_create", 1); if (cfg_s != pmem::kv::status::OK) throw std::runtime_error("putting 'force_create' to config failed"); cfg_s = cfg.put_uint64("size", size); if (cfg_s != pmem::kv::status::OK) throw std::runtime_error("putting 'size' to config failed"); /* Check if the path is a directory or a file * (we don't pass filename in case of memkind * based engines, only dir). If it is a file, * remove the previous file with the same name. */ struct stat info; if (stat(FLAGS_db, &info) == 0 && !(info.st_mode & S_IFDIR)) { auto start = g_env->NowMicros(); /* Remove pool file. This should be * implemented using libpmempool for backward * compatibility. */ if (pmempool_rm(FLAGS_db, PMEMPOOL_RM_FORCE) != 0) { throw std::runtime_error(std::string("Cannot remove pool: ") + FLAGS_db); } logger.insert("Remove [millis/op]", ((g_env->NowMicros() - start) * 1e-3)); } kv_ = new pmem::kv::db; auto s = kv_->open(engine, std::move(cfg)); if (s != pmem::kv::status::OK) { fprintf(stderr, "Cannot start engine (%s) for path (%s) with %i GB capacity\n%s\n\nUSAGE: %s", engine, FLAGS_db, FLAGS_db_size_in_gb, pmem::kv::errormsg().c_str(), USAGE.c_str()); exit(-42); } logger.insert("Open [millis/op]", ((g_env->NowMicros() - start) * 1e-3)); } template <typename Inserter = DbInserter> void DoWrite(ThreadState *thread, bool seq) { if (num_ != FLAGS_num) { char msg[100]; snprintf(msg, sizeof(msg), "(%d ops)", num_); thread->stats.AddMessage(msg); } std::unique_ptr<const char[]> key_guard; Slice key = AllocateKey(key_guard); auto num = FLAGS_disjoint ? num_ / FLAGS_threads : num_; auto start = FLAGS_disjoint ? thread->tid * num : 0; auto end = FLAGS_disjoint ? (thread->tid + 1) * num : num_; pmem::kv::status s; int64_t bytes = 0; auto batch_size = std::is_same<Inserter, TxInserter>::value ? tx_size_ : 1; for (int n = start; n < end; n += batch_size) { Inserter inserter(kv_); for (int i = n; i < n + batch_size; i++) { const int k = seq ? 
i : (thread->rand.Next() % num) + start; GenerateKeyFromInt(k, FLAGS_num, &key); std::string value = std::string(); value.append(value_size_, 'X'); s = inserter.put(key.ToString(), value); bytes += value_size_ + key.size(); if (s != pmem::kv::status::OK) { fprintf(stdout, "Out of space at key %i\n", i); exit(1); } } s = inserter.commit(); thread->stats.FinishedSingleOp(); if (s != pmem::kv::status::OK) { fprintf(stdout, "Commit failed at batch %i\n", n); exit(1); } } thread->stats.AddBytes(bytes); } void WriteSeq(ThreadState *thread) { DoWrite<DbInserter>(thread, true); } void WriteRandom(ThreadState *thread) { DoWrite<DbInserter>(thread, false); } void DoRead(ThreadState *thread, bool seq, bool missing) { pmem::kv::status s; int64_t bytes = 0; int found = 0; std::unique_ptr<const char[]> key_guard; Slice key = AllocateKey(key_guard); auto num = FLAGS_disjoint ? reads_ / FLAGS_threads : reads_; auto start = FLAGS_disjoint ? thread->tid * num : 0; auto end = FLAGS_disjoint ? (thread->tid + 1) * num : reads_; for (int i = start; i < end; i++) { const int k = seq ? i : (thread->rand.Next() % num) + start; GenerateKeyFromInt(k, FLAGS_num, &key, missing); std::string value; if (kv_->get(key.ToString(), &value) == pmem::kv::status::OK) found++; thread->stats.FinishedSingleOp(); bytes += value.length() + key.size(); } thread->stats.AddBytes(bytes); char msg[100]; snprintf(msg, sizeof(msg), "(%d of %d found by one thread)", found, reads_); thread->stats.AddMessage(msg); } void ReadSeq(ThreadState *thread) { DoRead(thread, true, false); } void ReadRandom(ThreadState *thread) { DoRead(thread, false, false); } void ReadMissing(ThreadState *thread) { DoRead(thread, false, true); } void DoDelete(ThreadState *thread, bool seq) { std::unique_ptr<const char[]> key_guard; Slice key = AllocateKey(key_guard); for (int i = 0; i < num_; i++) { const int k = seq ? i : (thread->rand.Next() % FLAGS_num); GenerateKeyFromInt(k, FLAGS_num, &key); kv_->remove(key.ToString()); thread->stats.FinishedSingleOp(); } } void DeleteSeq(ThreadState *thread) { DoDelete(thread, true); } void DeleteRandom(ThreadState *thread) { DoDelete(thread, false); } void BGWriter(ThreadState *thread, enum OperationType write_merge) { // Special thread that keeps writing until other threads are done. RandomGenerator gen; int64_t bytes = 0; // Don't merge stats from this thread with the readers. 
thread->stats.SetExcludeFromMerge(); std::unique_ptr<const char[]> key_guard; Slice key = AllocateKey(key_guard); uint32_t written = 0; bool hint_printed = false; while (true) { { MutexLock l(&thread->shared->mu); if (thread->shared->num_done + 1 >= thread->shared->num_initialized) { // Finish the write immediately break; } } GenerateKeyFromInt(thread->rand.Next() % FLAGS_num, FLAGS_num, &key); pmem::kv::status s; if (write_merge == kWrite) { s = kv_->put(key.ToString(), gen.Generate(value_size_).ToString()); } else { fprintf(stderr, "Merge operation not supported\n"); exit(1); } written++; if (s != pmem::kv::status::OK) { fprintf(stderr, "Put error\n"); exit(1); } bytes += key.size() + value_size_; } thread->stats.AddBytes(bytes); } void ReadWhileWriting(ThreadState *thread) { if (thread->tid > 0) { ReadRandom(thread); } else { BGWriter(thread, kWrite); } } void ReadRandomWriteRandom(ThreadState *thread) { RandomGenerator gen; std::string value; int64_t found = 0; int get_weight = 0; int put_weight = 0; int64_t reads_done = 0; int64_t writes_done = 0; int64_t bytes = 0; Duration duration(FLAGS_duration, readwrites_); std::unique_ptr<const char[]> key_guard; Slice key = AllocateKey(key_guard); // the number of iterations is the larger of read_ or write_ while (!duration.Done(1)) { GenerateKeyFromInt(thread->rand.Next() % FLAGS_num, FLAGS_num, &key); if (get_weight == 0 && put_weight == 0) { // one batch completed, reinitialize for next batch get_weight = FLAGS_readwritepercent; put_weight = 100 - get_weight; } if (get_weight > 0) { value.clear(); pmem::kv::status s = kv_->get(key.ToString(), &value); if (s == pmem::kv::status::OK) { found++; } else if (s != pmem::kv::status::NOT_FOUND) { fprintf(stderr, "get error\n"); } bytes += value.length() + key.size(); get_weight--; reads_done++; thread->stats.FinishedSingleOp(); } else if (put_weight > 0) { // then do all the corresponding number of puts // for all the gets we have done earlier pmem::kv::status s = kv_->put(key.ToString(), gen.Generate(value_size_).ToString()); if (s != pmem::kv::status::OK) { fprintf(stderr, "put error\n"); exit(1); } bytes += key.size() + value_size_; put_weight--; writes_done++; thread->stats.FinishedSingleOp(); } } thread->stats.AddBytes(bytes); char msg[100]; snprintf(msg, sizeof(msg), "(reads:%" PRIu64 " writes:%" PRIu64 " total:%" PRIu64 " found:%" PRIu64 ")", reads_done, writes_done, readwrites_, found); thread->stats.AddMessage(msg); } void TxFillRandom(ThreadState *thread) { DoWrite<TxInserter>(thread, false); } }; /////////////////Page fault handling///////////////// #include <bits/types/sig_atomic_t.h> #include <bits/types/sigset_t.h> #include <signal.h> #include <unistd.h> #include <sys/mman.h> #include <fcntl.h> #include <libpmem.h> #define SIGSTKSZ 8192 #define SA_SIGINFO 4 #define SA_ONSTACK 0x08000000 /* Use signal stack by using `sa_restorer'. */ #define SA_RESTART 0x10000000 /* Restart syscall on signal return. 
*/ #define SA_NODEFER 0x40000000 /* Don't automatically block the signal when*/ stack_t _sigstk; int updated_page_count = 0; int all_updates = 0; void * checkpoint_start; void * page[50]; void * device; void cmd_issue( uint32_t opcode, uint32_t TXID, uint32_t TID, uint32_t OID, uint64_t data_addr, uint32_t data_size, void * ptr){ //command with thread id encoded as first 8 bits of each word uint32_t issue_cmd[7]; issue_cmd[0] = (TID<<24)|(opcode<<16)|(TXID<<8)|TID; issue_cmd[1] = (TID<<24)|(OID<<16)|(data_addr>>48); issue_cmd[2] = (TID<<24)|((data_addr & 0x0000FFFFFFFFFFFF)>>24); issue_cmd[3] = (TID<<24)|(data_addr & 0x0000000000FFFFFF); issue_cmd[4] = (TID<<24)|(data_size<<8); issue_cmd[5] = (TID<<24)|(0X00FFFFFF>>16); issue_cmd[6] = (TID<<24)|((0X00FFFFFF & 0x0000FFFF)<<8); for(int i=0;i<7;i++){ //printf("%d %08x\n",TID, issue_cmd[i]); *((u_int32_t *) ptr) = issue_cmd[i]; } } static inline uint64_t getCycle(){ uint32_t cycles_high, cycles_low, pid; asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx "mov %%edx, %0\n\t" "mov %%eax, %1\n\t" "mov %%ecx, %2\n\t" :"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars :// no input :"%eax", "%edx", "%ecx" // clobbered by rdtscp ); return((uint64_t)cycles_high << 32) | cycles_low; } /// @brief Signal handler to trap SEGVs. static void segvHandle(int signum, siginfo_t * siginfo, void * context) { #define CPTIME #ifdef CPTIME uint64_t endCycles, startCycles,totalCycles; startCycles = getCycle(); #endif void * addr = siginfo->si_addr; // address of access uint64_t pageNo = ((uint64_t)addr)/4096; unsigned long * pageStart = (unsigned long *)(pageNo*4096); // Check if this was a SEGV that we are supposed to trap. if (siginfo->si_code == SEGV_ACCERR) { mprotect(pageStart, 4096, PROT_READ|PROT_WRITE); // printf("test1\n"); if(all_updates > 5 || updated_page_count == 50){ for(int i=0;i<updated_page_count;i++){ //memcpy(checkpoint_start + i*4096, page[i],4096); //pmem_persist( checkpoint_start + i*4096,4096); cmd_issue(2,0,0,0, (uint64_t)(checkpoint_start + i*4096),4096,device); page[updated_page_count] = 0; } updated_page_count = 0; all_updates = 0; } all_updates ++; //printf("te\n"); for(int i=0; i<updated_page_count; i++){ if(page[i] == pageStart){ #ifdef CPTIME endCycles = getCycle(); totalCycles = endCycles - startCycles; double totTime = ((double)totalCycles)/2000000000; printf("cp %f\n", totTime); #endif return;} } page[updated_page_count] = pageStart; //printf("test1 %lx %d %d\n",page[updated_page_count],updated_page_count,all_updates); updated_page_count++; #ifdef CPTIME endCycles = getCycle(); totalCycles = endCycles - startCycles; double totTime = ((double)totalCycles)/2000000000; printf("cp %f\n", totTime); #endif //*((int *)checkpoint_start) = 10; //test++; //printf("test1 %lx %d\n",updated_page_count); } else if (siginfo->si_code == SEGV_MAPERR) { fprintf (stderr, "%d : map error with addr %p!\n", getpid(), addr); abort(); } else { fprintf (stderr, "%d : other access error with addr %p.\n", getpid(), addr); abort(); } } static void installSignalHandler(void) { // Set up an alternate signal stack. printf("page fault handler initialized!!\n"); _sigstk.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); _sigstk.ss_size = SIGSTKSZ; _sigstk.ss_flags = 0; sigaltstack(&_sigstk, (stack_t *) 0); // Now set up a signal handler for SIGSEGV events. 
struct sigaction siga; sigemptyset(&siga.sa_mask); // Add these signals to the handler's signal set sigaddset(&siga.sa_mask, SIGSEGV); sigaddset(&siga.sa_mask, SIGALRM); sigprocmask(SIG_BLOCK, &siga.sa_mask, NULL); // Point to the handler function. siga.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART | SA_NODEFER; siga.sa_sigaction = segvHandle; if (sigaction(SIGSEGV, &siga, NULL) == -1) { perror("sigaction(SIGSEGV)"); exit(-1); } sigprocmask(SIG_UNBLOCK, &siga.sa_mask, NULL); return; } //static void setpage(void * addr){ // uint64_t pageNo = ((uint64_t)addr)/4096; // unsigned long * pageStart = (unsigned long *)(pageNo*4096); // mprotect(pageStart, 4096, PROT_READ); // return; //} static void resetpage(void * addr){ uint64_t pageNo = ((uint64_t)addr)/4096; unsigned long * pageStart = (unsigned long *)(pageNo*4096); mprotect(pageStart, 4096, PROT_READ|PROT_WRITE); return; } void* open_device(const char* pathname) { //int fd = os_open("/sys/devices/pci0000:00/0000:00:00.2/iommu/ivhd0/devices/0000:0a:00.0/resource0",O_RDWR|O_SYNC); int fd = open(pathname,O_RDWR|O_SYNC); if(fd == -1) { printf("Couldn't open file!!\n"); exit(0); } void * ptr = mmap(0,4096,PROT_READ|PROT_WRITE, MAP_SHARED,fd,0); if(ptr == MAP_FAILED) { printf("Could not map memory!!\n"); exit(0); } printf("opened device without error!!\n"); return ptr; } /////////////////////////////////////////////////////////////// void installSignalHandler (void) __attribute__ ((constructor)); int main(int argc, char **argv) { size_t mapped_len1; int is_pmem1; if ((checkpoint_start = pmem_map_file("/mnt/mem/checkpoint", 4096*50, PMEM_FILE_CREATE, 0666, &mapped_len1, &is_pmem1)) == NULL) { fprintf(stderr, "pmem_map_file failed\n"); exit(0); } device = open_device("/sys/devices/pci0000:00/0000:00:00.2/iommu/ivhd0/devices/0000:0a:00.0/resource0"); // Default list of comma-separated operations to run static const char *FLAGS_benchmarks = "fillseq,fillrandom,overwrite,readseq,readrandom,readmissing,deleteseq,deleterandom,readwhilewriting,readrandomwriterandom"; // Default engine name static const char *FLAGS_engine = "cmap"; // Print usage statement if necessary if (argc != 1) { if ((strcmp(argv[1], "?") == 0) || (strcmp(argv[1], "-?") == 0) || (strcmp(argv[1], "h") == 0) || (strcmp(argv[1], "-h") == 0) || (strcmp(argv[1], "-help") == 0) || (strcmp(argv[1], "--help") == 0)) { fprintf(stderr, "%s", USAGE.c_str()); exit(1); } } // Parse command-line arguments for (int i = 1; i < argc; i++) { int n; char junk; if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) { FLAGS_benchmarks = argv[i] + strlen("--benchmarks="); } else if (strncmp(argv[i], "--engine=", 9) == 0) { FLAGS_engine = argv[i] + 9; } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_histogram = n; } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) { FLAGS_num = n; } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) { FLAGS_reads = n; } else if (sscanf(argv[i], "--threads=%d%c", &n, &junk) == 1) { FLAGS_threads = n; } else if (sscanf(argv[i], "--key_size=%d%c", &n, &junk) == 1) { FLAGS_key_size = n; } else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) { FLAGS_value_size = n; } else if (sscanf(argv[i], "--readwritepercent=%d%c", &n, &junk) == 1) { FLAGS_readwritepercent = n; } else if (strncmp(argv[i], "--db=", 5) == 0) { FLAGS_db = argv[i] + 5; } else if (sscanf(argv[i], "--db_size_in_gb=%d%c", &n, &junk) == 1) { FLAGS_db_size_in_gb = n; } else if (sscanf(argv[i], "--tx_size=%d%c", &n, &junk) == 1) { FLAGS_tx_size = n; }
else if (sscanf(argv[i], "--disjoint=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_disjoint = n; } else { fprintf(stderr, "Invalid flag '%s'\n", argv[i]); exit(1); } } // Run benchmark against default environment g_env = leveldb::Env::Default(); BenchmarkLogger logger = BenchmarkLogger(); int return_value = 0; pmem::kv::db *kv = NULL; const char *benchmarks = FLAGS_benchmarks; while (benchmarks != NULL) { const char *sep = strchr(benchmarks, ','); Slice name; if (sep == NULL) { name = benchmarks; benchmarks = NULL; } else { name = Slice(benchmarks, sep - benchmarks); benchmarks = sep + 1; } try { auto benchmark = Benchmark(name, kv, FLAGS_threads, FLAGS_engine, logger); benchmark.Run(); } catch (std::exception &e) { std::cerr << e.what() << std::endl; return_value = 1; break; } } if (kv != NULL) { kv->close(); delete kv; } logger.print(); if (FLAGS_histogram) { logger.print_histogram(); } return return_value; }
35,996
25.984258
126
cc
null
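The signal-handler checkpointing in the benchmark above reduces to one mechanism: write-protect the tracked pages, let the first store to each page raise SIGSEGV, and have the handler record the dirty page before re-enabling writes. Below is a minimal, self-contained sketch of just that mechanism; the device MMIO command format, pmem paths, and cycle counting in the file above are specific to that setup and deliberately left out, and all names here (OnSegv, dirty_pages, kPage) are mine.

#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static const size_t kPage = 4096;
static void *dirty_pages[16];   /* pages touched since the last checkpoint */
static int dirty_count = 0;

static void OnSegv(int, siginfo_t *si, void *) {
	/* Round the faulting address down to its page and unprotect it so
	 * the interrupted store completes when the handler returns. */
	void *page = (void *)((uintptr_t)si->si_addr & ~(uintptr_t)(kPage - 1));
	mprotect(page, kPage, PROT_READ | PROT_WRITE);
	if (dirty_count < 16)
		dirty_pages[dirty_count++] = page; /* log it for the checkpointer */
}

int main() {
	void *region = mmap(NULL, 4 * kPage, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = OnSegv;
	sigaction(SIGSEGV, &sa, NULL);

	mprotect(region, 4 * kPage, PROT_READ); /* arm write tracking */
	((char *)region)[kPage] = 'x';          /* faults once, then succeeds */
	printf("dirty pages since checkpoint: %d\n", dirty_count); /* 1 */
	return 0;
}

A real checkpointer, like segvHandle above, would then flush each logged page to the persistent copy and re-protect the region before starting the next epoch.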
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/util/csv.h
// SPDX-License-Identifier: Apache-2.0 /* Copyright 2020-2021, Intel Corporation */ #pragma once #include <iostream> #include <map> #include <ostream> #include <set> #include <string> template <typename IdType> class CSV { private: /* Hold data in two-dimensional map of strings: data_matrix[row][column] */ std::map<IdType, std::map<std::string, std::string>> data_matrix; /* List of all columns, which is filled during inserts. Needed for * printing header and data in the same order. */ std::set<std::string> columns; std::string id_name; public: CSV(std::string id_column_name) : id_name(id_column_name) { } void insert(IdType row, std::string column, std::string data) { columns.insert(column); data_matrix[row][column] = data; } void insert(IdType row, std::string column, const char *data) { insert(row, column, std::string(data)); } template <typename T> void insert(IdType row, std::string column, T data) { insert(row, column, std::to_string(data)); } void print() { // Print first column name std::cout << id_name; for (auto &column : columns) { std::cout << "," << column; } std::cout << "\r\n" << std::flush; for (auto &row : data_matrix) { std::cout << row.first; for (auto &column : columns) { std::cout << "," << data_matrix[row.first][column]; } std::cout << "\r\n" << std::flush; } } };
1,381
21.290323
73
h
null
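A quick sketch of how this logger is meant to be driven: row ids (here ints) become the first column, and every insert registers its column name in the std::set, so the header and the rows stay aligned. Note that columns print in the set's sorted order, not insertion order. The values below are made up.

#include "util/csv.h"

int main() {
	CSV<int> csv("run");              // "run" heads the id column
	csv.insert(0, "engine", "cmap");
	csv.insert(0, "ops/sec", 12345);  // template overload -> std::to_string
	csv.insert(1, "engine", "stree");
	csv.insert(1, "ops/sec", 23456);
	csv.print();
	// run,engine,ops/sec
	// 0,cmap,12345
	// 1,stree,23456
	return 0;
}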
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/util/env.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. #include "leveldb/env.h" namespace leveldb { Env::~Env() { } Status Env::NewAppendableFile(const std::string &fname, WritableFile **result) { return Status::NotSupported("NewAppendableFile", fname); } SequentialFile::~SequentialFile() { } RandomAccessFile::~RandomAccessFile() { } WritableFile::~WritableFile() { } Logger::~Logger() { } FileLock::~FileLock() { } void Log(Logger *info_log, const char *format, ...) { if (info_log != NULL) { va_list ap; va_start(ap, format); info_log->Logv(format, ap); va_end(ap); } } static Status DoWriteStringToFile(Env *env, const Slice &data, const std::string &fname, bool should_sync) { WritableFile *file; Status s = env->NewWritableFile(fname, &file); if (!s.ok()) { return s; } s = file->Append(data); if (s.ok() && should_sync) { s = file->Sync(); } if (s.ok()) { s = file->Close(); } delete file; // Will auto-close if we did not close above if (!s.ok()) { env->DeleteFile(fname); } return s; } Status WriteStringToFile(Env *env, const Slice &data, const std::string &fname) { return DoWriteStringToFile(env, data, fname, false); } Status WriteStringToFileSync(Env *env, const Slice &data, const std::string &fname) { return DoWriteStringToFile(env, data, fname, true); } Status ReadFileToString(Env *env, const std::string &fname, std::string *data) { data->clear(); SequentialFile *file; Status s = env->NewSequentialFile(fname, &file); if (!s.ok()) { return s; } static const int kBufferSize = 8192; char *space = new char[kBufferSize]; while (true) { Slice fragment; s = file->Read(kBufferSize, &fragment, space); if (!s.ok()) { break; } data->append(fragment.data(), fragment.size()); if (fragment.empty()) { break; } } delete[] space; delete file; return s; } EnvWrapper::~EnvWrapper() { } } // namespace leveldb
2,069
17.648649
106
cc
null
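The helpers above are thin compositions of the Env primitives; a minimal round trip through them, with error handling kept deliberately simple and the path made up, looks like this:

#include "leveldb/env.h"
#include <cassert>
#include <string>

int main() {
	leveldb::Env *env = leveldb::Env::Default();
	// The Sync variant fdatasync's the file before Close().
	leveldb::Status s = leveldb::WriteStringToFileSync(
		env, leveldb::Slice("hello"), "/tmp/env_demo.txt");
	assert(s.ok());
	std::string data;
	s = leveldb::ReadFileToString(env, "/tmp/env_demo.txt", &data);
	assert(s.ok() && data == "hello");
	return 0;
}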
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/util/logging.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 /* Copyright 2020, Intel Corporation */ #include "util/logging.h" #include "leveldb/env.h" #include "leveldb/slice.h" #include <errno.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> namespace leveldb { void AppendNumberTo(std::string *str, uint64_t num) { char buf[30]; snprintf(buf, sizeof(buf), "%llu", (unsigned long long)num); str->append(buf); } void AppendEscapedStringTo(std::string *str, const Slice &value) { for (size_t i = 0; i < value.size(); i++) { char c = value[i]; if (c >= ' ' && c <= '~') { str->push_back(c); } else { char buf[10]; snprintf(buf, sizeof(buf), "\\x%02x", static_cast<unsigned int>(c) & 0xff); str->append(buf); } } } std::string NumberToString(uint64_t num) { std::string r; AppendNumberTo(&r, num); return r; } std::string EscapeString(const Slice &value) { std::string r; AppendEscapedStringTo(&r, value); return r; } bool ConsumeDecimalNumber(Slice *in, uint64_t *val) { uint64_t v = 0; int digits = 0; while (!in->empty()) { char c = (*in)[0]; if (c >= '0' && c <= '9') { ++digits; // |delta| intentionally uint64_t to avoid Android crash (see log). const uint64_t delta = (c - '0'); static const uint64_t kMaxUint64 = ~static_cast<uint64_t>(0); if (v > kMaxUint64 / 10 || (v == kMaxUint64 / 10 && delta > kMaxUint64 % 10)) { // Overflow return false; } v = (v * 10) + delta; in->remove_prefix(1); } else { break; } } *val = v; return (digits > 0); } } // namespace leveldb
1,775
20.925926
82
cc
null
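ConsumeDecimalNumber is the only non-obvious routine here: it peels digits off the front of the slice (with overflow checking) and leaves the remainder in place, so it can parse a number out of a larger token. A small sketch:

#include "leveldb/slice.h"
#include "util/logging.h"
#include <cassert>

int main() {
	leveldb::Slice in("12345suffix");
	uint64_t v = 0;
	bool ok = leveldb::ConsumeDecimalNumber(&in, &v);
	assert(ok && v == 12345);
	assert(in.ToString() == "suffix");  // digits were consumed in place
	// Non-printable bytes are hex-escaped:
	assert(leveldb::EscapeString(leveldb::Slice("a\x01z")) == "a\\x01z");
	return 0;
}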
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/util/logging.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // Must not be included from any .h files to avoid polluting the namespace // with macros. #ifndef STORAGE_LEVELDB_UTIL_LOGGING_H_ #define STORAGE_LEVELDB_UTIL_LOGGING_H_ #include "port/port_posix.h" #include <stdint.h> #include <stdio.h> #include <string> namespace leveldb { class Slice; class WritableFile; // Append a human-readable printout of "num" to *str extern void AppendNumberTo(std::string *str, uint64_t num); // Append a human-readable printout of "value" to *str. // Escapes any non-printable characters found in "value". extern void AppendEscapedStringTo(std::string *str, const Slice &value); // Return a human-readable printout of "num" extern std::string NumberToString(uint64_t num); // Return a human-readable version of "value". // Escapes any non-printable characters found in "value". extern std::string EscapeString(const Slice &value); // Parse a human-readable number from "*in" into *value. On success, // advances "*in" past the consumed number and sets "*val" to the // numeric value. Otherwise, returns false and leaves *in in an // unspecified state. extern bool ConsumeDecimalNumber(Slice *in, uint64_t *val); } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_LOGGING_H_
1,519
30.666667
81
h
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/util/status.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 /* Copyright 2020, Intel Corporation */ #include "leveldb/status.h" #include "port/port_posix.h" #include <stdio.h> namespace leveldb { const char *Status::CopyState(const char *state) { uint32_t size; memcpy(&size, state, sizeof(size)); char *result = new char[size + 5]; memcpy(result, state, size + 5); return result; } Status::Status(Code code, const Slice &msg, const Slice &msg2) { assert(code != kOk); const uint32_t len1 = msg.size(); const uint32_t len2 = msg2.size(); const uint32_t size = len1 + (len2 ? (2 + len2) : 0); char *result = new char[size + 5]; memcpy(result, &size, sizeof(size)); result[4] = static_cast<char>(code); memcpy(result + 5, msg.data(), len1); if (len2) { result[5 + len1] = ':'; result[6 + len1] = ' '; memcpy(result + 7 + len1, msg2.data(), len2); } state_ = result; } std::string Status::ToString() const { if (state_ == NULL) { return "OK"; } else { char tmp[30]; const char *type; switch (code()) { case kOk: type = "OK"; break; case kNotFound: type = "NotFound: "; break; case kCorruption: type = "Corruption: "; break; case kNotSupported: type = "Not implemented: "; break; case kInvalidArgument: type = "Invalid argument: "; break; case kIOError: type = "IO error: "; break; default: snprintf(tmp, sizeof(tmp), "Unknown code(%d): ", static_cast<int>(code())); type = tmp; break; } std::string result(type); uint32_t length; memcpy(&length, state_, sizeof(length)); result.append(state_ + 5, length); return result; } } } // namespace leveldb
1,877
21.902439
81
cc
null
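Callers never see the packed state_ layout (4-byte message length, 1-byte code, message bytes); they just construct a Status and stringify it. A sketch, using the same factory methods the Env code above calls (file name and message made up):

#include "leveldb/status.h"
#include <cassert>

int main() {
	leveldb::Status ok = leveldb::Status::OK();
	assert(ok.ok() && ok.ToString() == "OK");   // OK is the NULL state_

	leveldb::Status err = leveldb::Status::IOError("bench.db", "disk full");
	assert(!err.ok());
	// Code prefix plus "msg: msg2" round-trips through ToString():
	assert(err.ToString() == "IO error: bench.db: disk full");
	return 0;
}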
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/util/testutil.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_ #define STORAGE_LEVELDB_UTIL_TESTUTIL_H_ #include "leveldb/env.h" #include "leveldb/slice.h" #include "util/random.h" namespace leveldb { namespace test { // Store in *dst a random string of length "len" and return a Slice that // references the generated data. Slice RandomString(Random *rnd, int len, std::string *dst); // Return a random key with the specified length that may contain interesting // characters (e.g. \x00, \xff, etc.). std::string RandomKey(Random *rnd, int len); // Store in *dst a string of length "len" that will compress to // "N*compressed_fraction" bytes and return a Slice that references // the generated data. Slice CompressibleString(Random *rnd, double compressed_fraction, size_t len, std::string *dst); // A wrapper that allows injection of errors. class ErrorEnv : public EnvWrapper { public: bool writable_file_error_; int num_writable_file_errors_; ErrorEnv() : EnvWrapper(Env::Default()), writable_file_error_(false), num_writable_file_errors_(0) { } virtual Status NewWritableFile(const std::string &fname, WritableFile **result) { if (writable_file_error_) { ++num_writable_file_errors_; *result = nullptr; return Status::IOError(fname, "fake error"); } return target()->NewWritableFile(fname, result); } virtual Status NewAppendableFile(const std::string &fname, WritableFile **result) { if (writable_file_error_) { ++num_writable_file_errors_; *result = nullptr; return Status::IOError(fname, "fake error"); } return target()->NewAppendableFile(fname, result); } }; } // namespace test } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_TESTUTIL_H_
1,984
28.191176
99
h
null
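ErrorEnv is a fault injector: flip writable_file_error_ and every file-creation call fails with a counted IOError, which is how write-path error handling gets exercised in tests. A sketch (path made up):

#include "util/testutil.h"
#include <cassert>

int main() {
	leveldb::test::ErrorEnv env;
	env.writable_file_error_ = true;             // arm the injector
	leveldb::WritableFile *file = nullptr;
	leveldb::Status s = env.NewWritableFile("/tmp/x", &file);
	assert(!s.ok() && file == nullptr);          // the "fake error" IOError
	assert(env.num_writable_file_errors_ == 1);  // each failure is counted
	return 0;
}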
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/util/mutexlock.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_ #define STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_ #include "port/port_posix.h" #include "port/thread_annotations.h" namespace leveldb { // Helper class that locks a mutex on construction and unlocks the mutex when // the destructor of the MutexLock object is invoked. // // Typical usage: // // void MyClass::MyMethod() { // MutexLock l(&mu_); // mu_ is an instance variable // ... some complex code, possibly with multiple return paths ... // } class SCOPED_LOCKABLE MutexLock { public: explicit MutexLock(port::Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) { this->mu_->Lock(); } ~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); } private: port::Mutex *const mu_; // No copying allowed MutexLock(const MutexLock &); void operator=(const MutexLock &); }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
1,202
24.0625
81
h
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/util/histogram.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2017-2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_HISTOGRAM_H_ #define STORAGE_LEVELDB_UTIL_HISTOGRAM_H_ #include <string> namespace leveldb { class Histogram { public: Histogram() { } ~Histogram() { } void Clear(); void Add(double value); void Merge(const Histogram &other); std::string ToString() const; double Median() const; double Percentile(double p) const; double Average() const; double StandardDeviation() const; private: double min_; double max_; double num_; double sum_; double sum_squares_; enum { kNumBuckets = 154 }; static const double kBucketLimit[kNumBuckets]; double buckets_[kNumBuckets]; }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_HISTOGRAM_H_
993
18.490196
81
h
null
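Note the constructor is empty, so Clear() must run before the first Add(); after that the lifecycle mirrors how the benchmark's per-thread stats use it. Values are bucketed, so percentiles are interpolated approximations:

#include "util/histogram.h"
#include <cstdio>

int main() {
	leveldb::Histogram h;
	h.Clear();                 // required: the ctor does not zero state
	for (int i = 1; i <= 100; i++)
		h.Add(i);          // e.g. per-op latencies in micros
	std::printf("median=%.1f p99=%.1f avg=%.1f\n",
		    h.Median(), h.Percentile(99), h.Average());
	std::printf("%s", h.ToString().c_str());  // bucket table with # marks
	return 0;
}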
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/util/testutil.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. #include "util/testutil.h" #include "util/random.h" namespace leveldb { namespace test { Slice RandomString(Random *rnd, int len, std::string *dst) { dst->resize(len); for (int i = 0; i < len; i++) { (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95)); // ' ' .. '~' } return Slice(*dst); } std::string RandomKey(Random *rnd, int len) { // Make sure to generate a wide variety of characters so we // test the boundary conditions for short-key optimizations. static const char kTestChars[] = {'\0', '\1', 'a', 'b', 'c', 'd', 'e', '\xfd', '\xfe', '\xff'}; std::string result; for (int i = 0; i < len; i++) { result += kTestChars[rnd->Uniform(sizeof(kTestChars))]; } return result; } Slice CompressibleString(Random *rnd, double compressed_fraction, size_t len, std::string *dst) { int raw = static_cast<int>(len * compressed_fraction); if (raw < 1) raw = 1; std::string raw_data; RandomString(rnd, raw, &raw_data); // Duplicate the random data until we have filled "len" bytes dst->clear(); while (dst->size() < len) { dst->append(raw_data); } dst->resize(len); return Slice(*dst); } } // namespace test } // namespace leveldb
1,384
24.648148
96
cc
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/util/histogram.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 /* Copyright 2020, Intel Corporation */ #include "util/histogram.h" #include "port/port_posix.h" #include <math.h> #include <stdio.h> namespace leveldb { // clang-format off const double Histogram::kBucketLimit[kNumBuckets] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400, 450, 500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 2000, 2500, 3000, 3500, 4000, 4500, 5000, 6000, 7000, 8000, 9000, 10000, 12000, 14000, 16000, 18000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 60000, 70000, 80000, 90000, 100000, 120000, 140000, 160000, 180000, 200000, 250000, 300000, 350000, 400000, 450000, 500000, 600000, 700000, 800000, 900000, 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2500000, 3000000, 3500000, 4000000, 4500000, 5000000, 6000000, 7000000, 8000000, 9000000, 10000000, 12000000, 14000000, 16000000, 18000000, 20000000, 25000000, 30000000, 35000000, 40000000, 45000000, 50000000, 60000000, 70000000, 80000000, 90000000, 100000000, 120000000, 140000000, 160000000, 180000000, 200000000, 250000000, 300000000, 350000000, 400000000, 450000000, 500000000, 600000000, 700000000, 800000000, 900000000, 1000000000, 1200000000, 1400000000, 1600000000, 1800000000, 2000000000, 2500000000.0, 3000000000.0, 3500000000.0, 4000000000.0, 4500000000.0, 5000000000.0, 6000000000.0, 7000000000.0, 8000000000.0, 9000000000.0, 1e200, }; // clang-format on void Histogram::Clear() { min_ = kBucketLimit[kNumBuckets - 1]; max_ = 0; num_ = 0; sum_ = 0; sum_squares_ = 0; for (int i = 0; i < kNumBuckets; i++) { buckets_[i] = 0; } } void Histogram::Add(double value) { // Linear search is fast enough for our usage in db_bench int b = 0; while (b < kNumBuckets - 1 && kBucketLimit[b] <= value) { b++; } buckets_[b] += 1.0; if (min_ > value) min_ = value; if (max_ < value) max_ = value; num_++; sum_ += value; sum_squares_ += (value * value); } void Histogram::Merge(const Histogram &other) { if (other.min_ < min_) min_ = other.min_; if (other.max_ > max_) max_ = other.max_; num_ += other.num_; sum_ += other.sum_; sum_squares_ += other.sum_squares_; for (int b = 0; b < kNumBuckets; b++) { buckets_[b] += other.buckets_[b]; } } double Histogram::Median() const { return Percentile(50.0); } double Histogram::Percentile(double p) const { double threshold = num_ * (p / 100.0); double sum = 0; for (int b = 0; b < kNumBuckets; b++) { sum += buckets_[b]; if (sum >= threshold) { // Scale linearly within this bucket double left_point = (b == 0) ? 
0 : kBucketLimit[b - 1]; double right_point = kBucketLimit[b]; double left_sum = sum - buckets_[b]; double right_sum = sum; double pos = (threshold - left_sum) / (right_sum - left_sum); double r = left_point + (right_point - left_point) * pos; if (r < min_) r = min_; if (r > max_) r = max_; return r; } } return max_; } double Histogram::Average() const { if (num_ == 0.0) return 0; return sum_ / num_; } double Histogram::StandardDeviation() const { if (num_ == 0.0) return 0; double variance = (sum_squares_ * num_ - sum_ * sum_) / (num_ * num_); return sqrt(variance); } std::string Histogram::ToString() const { std::string r; char buf[200]; snprintf(buf, sizeof(buf), "Count: %.0f Average: %.4f StdDev: %.2f\n", num_, Average(), StandardDeviation()); r.append(buf); snprintf(buf, sizeof(buf), "Min: %.4f Median: %.4f Max: %.4f\n", (num_ == 0.0 ? 0.0 : min_), Median(), max_); r.append(buf); snprintf(buf, sizeof(buf), "Percentiles: P50: %.2f P75: %.2f P99: %.2f P99.9: %.2f P99.99: %.2f\n", Percentile(50), Percentile(75), Percentile(99), Percentile(99.9), Percentile(99.99)); r.append(buf); r.append("------------------------------------------------------\n"); const double mult = 100.0 / num_; double sum = 0; for (int b = 0; b < kNumBuckets; b++) { if (buckets_[b] <= 0.0) continue; sum += buckets_[b]; snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ", ((b == 0) ? 0.0 : kBucketLimit[b - 1]), // left kBucketLimit[b], // right buckets_[b], // count mult * buckets_[b], // percentage mult * sum); // cumulative percentage r.append(buf); // Add hash marks based on percentage; 20 marks for 100%. int marks = static_cast<int>(20 * (buckets_[b] / num_) + 0.5); r.append(marks, '#'); r.push_back('\n'); } return r; } } // namespace leveldb
4,793
28.411043
100
cc
null
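To make the interpolation in Percentile() concrete, here is a hand trace under assumed counts: num_ = 10 samples total, and the cumulative count first reaches the P50 threshold inside the bucket [20, 25), which holds 4 samples on top of a prior cumulative 4:

// threshold = 10 * (50 / 100.0)              = 5
// left_sum  = sum - buckets_[b] = 8 - 4      = 4
// right_sum = sum                            = 8
// pos = (threshold - left_sum) / (right_sum - left_sum) = (5 - 4) / 4 = 0.25
// r   = left_point + (right_point - left_point) * pos   = 20 + 5 * 0.25 = 21.25
// r is then clamped into [min_, max_] before being returned.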
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/util/env_posix.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 /* Copyright 2020, Intel Corporation */ #include <deque> #include <dirent.h> #include <errno.h> #include <fcntl.h> #include <limits> #include <pthread.h> #include <set> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/mman.h> #include <sys/resource.h> #include <sys/stat.h> #include <sys/time.h> #include <sys/types.h> #include <time.h> #include <unistd.h> #include "leveldb/env.h" #include "leveldb/slice.h" #include "port/port_posix.h" #include "util/env_posix_test_helper.h" #include "util/logging.h" #include "util/mutexlock.h" #include "util/posix_logger.h" namespace leveldb { namespace { static int open_read_only_file_limit = -1; static int mmap_limit = -1; static const size_t kBufSize = 65536; static Status PosixError(const std::string &context, int err_number) { if (err_number == ENOENT) { return Status::NotFound(context, strerror(err_number)); } else { return Status::IOError(context, strerror(err_number)); } } // Helper class to limit resource usage to avoid exhaustion. // Currently used to limit read-only file descriptors and mmap file usage // so that we do not end up running out of file descriptors, virtual memory, // or running into kernel performance problems for very large databases. class Limiter { public: // Limit maximum number of resources to |n|. Limiter(intptr_t n) { SetAllowed(n); } // If another resource is available, acquire it and return true. // Else return false. bool Acquire() { if (GetAllowed() <= 0) { return false; } MutexLock l(&mu_); intptr_t x = GetAllowed(); if (x <= 0) { return false; } else { SetAllowed(x - 1); return true; } } // Release a resource acquired by a previous call to Acquire() that returned // true. void Release() { MutexLock l(&mu_); SetAllowed(GetAllowed() + 1); } private: port::Mutex mu_; port::AtomicPointer allowed_; intptr_t GetAllowed() const { return reinterpret_cast<intptr_t>(allowed_.Acquire_Load()); } // REQUIRES: mu_ must be held void SetAllowed(intptr_t v) { allowed_.Release_Store(reinterpret_cast<void *>(v)); } Limiter(const Limiter &); void operator=(const Limiter &); }; class PosixSequentialFile : public SequentialFile { private: std::string filename_; int fd_; public: PosixSequentialFile(const std::string &fname, int fd) : filename_(fname), fd_(fd) { } virtual ~PosixSequentialFile() { close(fd_); } virtual Status Read(size_t n, Slice *result, char *scratch) { Status s; while (true) { ssize_t r = read(fd_, scratch, n); if (r < 0) { if (errno == EINTR) { continue; // Retry } s = PosixError(filename_, errno); break; } *result = Slice(scratch, r); break; } return s; } virtual Status Skip(uint64_t n) { if (lseek(fd_, n, SEEK_CUR) == static_cast<off_t>(-1)) { return PosixError(filename_, errno); } return Status::OK(); } }; // pread() based random-access class PosixRandomAccessFile : public RandomAccessFile { private: std::string filename_; bool temporary_fd_; // If true, fd_ is -1 and we open on every read. int fd_; Limiter *limiter_; public: PosixRandomAccessFile(const std::string &fname, int fd, Limiter *limiter) : filename_(fname), fd_(fd), limiter_(limiter) { temporary_fd_ = !limiter->Acquire(); if (temporary_fd_) { // Open file on every access. 
close(fd_); fd_ = -1; } } virtual ~PosixRandomAccessFile() { if (!temporary_fd_) { close(fd_); limiter_->Release(); } } virtual Status Read(uint64_t offset, size_t n, Slice *result, char *scratch) const { int fd = fd_; if (temporary_fd_) { fd = open(filename_.c_str(), O_RDONLY); if (fd < 0) { return PosixError(filename_, errno); } } Status s; ssize_t r = pread(fd, scratch, n, static_cast<off_t>(offset)); *result = Slice(scratch, (r < 0) ? 0 : r); if (r < 0) { // An error: return a non-ok status s = PosixError(filename_, errno); } if (temporary_fd_) { // Close the temporary file descriptor opened earlier. close(fd); } return s; } }; // mmap() based random-access class PosixMmapReadableFile : public RandomAccessFile { private: std::string filename_; void *mmapped_region_; size_t length_; Limiter *limiter_; public: // base[0,length-1] contains the mmapped contents of the file. PosixMmapReadableFile(const std::string &fname, void *base, size_t length, Limiter *limiter) : filename_(fname), mmapped_region_(base), length_(length), limiter_(limiter) { } virtual ~PosixMmapReadableFile() { munmap(mmapped_region_, length_); limiter_->Release(); } virtual Status Read(uint64_t offset, size_t n, Slice *result, char *scratch) const { Status s; if (offset + n > length_) { *result = Slice(); s = PosixError(filename_, EINVAL); } else { *result = Slice(reinterpret_cast<char *>(mmapped_region_) + offset, n); } return s; } }; class PosixWritableFile : public WritableFile { private: // buf_[0, pos_-1] contains data to be written to fd_. std::string filename_; int fd_; char buf_[kBufSize]; size_t pos_; public: PosixWritableFile(const std::string &fname, int fd) : filename_(fname), fd_(fd), pos_(0) { } ~PosixWritableFile() { if (fd_ >= 0) { // Ignoring any potential errors Close(); } } virtual Status Append(const Slice &data) { size_t n = data.size(); const char *p = data.data(); // Fit as much as possible into buffer. size_t copy = std::min(n, kBufSize - pos_); memcpy(buf_ + pos_, p, copy); p += copy; n -= copy; pos_ += copy; if (n == 0) { return Status::OK(); } // Can't fit in buffer, so need to do at least one write. Status s = FlushBuffered(); if (!s.ok()) { return s; } // Small writes go to buffer, large writes are written directly. if (n < kBufSize) { memcpy(buf_, p, n); pos_ = n; return Status::OK(); } return WriteRaw(p, n); } virtual Status Close() { Status result = FlushBuffered(); const int r = close(fd_); if (r < 0 && result.ok()) { result = PosixError(filename_, errno); } fd_ = -1; return result; } virtual Status Flush() { return FlushBuffered(); } Status SyncDirIfManifest() { const char *f = filename_.c_str(); const char *sep = strrchr(f, '/'); Slice basename; std::string dir; if (sep == NULL) { dir = "."; basename = f; } else { dir = std::string(f, sep - f); basename = sep + 1; } Status s; if (basename.starts_with("MANIFEST")) { int fd = open(dir.c_str(), O_RDONLY); if (fd < 0) { s = PosixError(dir, errno); } else { if (fsync(fd) < 0) { s = PosixError(dir, errno); } close(fd); } } return s; } virtual Status Sync() { // Ensure new files referred to by the manifest are in the filesystem. 
Status s = SyncDirIfManifest(); if (!s.ok()) { return s; } s = FlushBuffered(); if (s.ok()) { if (fdatasync(fd_) != 0) { s = PosixError(filename_, errno); } } return s; } private: Status FlushBuffered() { Status s = WriteRaw(buf_, pos_); pos_ = 0; return s; } Status WriteRaw(const char *p, size_t n) { while (n > 0) { ssize_t r = write(fd_, p, n); if (r < 0) { if (errno == EINTR) { continue; // Retry } return PosixError(filename_, errno); } p += r; n -= r; } return Status::OK(); } }; static int LockOrUnlock(int fd, bool lock) { errno = 0; struct flock f; memset(&f, 0, sizeof(f)); f.l_type = (lock ? F_WRLCK : F_UNLCK); f.l_whence = SEEK_SET; f.l_start = 0; f.l_len = 0; // Lock/unlock entire file return fcntl(fd, F_SETLK, &f); } class PosixFileLock : public FileLock { public: int fd_; std::string name_; }; // Set of locked files. We keep a separate set instead of just // relying on fcntl(F_SETLK) since fcntl(F_SETLK) does not provide // any protection against multiple uses from the same process. class PosixLockTable { private: port::Mutex mu_; std::set<std::string> locked_files_; public: bool Insert(const std::string &fname) { MutexLock l(&mu_); return locked_files_.insert(fname).second; } void Remove(const std::string &fname) { MutexLock l(&mu_); locked_files_.erase(fname); } }; class PosixEnv : public Env { public: PosixEnv(); virtual ~PosixEnv() { char msg[] = "Destroying Env::Default()\n"; fwrite(msg, 1, sizeof(msg), stderr); abort(); } virtual Status NewSequentialFile(const std::string &fname, SequentialFile **result) { int fd = open(fname.c_str(), O_RDONLY); if (fd < 0) { *result = NULL; return PosixError(fname, errno); } else { *result = new PosixSequentialFile(fname, fd); return Status::OK(); } } virtual Status NewRandomAccessFile(const std::string &fname, RandomAccessFile **result) { *result = NULL; Status s; int fd = open(fname.c_str(), O_RDONLY); if (fd < 0) { s = PosixError(fname, errno); } else if (mmap_limit_.Acquire()) { uint64_t size; s = GetFileSize(fname, &size); if (s.ok()) { void *base = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0); if (base != MAP_FAILED) { *result = new PosixMmapReadableFile(fname, base, size, &mmap_limit_); } else { s = PosixError(fname, errno); } } close(fd); if (!s.ok()) { mmap_limit_.Release(); } } else { *result = new PosixRandomAccessFile(fname, fd, &fd_limit_); } return s; } virtual Status NewWritableFile(const std::string &fname, WritableFile **result) { Status s; int fd = open(fname.c_str(), O_TRUNC | O_WRONLY | O_CREAT, 0644); if (fd < 0) { *result = NULL; s = PosixError(fname, errno); } else { *result = new PosixWritableFile(fname, fd); } return s; } virtual Status NewAppendableFile(const std::string &fname, WritableFile **result) { Status s; int fd = open(fname.c_str(), O_APPEND | O_WRONLY | O_CREAT, 0644); if (fd < 0) { *result = NULL; s = PosixError(fname, errno); } else { *result = new PosixWritableFile(fname, fd); } return s; } virtual bool FileExists(const std::string &fname) { return access(fname.c_str(), F_OK) == 0; } virtual Status GetChildren(const std::string &dir, std::vector<std::string> *result) { result->clear(); DIR *d = opendir(dir.c_str()); if (d == NULL) { return PosixError(dir, errno); } struct dirent *entry; while ((entry = readdir(d)) != NULL) { result->push_back(entry->d_name); } closedir(d); return Status::OK(); } virtual Status DeleteFile(const std::string &fname) { Status result; if (unlink(fname.c_str()) != 0) { result = PosixError(fname, errno); } return result; } virtual Status CreateDir(const std::string &name)
{ Status result; if (mkdir(name.c_str(), 0755) != 0) { result = PosixError(name, errno); } return result; } virtual Status DeleteDir(const std::string &name) { Status result; if (rmdir(name.c_str()) != 0) { result = PosixError(name, errno); } return result; } virtual Status GetFileSize(const std::string &fname, uint64_t *size) { Status s; struct stat sbuf; if (stat(fname.c_str(), &sbuf) != 0) { *size = 0; s = PosixError(fname, errno); } else { *size = sbuf.st_size; } return s; } virtual Status RenameFile(const std::string &src, const std::string &target) { Status result; if (rename(src.c_str(), target.c_str()) != 0) { result = PosixError(src, errno); } return result; } virtual Status LockFile(const std::string &fname, FileLock **lock) { *lock = NULL; Status result; int fd = open(fname.c_str(), O_RDWR | O_CREAT, 0644); if (fd < 0) { result = PosixError(fname, errno); } else if (!locks_.Insert(fname)) { close(fd); result = Status::IOError("lock " + fname, "already held by process"); } else if (LockOrUnlock(fd, true) == -1) { result = PosixError("lock " + fname, errno); close(fd); locks_.Remove(fname); } else { PosixFileLock *my_lock = new PosixFileLock; my_lock->fd_ = fd; my_lock->name_ = fname; *lock = my_lock; } return result; } virtual Status UnlockFile(FileLock *lock) { PosixFileLock *my_lock = reinterpret_cast<PosixFileLock *>(lock); Status result; if (LockOrUnlock(my_lock->fd_, false) == -1) { result = PosixError("unlock", errno); } locks_.Remove(my_lock->name_); close(my_lock->fd_); delete my_lock; return result; } virtual void Schedule(void (*function)(void *), void *arg); virtual void StartThread(void (*function)(void *arg), void *arg); virtual Status GetTestDirectory(std::string *result) { const char *env = getenv("TEST_TMPDIR"); if (env && env[0] != '\0') { *result = env; } else { char buf[100]; snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d", int(geteuid())); *result = buf; } // Directory may already exist CreateDir(*result); return Status::OK(); } static uint64_t gettid() { pthread_t tid = pthread_self(); uint64_t thread_id = 0; memcpy(&thread_id, &tid, std::min(sizeof(thread_id), sizeof(tid))); return thread_id; } virtual Status NewLogger(const std::string &fname, Logger **result) { FILE *f = fopen(fname.c_str(), "w"); if (f == NULL) { *result = NULL; return PosixError(fname, errno); } else { *result = new PosixLogger(f, &PosixEnv::gettid); return Status::OK(); } } virtual uint64_t NowMicros() { struct timeval tv; gettimeofday(&tv, NULL); return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec; } virtual void SleepForMicroseconds(int micros) { usleep(micros); } private: void PthreadCall(const char *label, int result) { if (result != 0) { fprintf(stderr, "pthread %s: %s\n", label, strerror(result)); abort(); } } // BGThread() is the body of the background thread void BGThread(); static void *BGThreadWrapper(void *arg) { reinterpret_cast<PosixEnv *>(arg)->BGThread(); return NULL; } pthread_mutex_t mu_; pthread_cond_t bgsignal_; pthread_t bgthread_; bool started_bgthread_; // Entry per Schedule() call struct BGItem { void *arg; void (*function)(void *); }; typedef std::deque<BGItem> BGQueue; BGQueue queue_; PosixLockTable locks_; Limiter mmap_limit_; Limiter fd_limit_; }; // Return the maximum number of concurrent mmaps. static int MaxMmaps() { if (mmap_limit >= 0) { return mmap_limit; } // Up to 1000 mmaps for 64-bit binaries; none for smaller pointer sizes. mmap_limit = sizeof(void *) >= 8 ? 
1000 : 0; return mmap_limit; } // Return the maximum number of read-only files to keep open. static intptr_t MaxOpenFiles() { if (open_read_only_file_limit >= 0) { return open_read_only_file_limit; } struct rlimit rlim; if (getrlimit(RLIMIT_NOFILE, &rlim)) { // getrlimit failed, fallback to hard-coded default. open_read_only_file_limit = 50; } else if (rlim.rlim_cur == RLIM_INFINITY) { open_read_only_file_limit = std::numeric_limits<int>::max(); } else { // Allow use of 20% of available file descriptors for read-only files. open_read_only_file_limit = rlim.rlim_cur / 5; } return open_read_only_file_limit; } PosixEnv::PosixEnv() : started_bgthread_(false), mmap_limit_(MaxMmaps()), fd_limit_(MaxOpenFiles()) { PthreadCall("mutex_init", pthread_mutex_init(&mu_, NULL)); PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, NULL)); } void PosixEnv::Schedule(void (*function)(void *), void *arg) { PthreadCall("lock", pthread_mutex_lock(&mu_)); // Start background thread if necessary if (!started_bgthread_) { started_bgthread_ = true; PthreadCall("create thread", pthread_create(&bgthread_, NULL, &PosixEnv::BGThreadWrapper, this)); } // If the queue is currently empty, the background thread may currently be // waiting. if (queue_.empty()) { PthreadCall("signal", pthread_cond_signal(&bgsignal_)); } // Add to priority queue queue_.push_back(BGItem()); queue_.back().function = function; queue_.back().arg = arg; PthreadCall("unlock", pthread_mutex_unlock(&mu_)); } void PosixEnv::BGThread() { while (true) { // Wait until there is an item that is ready to run PthreadCall("lock", pthread_mutex_lock(&mu_)); while (queue_.empty()) { PthreadCall("wait", pthread_cond_wait(&bgsignal_, &mu_)); } void (*function)(void *) = queue_.front().function; void *arg = queue_.front().arg; queue_.pop_front(); PthreadCall("unlock", pthread_mutex_unlock(&mu_)); (*function)(arg); } } namespace { struct StartThreadState { void (*user_function)(void *); void *arg; }; } static void *StartThreadWrapper(void *arg) { StartThreadState *state = reinterpret_cast<StartThreadState *>(arg); state->user_function(state->arg); delete state; return NULL; } void PosixEnv::StartThread(void (*function)(void *arg), void *arg) { pthread_t t; StartThreadState *state = new StartThreadState; state->user_function = function; state->arg = arg; PthreadCall("start thread", pthread_create(&t, NULL, &StartThreadWrapper, state)); } } // namespace static pthread_once_t once = PTHREAD_ONCE_INIT; static Env *default_env; static void InitDefaultEnv() { default_env = new PosixEnv; } void EnvPosixTestHelper::SetReadOnlyFDLimit(int limit) { assert(default_env == NULL); open_read_only_file_limit = limit; } void EnvPosixTestHelper::SetReadOnlyMMapLimit(int limit) { assert(default_env == NULL); mmap_limit = limit; } Env *Env::Default() { pthread_once(&once, InitDefaultEnv); return default_env; } } // namespace leveldb
17,689
20.812577
99
cc
null
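Schedule() lazily starts one shared background thread and queues work on it, while StartThread() spawns a dedicated pthread per call; neither offers a join API, so callers synchronize themselves (the benchmark does it with SharedState's condvar). A minimal sketch with made-up task names:

#include "leveldb/env.h"
#include <cstdio>

static void Task(void *arg) {
	*static_cast<int *>(arg) = 1;   // runs off the caller's thread
}

int main() {
	leveldb::Env *env = leveldb::Env::Default();
	int a = 0, b = 0;
	env->Schedule(&Task, &a);       // queued on the shared BG thread
	env->StartThread(&Task, &b);    // runs on its own pthread
	env->SleepForMicroseconds(100 * 1000); // crude wait; no join primitive
	std::printf("a=%d b=%d\n", a, b);      // expected 1 1 once both ran
	return 0;
}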
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/util/random.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_RANDOM_H_ #define STORAGE_LEVELDB_UTIL_RANDOM_H_ #include <stdint.h> namespace leveldb { // A very simple random number generator. Not especially good at // generating truly random bits, but good enough for our needs in this // package. class Random { private: uint32_t seed_; public: explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) { // Avoid bad seeds. if (seed_ == 0 || seed_ == 2147483647L) { seed_ = 1; } } uint32_t Next() { static const uint32_t M = 2147483647L; // 2^31-1 static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0 // We are computing // seed_ = (seed_ * A) % M, where M = 2^31-1 // // seed_ must not be zero or M, or else all subsequent computed values // will be zero or M respectively. For all other values, seed_ will end // up cycling through every number in [1,M-1] uint64_t product = seed_ * A; // Compute (product % M) using the fact that ((x << 31) % M) == x. seed_ = static_cast<uint32_t>((product >> 31) + (product & M)); // The first reduction may overflow by 1 bit, so we may need to // repeat. mod == M is not possible; using > allows the faster // sign-bit-based test. if (seed_ > M) { seed_ -= M; } return seed_; } // Returns a uniformly distributed value in the range [0..n-1] // REQUIRES: n > 0 uint32_t Uniform(int n) { return Next() % n; } // Randomly returns true ~"1/n" of the time, and false otherwise. // REQUIRES: n > 0 bool OneIn(int n) { return (Next() % n) == 0; } // Skewed: pick "base" uniformly from range [0,max_log] and then // return "base" random bits. The effect is to pick a number in the // range [0,2^max_log-1] with exponential bias towards smaller numbers. uint32_t Skewed(int max_log) { return Uniform(1 << Uniform(max_log + 1)); } }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_RANDOM_H_
2,202
26.886076
81
h
null
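This is the classic Park-Miller/Lehmer generator (seed_ = seed_ * 16807 mod 2^31 - 1), so sequences are fully determined by the seed, which keeps benchmark runs reproducible. Usage:

#include "util/random.h"

int main() {
	leveldb::Random rnd(301);        // same seed -> same sequence
	uint32_t raw = rnd.Next();       // full-range value in [1, 2^31 - 2]
	uint32_t die = rnd.Uniform(6);   // uniform in [0, 5]
	bool coin = rnd.OneIn(2);        // true about half the time
	uint32_t sk = rnd.Skewed(10);    // in [0, 1023], biased toward small
	(void)raw; (void)die; (void)coin; (void)sk;
	return 0;
}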
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/util/posix_logger.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // // Logger implementation that can be shared by all environments // where enough posix functionality is available. #ifndef STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_ #define STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_ #include "leveldb/env.h" #include <algorithm> #include <stdio.h> #include <sys/time.h> #include <time.h> namespace leveldb { class PosixLogger : public Logger { private: FILE *file_; uint64_t (*gettid_)(); // Return the thread id for the current thread public: PosixLogger(FILE *f, uint64_t (*gettid)()) : file_(f), gettid_(gettid) { } virtual ~PosixLogger() { fclose(file_); } virtual void Logv(const char *format, va_list ap) { const uint64_t thread_id = (*gettid_)(); // We try twice: the first time with a fixed-size stack allocated buffer, // and the second time with a much larger dynamically allocated buffer. char buffer[500]; for (int iter = 0; iter < 2; iter++) { char *base; int bufsize; if (iter == 0) { bufsize = sizeof(buffer); base = buffer; } else { bufsize = 30000; base = new char[bufsize]; } char *p = base; char *limit = base + bufsize; struct timeval now_tv; gettimeofday(&now_tv, NULL); const time_t seconds = now_tv.tv_sec; struct tm t; localtime_r(&seconds, &t); p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ", t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, static_cast<int>(now_tv.tv_usec), static_cast<long long unsigned int>(thread_id)); // Print the message if (p < limit) { va_list backup_ap; va_copy(backup_ap, ap); p += vsnprintf(p, limit - p, format, backup_ap); va_end(backup_ap); } // Truncate to available space if necessary if (p >= limit) { if (iter == 0) { continue; // Try again with larger buffer } else { p = limit - 1; } } // Add newline if necessary if (p == base || p[-1] != '\n') { *p++ = '\n'; } assert(p <= limit); fwrite(base, 1, p - base, file_); fflush(file_); if (base != buffer) { delete[] base; } break; } } }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
2,503
23.54902
81
h
null
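PosixLogger takes ownership of the FILE* and a caller-supplied thread-id callback; combined with Log() from env.cc it yields timestamped, thread-tagged lines. A sketch; ThreadId below is a placeholder (PosixEnv keeps its own private equivalent), and the pthread_self() cast assumes a platform where pthread_t is integral, as on Linux:

#include "leveldb/env.h"
#include "util/posix_logger.h"
#include <cstdio>
#include <pthread.h>

// Placeholder tid callback; any function returning uint64_t works.
static uint64_t ThreadId() {
	return (uint64_t)pthread_self();
}

int main() {
	FILE *f = std::fopen("/tmp/bench.log", "w");
	leveldb::Logger *logger = new leveldb::PosixLogger(f, &ThreadId);
	leveldb::Log(logger, "run %d finished in %.2f ms", 1, 12.5);
	delete logger;   // the destructor fcloses the FILE*
	return 0;
}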
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/util/env_posix_test_helper.h
// Copyright 2017 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_ #define STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_ namespace leveldb { class EnvPosixTest; // A helper for the POSIX Env to facilitate testing. class EnvPosixTestHelper { private: friend class EnvPosixTest; // Set the maximum number of read-only files that will be opened. // Must be called before creating an Env. static void SetReadOnlyFDLimit(int limit); // Set the maximum number of read-only files that will be mapped via mmap. // Must be called before creating an Env. static void SetReadOnlyMMapLimit(int limit); }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_
967
28.333333
81
h
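The two setters above are private and exposed only to the EnvPosixTest friend class, so a standalone sketch has to reuse that class name, as the real env_posix_test.cc does. The limit values here are arbitrary.

#include "util/env_posix_test_helper.h" // hypothetical include path

namespace leveldb {

// In the real tree this class is defined in env_posix_test.cc; the name is
// reused here only to satisfy the friend declaration above.
class EnvPosixTest {
public:
	static void SetSmallLimits() {
		// Both setters must run before the first Env is created.
		EnvPosixTestHelper::SetReadOnlyFDLimit(4);
		EnvPosixTestHelper::SetReadOnlyMMapLimit(4);
	}
};

} // namespace leveldb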
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/port/port_posix.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // See port_example.h for documentation for the following types/functions. #ifndef STORAGE_LEVELDB_PORT_PORT_POSIX_H_ #define STORAGE_LEVELDB_PORT_PORT_POSIX_H_ #undef PLATFORM_IS_LITTLE_ENDIAN #if defined(__APPLE__) #include <machine/endian.h> #if defined(__DARWIN_LITTLE_ENDIAN) && defined(__DARWIN_BYTE_ORDER) #define PLATFORM_IS_LITTLE_ENDIAN (__DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN) #endif #elif defined(OS_SOLARIS) #include <sys/isa_defs.h> #ifdef _LITTLE_ENDIAN #define PLATFORM_IS_LITTLE_ENDIAN true #else #define PLATFORM_IS_LITTLE_ENDIAN false #endif #elif defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLYBSD) #include <sys/endian.h> #include <sys/types.h> #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN) #elif defined(OS_HPUX) #define PLATFORM_IS_LITTLE_ENDIAN false #elif defined(OS_ANDROID) // Due to a bug in the NDK x86 <sys/endian.h> definition, // _BYTE_ORDER must be used instead of __BYTE_ORDER on Android. // See http://code.google.com/p/android/issues/detail?id=39824 #include <endian.h> #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN) #else #include <endian.h> #endif #include <pthread.h> #if defined(HAVE_CRC32C) #include <crc32c/crc32c.h> #endif // defined(HAVE_CRC32C) #ifdef HAVE_SNAPPY #include <snappy.h> #endif // defined(HAVE_SNAPPY) #include "port/atomic_pointer.h" #include <stdint.h> #include <string> #ifndef PLATFORM_IS_LITTLE_ENDIAN #define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN) #endif #if defined(__APPLE__) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) // Use fsync() on platforms without fdatasync() #define fdatasync fsync #endif #if defined(OS_ANDROID) && __ANDROID_API__ < 9 // fdatasync() was only introduced in API level 9 on Android. Use fsync() // when targeting older platforms. 
#define fdatasync fsync #endif namespace leveldb { namespace port { static const bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN; #undef PLATFORM_IS_LITTLE_ENDIAN class CondVar; class Mutex { public: Mutex(); ~Mutex(); void Lock(); void Unlock(); void AssertHeld() { } private: friend class CondVar; pthread_mutex_t mu_; // No copying Mutex(const Mutex &); void operator=(const Mutex &); }; class CondVar { public: explicit CondVar(Mutex *mu); ~CondVar(); void Wait(); void Signal(); void SignalAll(); private: pthread_cond_t cv_; Mutex *mu_; }; typedef pthread_once_t OnceType; #define LEVELDB_ONCE_INIT PTHREAD_ONCE_INIT extern void InitOnce(OnceType *once, void (*initializer)()); inline bool Snappy_Compress(const char *input, size_t length, ::std::string *output) { #ifdef HAVE_SNAPPY output->resize(snappy::MaxCompressedLength(length)); size_t outlen; snappy::RawCompress(input, length, &(*output)[0], &outlen); output->resize(outlen); return true; #endif // defined(HAVE_SNAPPY) return false; } inline bool Snappy_GetUncompressedLength(const char *input, size_t length, size_t *result) { #ifdef HAVE_SNAPPY return snappy::GetUncompressedLength(input, length, result); #else return false; #endif // defined(HAVE_SNAPPY) } inline bool Snappy_Uncompress(const char *input, size_t length, char *output) { #ifdef HAVE_SNAPPY return snappy::RawUncompress(input, length, output); #else return false; #endif // defined(HAVE_SNAPPY) } inline bool GetHeapProfile(void (*func)(void *, const char *, int), void *arg) { return false; } inline uint32_t AcceleratedCRC32C(uint32_t crc, const char *buf, size_t size) { #if defined(HAVE_CRC32C) return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t *>(buf), size); #else return 0; #endif // defined(HAVE_CRC32C) } } // namespace port } // namespace leveldb #endif // STORAGE_LEVELDB_PORT_PORT_POSIX_H_
4,061
23.768293
98
h
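A small producer/consumer sketch using the port::Mutex and port::CondVar wrappers above; the queue itself is invented for the example.

#include "port/port_posix.h" // hypothetical include path

class SimpleQueue {
public:
	SimpleQueue() : cv_(&mu_), items_(0) { }

	void Put() {
		mu_.Lock();
		items_++;
		cv_.Signal(); // wake one waiting Take()
		mu_.Unlock();
	}

	void Take() {
		mu_.Lock();
		while (items_ == 0) // loop guards against spurious wakeups
			cv_.Wait();     // atomically releases mu_ while blocked
		items_--;
		mu_.Unlock();
	}

private:
	leveldb::port::Mutex mu_;
	leveldb::port::CondVar cv_;
	int items_;
};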
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/port/port_posix.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 /* Copyright 2020, Intel Corporation */ #include "port/port_posix.h" #include <cstdlib> #include <stdio.h> #include <string.h> namespace leveldb { namespace port { static void PthreadCall(const char *label, int result) { if (result != 0) { fprintf(stderr, "pthread %s: %s\n", label, strerror(result)); abort(); } } Mutex::Mutex() { PthreadCall("init mutex", pthread_mutex_init(&mu_, NULL)); } Mutex::~Mutex() { PthreadCall("destroy mutex", pthread_mutex_destroy(&mu_)); } void Mutex::Lock() { PthreadCall("lock", pthread_mutex_lock(&mu_)); } void Mutex::Unlock() { PthreadCall("unlock", pthread_mutex_unlock(&mu_)); } CondVar::CondVar(Mutex *mu) : mu_(mu) { PthreadCall("init cv", pthread_cond_init(&cv_, NULL)); } CondVar::~CondVar() { PthreadCall("destroy cv", pthread_cond_destroy(&cv_)); } void CondVar::Wait() { PthreadCall("wait", pthread_cond_wait(&cv_, &mu_->mu_)); } void CondVar::Signal() { PthreadCall("signal", pthread_cond_signal(&cv_)); } void CondVar::SignalAll() { PthreadCall("broadcast", pthread_cond_broadcast(&cv_)); } void InitOnce(OnceType *once, void (*initializer)()) { PthreadCall("once", pthread_once(once, initializer)); } } // namespace port } // namespace leveldb
1,484
17.797468
81
cc
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/port/thread_annotations.h
// Copyright (c) 2012 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_ #define STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_ // Some environments provide custom macros to aid in static thread-safety // analysis. Provide empty definitions of such macros unless they are already // defined. #ifndef EXCLUSIVE_LOCKS_REQUIRED #define EXCLUSIVE_LOCKS_REQUIRED(...) #endif #ifndef SHARED_LOCKS_REQUIRED #define SHARED_LOCKS_REQUIRED(...) #endif #ifndef LOCKS_EXCLUDED #define LOCKS_EXCLUDED(...) #endif #ifndef LOCK_RETURNED #define LOCK_RETURNED(x) #endif #ifndef LOCKABLE #define LOCKABLE #endif #ifndef SCOPED_LOCKABLE #define SCOPED_LOCKABLE #endif #ifndef EXCLUSIVE_LOCK_FUNCTION #define EXCLUSIVE_LOCK_FUNCTION(...) #endif #ifndef SHARED_LOCK_FUNCTION #define SHARED_LOCK_FUNCTION(...) #endif #ifndef EXCLUSIVE_TRYLOCK_FUNCTION #define EXCLUSIVE_TRYLOCK_FUNCTION(...) #endif #ifndef SHARED_TRYLOCK_FUNCTION #define SHARED_TRYLOCK_FUNCTION(...) #endif #ifndef UNLOCK_FUNCTION #define UNLOCK_FUNCTION(...) #endif #ifndef NO_THREAD_SAFETY_ANALYSIS #define NO_THREAD_SAFETY_ANALYSIS #endif #endif // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
1,429
21.34375
81
h
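Illustrative use of the macros above. With a toolchain that defines them (for example clang's -Wthread-safety mode), these annotations become compile-time checks; with the empty definitions in this header they serve as documentation only. The class is invented for the example.

#include "port/port_posix.h" // hypothetical include paths
#include "port/thread_annotations.h"

class AnnotatedCounter {
public:
	AnnotatedCounter() : count_(0) { }

	void Increment() LOCKS_EXCLUDED(mu_) {
		mu_.Lock();
		IncrementLocked();
		mu_.Unlock();
	}

private:
	// Callers must already hold mu_.
	void IncrementLocked() EXCLUSIVE_LOCKS_REQUIRED(mu_) { count_++; }

	leveldb::port::Mutex mu_;
	int count_;
};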
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/port/atomic_pointer.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // AtomicPointer provides storage for a lock-free pointer. // Platform-dependent implementation of AtomicPointer: // - If the platform provides a cheap barrier, we use it with raw pointers // - If <atomic> is present (on newer versions of gcc, it is), we use // a <atomic>-based AtomicPointer. However we prefer the memory // barrier based version, because at least on a gcc 4.4 32-bit build // on linux, we have encountered a buggy <atomic> implementation. // Also, some <atomic> implementations are much slower than a memory-barrier // based implementation (~16ns for <atomic> based acquire-load vs. ~1ns for // a barrier based acquire-load). // This code is based on atomicops-internals-* in Google's perftools: // http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase #ifndef PORT_ATOMIC_POINTER_H_ #define PORT_ATOMIC_POINTER_H_ #include <stdint.h> #ifdef LEVELDB_ATOMIC_PRESENT #include <atomic> #endif #ifdef OS_WIN #include <windows.h> #endif #ifdef __APPLE__ #include <libkern/OSAtomic.h> #endif #if defined(_M_X64) || defined(__x86_64__) #define ARCH_CPU_X86_FAMILY 1 #elif defined(_M_IX86) || defined(__i386__) || defined(__i386) #define ARCH_CPU_X86_FAMILY 1 #elif defined(__ARMEL__) #define ARCH_CPU_ARM_FAMILY 1 #elif defined(__aarch64__) #define ARCH_CPU_ARM64_FAMILY 1 #elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__) #define ARCH_CPU_PPC_FAMILY 1 #elif defined(__mips__) #define ARCH_CPU_MIPS_FAMILY 1 #endif namespace leveldb { namespace port { // Define MemoryBarrier() if available // Windows on x86 #if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY) // windows.h already provides a MemoryBarrier(void) macro // http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx #define LEVELDB_HAVE_MEMORY_BARRIER // Mac OS #elif defined(__APPLE__) inline void MemoryBarrier() { OSMemoryBarrier(); } #define LEVELDB_HAVE_MEMORY_BARRIER // Gcc on x86 #elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__) inline void MemoryBarrier() { // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering. __asm__ __volatile__("" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // Sun Studio #elif defined(ARCH_CPU_X86_FAMILY) && defined(__SUNPRO_CC) inline void MemoryBarrier() { // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering. asm volatile("" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // ARM Linux #elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__) typedef void (*LinuxKernelMemoryBarrierFunc)(void); // The Linux ARM kernel provides a highly optimized device-specific memory // barrier function at a fixed memory address that is mapped in every // user-level process. // // This beats using CPU-specific instructions which are, on single-core // devices, un-necessary and very costly (e.g. ARMv7-A "dmb" takes more // than 180ns on a Cortex-A8 like the one on a Nexus One). Benchmarking // shows that the extra function call cost is completely negligible on // multi-core devices. 
// inline void MemoryBarrier() { (*(LinuxKernelMemoryBarrierFunc)0xffff0fa0)(); } #define LEVELDB_HAVE_MEMORY_BARRIER // ARM64 #elif defined(ARCH_CPU_ARM64_FAMILY) inline void MemoryBarrier() { asm volatile("dmb sy" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // PPC #elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__) inline void MemoryBarrier() { // TODO for some powerpc expert: is there a cheaper suitable variant? // Perhaps by having separate barriers for acquire and release ops. asm volatile("sync" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // MIPS #elif defined(ARCH_CPU_MIPS_FAMILY) && defined(__GNUC__) inline void MemoryBarrier() { __asm__ __volatile__("sync" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER #endif // AtomicPointer built using platform-specific MemoryBarrier() #if defined(LEVELDB_HAVE_MEMORY_BARRIER) class AtomicPointer { private: void *rep_; public: AtomicPointer() { } explicit AtomicPointer(void *p) : rep_(p) { } inline void *NoBarrier_Load() const { return rep_; } inline void NoBarrier_Store(void *v) { rep_ = v; } inline void *Acquire_Load() const { void *result = rep_; MemoryBarrier(); return result; } inline void Release_Store(void *v) { MemoryBarrier(); rep_ = v; } }; // AtomicPointer based on <cstdatomic> #elif defined(LEVELDB_ATOMIC_PRESENT) class AtomicPointer { private: std::atomic<void *> rep_; public: AtomicPointer() { } explicit AtomicPointer(void *v) : rep_(v) { } inline void *Acquire_Load() const { return rep_.load(std::memory_order_acquire); } inline void Release_Store(void *v) { rep_.store(v, std::memory_order_release); } inline void *NoBarrier_Load() const { return rep_.load(std::memory_order_relaxed); } inline void NoBarrier_Store(void *v) { rep_.store(v, std::memory_order_relaxed); } }; // Atomic pointer based on sparc memory barriers #elif defined(__sparcv9) && defined(__GNUC__) class AtomicPointer { private: void *rep_; public: AtomicPointer() { } explicit AtomicPointer(void *v) : rep_(v) { } inline void *Acquire_Load() const { void *val; __asm__ __volatile__("ldx [%[rep_]], %[val] \n\t" "membar #LoadLoad|#LoadStore \n\t" : [val] "=r"(val) : [rep_] "r"(&rep_) : "memory"); return val; } inline void Release_Store(void *v) { __asm__ __volatile__("membar #LoadStore|#StoreStore \n\t" "stx %[v], [%[rep_]] \n\t" : : [rep_] "r"(&rep_), [v] "r"(v) : "memory"); } inline void *NoBarrier_Load() const { return rep_; } inline void NoBarrier_Store(void *v) { rep_ = v; } }; // Atomic pointer based on ia64 acq/rel #elif defined(__ia64) && defined(__GNUC__) class AtomicPointer { private: void *rep_; public: AtomicPointer() { } explicit AtomicPointer(void *v) : rep_(v) { } inline void *Acquire_Load() const { void *val; __asm__ __volatile__("ld8.acq %[val] = [%[rep_]] \n\t" : [val] "=r"(val) : [rep_] "r"(&rep_) : "memory"); return val; } inline void Release_Store(void *v) { __asm__ __volatile__("st8.rel [%[rep_]] = %[v] \n\t" : : [rep_] "r"(&rep_), [v] "r"(v) : "memory"); } inline void *NoBarrier_Load() const { return rep_; } inline void NoBarrier_Store(void *v) { rep_ = v; } }; // We have neither MemoryBarrier(), nor <atomic> #else #error Please implement AtomicPointer for this platform. #endif #undef LEVELDB_HAVE_MEMORY_BARRIER #undef ARCH_CPU_X86_FAMILY #undef ARCH_CPU_ARM_FAMILY #undef ARCH_CPU_ARM64_FAMILY #undef ARCH_CPU_PPC_FAMILY } // namespace port } // namespace leveldb #endif // PORT_ATOMIC_POINTER_H_
7,207
23.26936
84
h
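A publish/consume sketch for AtomicPointer; the names are illustrative. The release store guarantees the Config fields are written before the pointer becomes visible, and the acquire load on the reader side pairs with it.

#include "port/atomic_pointer.h" // hypothetical include path

#include <stddef.h>

struct Config {
	int flags;
};

static leveldb::port::AtomicPointer g_config(NULL);

void Publish() {
	Config *c = new Config;
	c->flags = 42;             // initialize fully first...
	g_config.Release_Store(c); // ...then publish with release semantics
}

int Consume() {
	// Acquire pairs with the release above: if the reader sees the pointer,
	// it also sees the initialized fields it points to.
	Config *c = static_cast<Config *>(g_config.Acquire_Load());
	return (c != NULL) ? c->flags : -1;
}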
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/include/leveldb/status.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // A Status encapsulates the result of an operation. It may indicate success, // or it may indicate an error with an associated error message. // // Multiple threads can invoke const methods on a Status without // external synchronization, but if any of the threads may call a // non-const method, all threads accessing the same Status must use // external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_ #define STORAGE_LEVELDB_INCLUDE_STATUS_H_ #include "leveldb/slice.h" #include <string> namespace leveldb { class Status { public: // Create a success status. Status() : state_(NULL) { } ~Status() { delete[] state_; } // Copy the specified status. Status(const Status &s); void operator=(const Status &s); // Return a success status. static Status OK() { return Status(); } // Return error status of an appropriate type. static Status NotFound(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kNotFound, msg, msg2); } static Status Corruption(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kCorruption, msg, msg2); } static Status NotSupported(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kNotSupported, msg, msg2); } static Status InvalidArgument(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kInvalidArgument, msg, msg2); } static Status IOError(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kIOError, msg, msg2); } // Returns true iff the status indicates success. bool ok() const { return (state_ == NULL); } // Returns true iff the status indicates a NotFound error. bool IsNotFound() const { return code() == kNotFound; } // Returns true iff the status indicates a Corruption error. bool IsCorruption() const { return code() == kCorruption; } // Returns true iff the status indicates an IOError. bool IsIOError() const { return code() == kIOError; } // Returns true iff the status indicates a NotSupportedError. bool IsNotSupportedError() const { return code() == kNotSupported; } // Returns true iff the status indicates an InvalidArgument. bool IsInvalidArgument() const { return code() == kInvalidArgument; } // Return a string representation of this status suitable for printing. // Returns the string "OK" for success. std::string ToString() const; private: // OK status has a NULL state_. Otherwise, state_ is a new[] array // of the following form: // state_[0..3] == length of message // state_[4] == code // state_[5..] == message const char *state_; enum Code { kOk = 0, kNotFound = 1, kCorruption = 2, kNotSupported = 3, kInvalidArgument = 4, kIOError = 5 }; Code code() const { return (state_ == NULL) ? kOk : static_cast<Code>(state_[4]); } Status(Code code, const Slice &msg, const Slice &msg2); static const char *CopyState(const char *s); }; inline Status::Status(const Status &s) { state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); } inline void Status::operator=(const Status &s) { // The following condition catches both aliasing (when this == &s), // and the common case where both s and *this are ok. if (state_ != s.state_) { delete[] state_; state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); } } } // namespace leveldb #endif // STORAGE_LEVELDB_INCLUDE_STATUS_H_
3,658
23.231788
81
h
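A typical call-site pattern for Status; the parsing function is a stand-in invented for the example.

#include "leveldb/status.h" // hypothetical include path

#include <stdio.h>

static leveldb::Status ParseRecord(const leveldb::Slice &input) {
	if (input.empty())
		return leveldb::Status::InvalidArgument("record", "is empty");
	// ... real decoding would happen here ...
	return leveldb::Status::OK();
}

void StatusExample() {
	leveldb::Status s = ParseRecord(leveldb::Slice(""));
	if (!s.ok()) // prints "Invalid argument: record: is empty"
		fprintf(stderr, "parse failed: %s\n", s.ToString().c_str());
}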
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/include/leveldb/slice.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // Slice is a simple structure containing a pointer into some external // storage and a size. The user of a Slice must ensure that the slice // is not used after the corresponding external storage has been // deallocated. // // Multiple threads can invoke const methods on a Slice without // external synchronization, but if any of the threads may call a // non-const method, all threads accessing the same Slice must use // external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_SLICE_H_ #define STORAGE_LEVELDB_INCLUDE_SLICE_H_ #include <assert.h> #include <stddef.h> #include <string.h> #include <string> namespace leveldb { class Slice { public: // Create an empty slice. Slice() : data_(""), size_(0) { } // Create a slice that refers to d[0,n-1]. Slice(const char *d, size_t n) : data_(d), size_(n) { } // Create a slice that refers to the contents of "s" Slice(const std::string &s) : data_(s.data()), size_(s.size()) { } // Create a slice that refers to s[0,strlen(s)-1] Slice(const char *s) : data_(s), size_(strlen(s)) { } // Return a pointer to the beginning of the referenced data const char *data() const { return data_; } // Return the length (in bytes) of the referenced data size_t size() const { return size_; } // Return true iff the length of the referenced data is zero bool empty() const { return size_ == 0; } // Return the ith byte in the referenced data. // REQUIRES: n < size() char operator[](size_t n) const { assert(n < size()); return data_[n]; } // Change this slice to refer to an empty array void clear() { data_ = ""; size_ = 0; } // Drop the first "n" bytes from this slice. void remove_prefix(size_t n) { assert(n <= size()); data_ += n; size_ -= n; } // Return a string that contains the copy of the referenced data. std::string ToString() const { return std::string(data_, size_); } // Three-way comparison. Returns value: // < 0 iff "*this" < "b", // == 0 iff "*this" == "b", // > 0 iff "*this" > "b" int compare(const Slice &b) const; // Return true iff "x" is a prefix of "*this" bool starts_with(const Slice &x) const { return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0)); } private: const char *data_; size_t size_; // Intentionally copyable }; inline bool operator==(const Slice &x, const Slice &y) { return ((x.size() == y.size()) && (memcmp(x.data(), y.data(), x.size()) == 0)); } inline bool operator!=(const Slice &x, const Slice &y) { return !(x == y); } inline int Slice::compare(const Slice &b) const { const size_t min_len = (size_ < b.size_) ? size_ : b.size_; int r = memcmp(data_, b.data_, min_len); if (r == 0) { if (size_ < b.size_) r = -1; else if (size_ > b.size_) r = +1; } return r; } } // namespace leveldb #endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_
3,163
21.125874
81
h
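A small sketch of Slice semantics: a Slice aliases external storage without copying, so the backing buffer must outlive it. The key string is invented for the example.

#include "leveldb/slice.h" // hypothetical include path

#include <assert.h>
#include <string>

void SliceExample() {
	std::string backing = "user/0042/profile"; // must outlive 'key'
	leveldb::Slice key(backing);               // no copy: points into backing
	assert(key.starts_with("user/"));
	key.remove_prefix(5);                      // key now views "0042/profile"
	assert(key.compare(leveldb::Slice("0042/profile")) == 0);
	std::string owned = key.ToString();        // copies the bytes out
	(void)owned;
}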
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/bench/include/leveldb/env.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // An Env is an interface used by the leveldb implementation to access // operating system functionality like the filesystem etc. Callers // may wish to provide a custom Env object when opening a database to // get fine-grained control; e.g., to rate limit file system operations. // // All Env implementations are safe for concurrent access from // multiple threads without any external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_ #define STORAGE_LEVELDB_INCLUDE_ENV_H_ #include "leveldb/status.h" #include <stdarg.h> #include <stdint.h> #include <string> #include <vector> namespace leveldb { class FileLock; class Logger; class RandomAccessFile; class SequentialFile; class Slice; class WritableFile; class Env { public: Env() { } virtual ~Env(); // Return a default environment suitable for the current operating // system. Sophisticated users may wish to provide their own Env // implementation instead of relying on this default environment. // // The result of Default() belongs to leveldb and must never be deleted. static Env *Default(); // Create a brand new sequentially-readable file with the specified name. // On success, stores a pointer to the new file in *result and returns OK. // On failure stores NULL in *result and returns non-OK. If the file does // not exist, returns a non-OK status. Implementations should return a // NotFound status when the file does not exist. // // The returned file will only be accessed by one thread at a time. virtual Status NewSequentialFile(const std::string &fname, SequentialFile **result) = 0; // Create a brand new random access read-only file with the // specified name. On success, stores a pointer to the new file in // *result and returns OK. On failure stores NULL in *result and // returns non-OK. If the file does not exist, returns a non-OK // status. Implementations should return a NotFound status when the file does // not exist. // // The returned file may be concurrently accessed by multiple threads. virtual Status NewRandomAccessFile(const std::string &fname, RandomAccessFile **result) = 0; // Create an object that writes to a new file with the specified // name. Deletes any existing file with the same name and creates a // new file. On success, stores a pointer to the new file in // *result and returns OK. On failure stores NULL in *result and // returns non-OK. // // The returned file will only be accessed by one thread at a time. virtual Status NewWritableFile(const std::string &fname, WritableFile **result) = 0; // Create an object that either appends to an existing file, or // writes to a new file (if the file does not exist to begin with). // On success, stores a pointer to the new file in *result and // returns OK. On failure stores NULL in *result and returns // non-OK. // // The returned file will only be accessed by one thread at a time. // // May return an IsNotSupportedError error if this Env does // not allow appending to an existing file. Users of Env (including // the leveldb implementation) must be prepared to deal with // an Env that does not support appending. virtual Status NewAppendableFile(const std::string &fname, WritableFile **result); // Returns true iff the named file exists. 
virtual bool FileExists(const std::string &fname) = 0; // Store in *result the names of the children of the specified directory. // The names are relative to "dir". // Original contents of *results are dropped. virtual Status GetChildren(const std::string &dir, std::vector<std::string> *result) = 0; // Delete the named file. virtual Status DeleteFile(const std::string &fname) = 0; // Create the specified directory. virtual Status CreateDir(const std::string &dirname) = 0; // Delete the specified directory. virtual Status DeleteDir(const std::string &dirname) = 0; // Store the size of fname in *file_size. virtual Status GetFileSize(const std::string &fname, uint64_t *file_size) = 0; // Rename file src to target. virtual Status RenameFile(const std::string &src, const std::string &target) = 0; // Lock the specified file. Used to prevent concurrent access to // the same db by multiple processes. On failure, stores NULL in // *lock and returns non-OK. // // On success, stores a pointer to the object that represents the // acquired lock in *lock and returns OK. The caller should call // UnlockFile(*lock) to release the lock. If the process exits, // the lock will be automatically released. // // If somebody else already holds the lock, finishes immediately // with a failure. I.e., this call does not wait for existing locks // to go away. // // May create the named file if it does not already exist. virtual Status LockFile(const std::string &fname, FileLock **lock) = 0; // Release the lock acquired by a previous successful call to LockFile. // REQUIRES: lock was returned by a successful LockFile() call // REQUIRES: lock has not already been unlocked. virtual Status UnlockFile(FileLock *lock) = 0; // Arrange to run "(*function)(arg)" once in a background thread. // // "function" may run in an unspecified thread. Multiple functions // added to the same Env may run concurrently in different threads. // I.e., the caller may not assume that background work items are // serialized. virtual void Schedule(void (*function)(void *arg), void *arg) = 0; // Start a new thread, invoking "function(arg)" within the new thread. // When "function(arg)" returns, the thread will be destroyed. virtual void StartThread(void (*function)(void *arg), void *arg) = 0; // *path is set to a temporary directory that can be used for testing. It may // or may not have just been created. The directory may or may not differ // between runs of the same process, but subsequent calls will return the // same directory. virtual Status GetTestDirectory(std::string *path) = 0; // Create and return a log file for storing informational messages. virtual Status NewLogger(const std::string &fname, Logger **result) = 0; // Returns the number of micro-seconds since some fixed point in time. Only // useful for computing deltas of time. virtual uint64_t NowMicros() = 0; // Sleep/delay the thread for the prescribed number of micro-seconds. virtual void SleepForMicroseconds(int micros) = 0; private: // No copying allowed Env(const Env &); void operator=(const Env &); }; // A file abstraction for reading sequentially through a file class SequentialFile { public: SequentialFile() { } virtual ~SequentialFile(); // Read up to "n" bytes from the file. "scratch[0..n-1]" may be // written by this routine. Sets "*result" to the data that was // read (including if fewer than "n" bytes were successfully read). // May set "*result" to point at data in "scratch[0..n-1]", so // "scratch[0..n-1]" must be live when "*result" is used. 
// If an error was encountered, returns a non-OK status. // // REQUIRES: External synchronization virtual Status Read(size_t n, Slice *result, char *scratch) = 0; // Skip "n" bytes from the file. This is guaranteed to be no // slower than reading the same data, but may be faster. // // If end of file is reached, skipping will stop at the end of the // file, and Skip will return OK. // // REQUIRES: External synchronization virtual Status Skip(uint64_t n) = 0; private: // No copying allowed SequentialFile(const SequentialFile &); void operator=(const SequentialFile &); }; // A file abstraction for randomly reading the contents of a file. class RandomAccessFile { public: RandomAccessFile() { } virtual ~RandomAccessFile(); // Read up to "n" bytes from the file starting at "offset". // "scratch[0..n-1]" may be written by this routine. Sets "*result" // to the data that was read (including if fewer than "n" bytes were // successfully read). May set "*result" to point at data in // "scratch[0..n-1]", so "scratch[0..n-1]" must be live when // "*result" is used. If an error was encountered, returns a non-OK // status. // // Safe for concurrent use by multiple threads. virtual Status Read(uint64_t offset, size_t n, Slice *result, char *scratch) const = 0; private: // No copying allowed RandomAccessFile(const RandomAccessFile &); void operator=(const RandomAccessFile &); }; // A file abstraction for sequential writing. The implementation // must provide buffering since callers may append small fragments // at a time to the file. class WritableFile { public: WritableFile() { } virtual ~WritableFile(); virtual Status Append(const Slice &data) = 0; virtual Status Close() = 0; virtual Status Flush() = 0; virtual Status Sync() = 0; private: // No copying allowed WritableFile(const WritableFile &); void operator=(const WritableFile &); }; // An interface for writing log messages. class Logger { public: Logger() { } virtual ~Logger(); // Write an entry to the log file with the specified format. virtual void Logv(const char *format, va_list ap) = 0; private: // No copying allowed Logger(const Logger &); void operator=(const Logger &); }; // Identifies a locked file. class FileLock { public: FileLock() { } virtual ~FileLock(); private: // No copying allowed FileLock(const FileLock &); void operator=(const FileLock &); }; // Log the specified data to *info_log if info_log is non-NULL. extern void Log(Logger *info_log, const char *format, ...) #if defined(__GNUC__) || defined(__clang__) __attribute__((__format__(__printf__, 2, 3))) #endif ; // A utility routine: write "data" to the named file. Status WriteStringToFile(Env *env, const Slice &data, const std::string &fname); // A utility routine: read contents of named file into *data Status ReadFileToString(Env *env, const std::string &fname, std::string *data); // An implementation of Env that forwards all calls to another Env. // May be useful to clients who wish to override just part of the // functionality of another Env. 
class EnvWrapper : public Env { public: // Initialize an EnvWrapper that delegates all calls to *t explicit EnvWrapper(Env *t) : target_(t) { } virtual ~EnvWrapper(); // Return the target to which this Env forwards all calls Env *target() const { return target_; } // The following text is boilerplate that forwards all methods to target() Status NewSequentialFile(const std::string &f, SequentialFile **r) { return target_->NewSequentialFile(f, r); } Status NewRandomAccessFile(const std::string &f, RandomAccessFile **r) { return target_->NewRandomAccessFile(f, r); } Status NewWritableFile(const std::string &f, WritableFile **r) { return target_->NewWritableFile(f, r); } Status NewAppendableFile(const std::string &f, WritableFile **r) { return target_->NewAppendableFile(f, r); } bool FileExists(const std::string &f) { return target_->FileExists(f); } Status GetChildren(const std::string &dir, std::vector<std::string> *r) { return target_->GetChildren(dir, r); } Status DeleteFile(const std::string &f) { return target_->DeleteFile(f); } Status CreateDir(const std::string &d) { return target_->CreateDir(d); } Status DeleteDir(const std::string &d) { return target_->DeleteDir(d); } Status GetFileSize(const std::string &f, uint64_t *s) { return target_->GetFileSize(f, s); } Status RenameFile(const std::string &s, const std::string &t) { return target_->RenameFile(s, t); } Status LockFile(const std::string &f, FileLock **l) { return target_->LockFile(f, l); } Status UnlockFile(FileLock *l) { return target_->UnlockFile(l); } void Schedule(void (*f)(void *), void *a) { return target_->Schedule(f, a); } void StartThread(void (*f)(void *), void *a) { return target_->StartThread(f, a); } virtual Status GetTestDirectory(std::string *path) { return target_->GetTestDirectory(path); } virtual Status NewLogger(const std::string &fname, Logger **result) { return target_->NewLogger(fname, result); } uint64_t NowMicros() { return target_->NowMicros(); } void SleepForMicroseconds(int micros) { target_->SleepForMicroseconds(micros); } private: Env *target_; }; } // namespace leveldb #endif // STORAGE_LEVELDB_INCLUDE_ENV_H_
12,539
30.827411
93
h
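A sketch of the EnvWrapper extension point described above: override a single method and inherit forwarding for everything else. The counting behavior is invented for illustration.

#include "leveldb/env.h" // hypothetical include path

class CountingEnv : public leveldb::EnvWrapper {
public:
	explicit CountingEnv(leveldb::Env *base)
	    : leveldb::EnvWrapper(base), writable_created_(0)
	{
	}

	// Intercept one call; every other Env method still forwards to target().
	leveldb::Status NewWritableFile(const std::string &f, leveldb::WritableFile **r)
	{
		writable_created_++;
		return target()->NewWritableFile(f, r);
	}

	int writable_created() const
	{
		return writable_created_;
	}

private:
	int writable_created_;
};

// Usage: CountingEnv env(leveldb::Env::Default()); hand &env to code that
// needs an Env, then inspect env.writable_created().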
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/utils/jenkins/scripts/createNamespace.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2019-2020, Intel Corporation # createNamespace.sh - Remove old namespaces and create new ones set -e # region used for dax namespaces. DEV_DAX_R=0x0000 # region used for fsdax namespaces. FS_DAX_R=0x0001 CREATE_DAX=false CREATE_PMEM=false MOUNT_POINT="/mnt/pmem0" SIZE=100G function usage() { echo "" echo "Script for creating namespaces, mountpoint, and configuring file permissions." echo "Usage: $(basename $1) [-h|--help] [-d|--dax] [-p|--pmem] [--size]" echo "-h, --help Print help and exit" echo "-d, --dax Create dax device." echo "-p, --pmem Create fsdax device and create mountpoint." echo "--size Set size for namespaces [default: $SIZE]" } function clear_namespaces() { scriptdir=$(readlink -f $(dirname ${BASH_SOURCE[0]})) $scriptdir/removeNamespaces.sh } function create_devdax() { local align=$1 local size=$2 local cmd="sudo ndctl create-namespace --mode devdax -a ${align} -s ${size} -r ${DEV_DAX_R} -f" result=$(${cmd}) if [ $? -ne 0 ]; then exit 1; fi jq -r '.daxregion.devices[].chardev' <<< $result } function create_fsdax() { local size=$1 local cmd="sudo ndctl create-namespace --mode fsdax -s ${size} -r ${FS_DAX_R} -f" result=$(${cmd}) if [ $? -ne 0 ]; then exit 1; fi jq -r '.blockdev' <<< $result } while getopts ":dhp-:" optchar; do case "${optchar}" in -) case "$OPTARG" in help) usage $0 && exit 0 ;; dax) CREATE_DAX=true ;; pmem) CREATE_PMEM=true ;; size=*) SIZE="${OPTARG#*=}" ;; *) echo "Invalid argument '$OPTARG'"; usage $0 && exit 1 ;; esac ;; p) CREATE_PMEM=true ;; d) CREATE_DAX=true ;; h) usage $0 && exit 0 ;; *) echo "Invalid argument '$OPTARG'"; usage $0 && exit 1 ;; esac done # There is no default test configuration in this script. Configurations have to be specified. if ! $CREATE_DAX && ! $CREATE_PMEM; then echo "" echo "ERROR: No config type selected. Please select one or more config types." exit 1 fi # Remove existing namespaces. clear_namespaces # Creating namespaces. trap 'echo "ERROR: Failed to create namespaces"; clear_namespaces; exit 1' ERR SIGTERM SIGABRT if $CREATE_DAX; then create_devdax 4k $SIZE fi if $CREATE_PMEM; then pmem_name=$(create_fsdax $SIZE) fi # Creating mountpoint. trap 'echo "ERROR: Failed to create mountpoint"; clear_namespaces; exit 1' ERR SIGTERM SIGABRT if $CREATE_PMEM; then if [ ! -d "$MOUNT_POINT" ]; then sudo mkdir $MOUNT_POINT fi if ! grep -qs "$MOUNT_POINT " /proc/mounts; then sudo mkfs.ext4 -F /dev/$pmem_name sudo mount -o dax /dev/$pmem_name $MOUNT_POINT fi fi # Changing file permissions. sudo chmod 777 $MOUNT_POINT || true sudo chmod 777 /dev/dax* || true sudo chmod a+rw /sys/bus/nd/devices/region*/deep_flush sudo chmod +r /sys/bus/nd/devices/ndbus*/region*/resource sudo chmod +r /sys/bus/nd/devices/ndbus*/region*/dax*/resource # Print created namespaces. ndctl list -X | jq -r '.[] | select(.mode=="devdax") | [.daxregion.devices[].chardev, "align: "+(.daxregion.align/1024|tostring+"k"), "size: "+(.size/1024/1024/1024|tostring+"G") ]' ndctl list | jq -r '.[] | select(.mode=="fsdax") | [.blockdev, "size: "+(.size/1024/1024/1024|tostring+"G") ]'
3,239
26.457627
181
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/utils/jenkins/scripts/removeNamespaces.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2019-2020, Intel Corporation # removeNamespaces.sh - clear all existing namespaces. set -e MOUNT_POINT="/mnt/pmem*" sudo umount $MOUNT_POINT || true namespace_names=$(ndctl list -X | jq -r '.[].dev') for n in $namespace_names do sudo ndctl clear-errors $n -v done sudo ndctl disable-namespace all || true sudo ndctl destroy-namespace all || true
424
20.25
54
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmemkv-bench-chekpointing/utils/jenkins/scripts/common.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2019-2020, Intel Corporation # common.sh - contains bash functions used in all jenkins pipelines. set -o pipefail scriptdir=$(readlink -f $(dirname ${BASH_SOURCE[0]})) function system_info { echo "********** system-info **********" cat /etc/os-release | grep -oP "PRETTY_NAME=\K.*" uname -r echo "libndctl: $(pkg-config --modversion libndctl || echo 'libndctl not found')" echo "libfabric: $(pkg-config --modversion libfabric || echo 'libfabric not found')" echo "libpmem: $(pkg-config --modversion libpmem || echo 'libpmem not found')" echo "libpmemobj: $(pkg-config --modversion libpmemobj || echo 'libpmemobj not found')" echo "libpmemobj++: $(pkg-config --modversion libpmemobj++ || echo 'libpmemobj++ not found')" echo "memkind: $(pkg-config --modversion memkind || echo 'memkind not found')" echo "TBB : $(pkg-config --modversion TBB || echo 'TBB not found')" echo "valgrind: $(pkg-config --modversion valgrind || echo 'valgrind not found')" echo "**********memory-info**********" sudo ipmctl show -dimm || true sudo ipmctl show -topology || true echo "**********list-existing-namespaces**********" sudo ndctl list -M -N echo "**********installed-packages**********" zypper se --installed-only 2>/dev/null || true apt list --installed 2>/dev/null || true yum list installed 2>/dev/null || true echo "**********/proc/cmdline**********" cat /proc/cmdline echo "**********/proc/modules**********" cat /proc/modules echo "**********/proc/cpuinfo**********" cat /proc/cpuinfo echo "**********/proc/meminfo**********" cat /proc/meminfo echo "**********/proc/swaps**********" cat /proc/swaps echo "**********/proc/version**********" cat /proc/version echo "**********check-updates**********" sudo zypper list-updates 2>/dev/null || true sudo apt-get update 2>/dev/null || true ; apt upgrade --dry-run 2>/dev/null || true sudo dnf check-update 2>/dev/null || true echo "**********list-environment**********" env } function set_warning_message { local info_addr=$1 sudo bash -c "cat > /etc/motd <<EOL ___ ___ / \ / \ HELLO! \_ \ / __/ THIS NODE IS CONNECTED TO PMEMKV JENKINS _\ \ / /__ THERE ARE TESTS CURRENTLY RUNNING ON THIS MACHINE \___ \____/ __/ PLEASE GO AWAY :) \_ _/ | @ @ \_ | FOR MORE INFORMATION GO: ${info_addr} _/ /\ /o) (o/\ \_ \_____/ / \____/ EOL" } function disable_warning_message { sudo rm /etc/motd || true } # Check host linux distribution and return distro name function check_distro { distro=$(cat /etc/os-release | grep -e ^NAME= | cut -c6-) && echo "${distro//\"}" }
2,808
34.556962
94
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmdk-checkpoint1/builddatastoreall.sh
make clobber make -j12 EXTRA_CFLAGS+=-DRUN_COUNT=1 EXTRA_CFLAGS+=-DUSE_NDP_REDO EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER make EXTRA_CFLAGS+=-DRUN_COUNT=1 EXTRA_CFLAGS+=-DUSE_NDP_REDO EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER cat builddatastoreall.sh
236
46.4
99
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmdk-checkpoint1/buildclobber.sh
make clobber make -j12 EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER EXTRA_CFLAGS+=-DRUN_COUNT=1 make EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER EXTRA_CFLAGS+=-DRUN_COUNT=1 cat buildclobber.sh
171
33.4
70
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmdk-checkpoint1/buildredo.sh
make clobber make -j12 EXTRA_CFLAGS+=-DUSE_NDP_REDO EXTRA_CFLAGS+=-DRUN_COUNT=1 make EXTRA_CFLAGS+=-DUSE_NDP_REDO EXTRA_CFLAGS+=-DRUN_COUNT=1 cat buildredo.sh
166
32.4
69
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmdk-checkpoint1/builddatastoreclobber.sh
make clobber make -j12 EXTRA_CFLAGS+=-DRUN_COUNT=1 EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER make EXTRA_CFLAGS+=-DRUN_COUNT=1 EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER cat builddatastoreclobber.sh
182
35.6
70
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmdk-checkpoint1/run.sh
make EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DRUN_COUNT=100000 make EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DUSE_NDP_REDO EXTRA_CFLAGS+=-DRUN_COUNT=100000 make EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DGET_NDP_BREAKDOWN make -j12 EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DGET_NDP_BREAKDOWN make -j12 EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DRUN_COUNT=10000 EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER EXTRA_CFLAGS="-Wno-error"
481
67.857143
112
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmdk-checkpoint1/build.sh
make clobber make -j12 EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DRUN_COUNT=10000 make EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DRUN_COUNT=10000 cat run.sh
180
35.2
79
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmdk-checkpoint1/builddatastore.sh
make clobber make -j12 EXTRA_CFLAGS+=-DRUN_COUNT=1 make EXTRA_CFLAGS+=-DRUN_COUNT=1 cat builddatastore.sh
111
21.4
38
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmdk-checkpoint1/builddatastoreredo.sh
make clobber make -j12 EXTRA_CFLAGS+=-DRUN_COUNT=1 EXTRA_CFLAGS+=-DUSE_NDP_REDO make EXTRA_CFLAGS+=-DRUN_COUNT=1 EXTRA_CFLAGS+=-DUSE_NDP_REDO cat builddatastoreredo.sh
173
33.8
67
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmdk-checkpoint1/buildall.sh
make clobber make -j12 EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DUSE_NDP_REDO EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER EXTRA_CFLAGS+=-DRUN_COUNT=10000 make EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DUSE_NDP_REDO EXTRA_CFLAGS+=-DUSE_NDP_CLOBBER EXTRA_CFLAGS+=-DRUN_COUNT=10000 cat run.sh
300
59.2
139
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/pmdk-checkpoint1/src/tools/rpmemd/rpmemd_config.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * rpmemd_config.h -- internal definitions for rpmemd config */ #include <stdint.h> #include <stdbool.h> #ifndef RPMEMD_DEFAULT_LOG_FILE #define RPMEMD_DEFAULT_LOG_FILE ("/var/log/" DAEMON_NAME ".log") #endif #ifndef RPMEMD_GLOBAL_CONFIG_FILE #define RPMEMD_GLOBAL_CONFIG_FILE ("/etc/" DAEMON_NAME "/" DAEMON_NAME\ ".conf") #endif #define RPMEMD_USER_CONFIG_FILE ("." DAEMON_NAME ".conf") #define RPMEM_DEFAULT_MAX_LANES 1024 #define RPMEM_DEFAULT_NTHREADS 0 #define HOME_ENV "HOME" #define HOME_STR_PLACEHOLDER ("$" HOME_ENV) struct rpmemd_config { char *log_file; char *poolset_dir; const char *rm_poolset; bool force; bool pool_set; bool persist_apm; bool persist_general; bool use_syslog; uint64_t max_lanes; enum rpmemd_log_level log_level; size_t nthreads; }; int rpmemd_config_read(struct rpmemd_config *config, int argc, char *argv[]); void rpmemd_config_free(struct rpmemd_config *config);
1,012
21.021739
77
h