Dataset column schema:
repo: string (lengths 1 to 152)
file: string (lengths 15 to 205)
code: string (lengths 0 to 41.6M)
file_length: int64 (range 0 to 41.6M)
avg_line_length: float64 (range 0 to 1.81M)
max_line_length: int64 (range 0 to 12.7M)
extension_type: string (90 classes)
null
NearPMSW-main/nearpmMDsync/logging/pmdk/src/include/libpmemobj/base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/base.h -- definitions of base libpmemobj entry points */ #ifndef LIBPMEMOBJ_BASE_H #define LIBPMEMOBJ_BASE_H 1 #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #include <stddef.h> #include <stdint.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmemobj_check_version pmemobj_check_versionW #define pmemobj_errormsg pmemobj_errormsgW #else #define pmemobj_check_version pmemobj_check_versionU #define pmemobj_errormsg pmemobj_errormsgU #endif #endif #ifdef __cplusplus extern "C" { #endif /* * opaque type internal to libpmemobj */ typedef struct pmemobjpool PMEMobjpool; #define PMEMOBJ_MAX_ALLOC_SIZE ((size_t)0x3FFDFFFC0) /* * allocation functions flags */ #define POBJ_FLAG_ZERO (((uint64_t)1) << 0) #define POBJ_FLAG_NO_FLUSH (((uint64_t)1) << 1) #define POBJ_FLAG_NO_SNAPSHOT (((uint64_t)1) << 2) #define POBJ_FLAG_ASSUME_INITIALIZED (((uint64_t)1) << 3) #define POBJ_FLAG_TX_NO_ABORT (((uint64_t)1) << 4) #define POBJ_CLASS_ID(id) (((uint64_t)(id)) << 48) #define POBJ_ARENA_ID(id) (((uint64_t)(id)) << 32) #define POBJ_XALLOC_CLASS_MASK ((((uint64_t)1 << 16) - 1) << 48) #define POBJ_XALLOC_ARENA_MASK ((((uint64_t)1 << 16) - 1) << 32) #define POBJ_XALLOC_ZERO POBJ_FLAG_ZERO #define POBJ_XALLOC_NO_FLUSH POBJ_FLAG_NO_FLUSH #define POBJ_XALLOC_NO_ABORT POBJ_FLAG_TX_NO_ABORT /* * pmemobj_mem* flags */ #define PMEMOBJ_F_MEM_NODRAIN (1U << 0) #define PMEMOBJ_F_MEM_NONTEMPORAL (1U << 1) #define PMEMOBJ_F_MEM_TEMPORAL (1U << 2) #define PMEMOBJ_F_MEM_WC (1U << 3) #define PMEMOBJ_F_MEM_WB (1U << 4) #define PMEMOBJ_F_MEM_NOFLUSH (1U << 5) /* * pmemobj_mem*, pmemobj_xflush & pmemobj_xpersist flags */ #define PMEMOBJ_F_RELAXED (1U << 31) /* * Persistent memory object */ /* * Object handle */ typedef struct pmemoid { uint64_t pool_uuid_lo; uint64_t off; } PMEMoid; static const PMEMoid OID_NULL = { 0, 0 }; #define OID_IS_NULL(o) ((o).off == 0) #define OID_EQUALS(lhs, rhs)\ ((lhs).off == (rhs).off &&\ (lhs).pool_uuid_lo == (rhs).pool_uuid_lo) PMEMobjpool *pmemobj_pool_by_ptr(const void *addr); PMEMobjpool *pmemobj_pool_by_oid(PMEMoid oid); #ifndef _WIN32 extern int _pobj_cache_invalidate; extern __thread struct _pobj_pcache { PMEMobjpool *pop; uint64_t uuid_lo; int invalidate; } _pobj_cached_pool; /* * Returns the direct pointer of an object. */ static inline void * pmemobj_direct_inline(PMEMoid oid) { if (oid.off == 0 || oid.pool_uuid_lo == 0) return NULL; struct _pobj_pcache *cache = &_pobj_cached_pool; if (_pobj_cache_invalidate != cache->invalidate || cache->uuid_lo != oid.pool_uuid_lo) { cache->invalidate = _pobj_cache_invalidate; if (!(cache->pop = pmemobj_pool_by_oid(oid))) { cache->uuid_lo = 0; return NULL; } cache->uuid_lo = oid.pool_uuid_lo; } return (void *)((uintptr_t)cache->pop + oid.off); } #endif /* _WIN32 */ /* * Returns the direct pointer of an object. */ #if defined(_WIN32) || defined(_PMEMOBJ_INTRNL) ||\ defined(PMEMOBJ_DIRECT_NON_INLINE) void *pmemobj_direct(PMEMoid oid); #else #define pmemobj_direct pmemobj_direct_inline #endif struct pmemvlt { uint64_t runid; }; #define PMEMvlt(T)\ struct {\ struct pmemvlt vlt;\ T value;\ } /* * Returns lazily initialized volatile variable. (EXPERIMENTAL) */ void *pmemobj_volatile(PMEMobjpool *pop, struct pmemvlt *vlt, void *ptr, size_t size, int (*constr)(void *ptr, void *arg), void *arg); /* * Returns the OID of the object pointed to by addr. 
*/ PMEMoid pmemobj_oid(const void *addr); /* * Returns the number of usable bytes in the object. May be greater than * the requested size of the object because of internal alignment. * * Can be used with objects allocated by any of the available methods. */ size_t pmemobj_alloc_usable_size(PMEMoid oid); /* * Returns the type number of the object. */ uint64_t pmemobj_type_num(PMEMoid oid); /* * Pmemobj specific low-level memory manipulation functions. * * These functions are meant to be used with pmemobj pools, because they provide * additional functionality specific to this type of pool. These may include * for example replication support. They also take advantage of the knowledge * of the type of memory in the pool (pmem/non-pmem) to assure persistence. */ /* * Pmemobj version of memcpy. Data copied is made persistent. */ void *pmemobj_memcpy_persist(PMEMobjpool *pop, void *dest, const void *src, size_t len); /* * Pmemobj version of memset. Data range set is made persistent. */ void *pmemobj_memset_persist(PMEMobjpool *pop, void *dest, int c, size_t len); /* * Pmemobj version of memcpy. Data copied is made persistent (unless opted-out * using flags). */ void *pmemobj_memcpy(PMEMobjpool *pop, void *dest, const void *src, size_t len, unsigned flags); /* * Pmemobj version of memmove. Data copied is made persistent (unless opted-out * using flags). */ void *pmemobj_memmove(PMEMobjpool *pop, void *dest, const void *src, size_t len, unsigned flags); /* * Pmemobj version of memset. Data range set is made persistent (unless * opted-out using flags). */ void *pmemobj_memset(PMEMobjpool *pop, void *dest, int c, size_t len, unsigned flags); /* * Pmemobj version of pmem_persist. */ void pmemobj_persist(PMEMobjpool *pop, const void *addr, size_t len); /* * Pmemobj version of pmem_persist with additional flags argument. */ int pmemobj_xpersist(PMEMobjpool *pop, const void *addr, size_t len, unsigned flags); /* * Pmemobj version of pmem_flush. */ void pmemobj_flush(PMEMobjpool *pop, const void *addr, size_t len); /* * Pmemobj version of pmem_flush with additional flags argument. */ int pmemobj_xflush(PMEMobjpool *pop, const void *addr, size_t len, unsigned flags); /* * Pmemobj version of pmem_drain. */ void pmemobj_drain(PMEMobjpool *pop); /* * Version checking. */ /* * PMEMOBJ_MAJOR_VERSION and PMEMOBJ_MINOR_VERSION provide the current version * of the libpmemobj API as provided by this header file. Applications can * verify that the version available at run-time is compatible with the version * used at compile-time by passing these defines to pmemobj_check_version(). */ #define PMEMOBJ_MAJOR_VERSION 2 #define PMEMOBJ_MINOR_VERSION 4 #ifndef _WIN32 const char *pmemobj_check_version(unsigned major_required, unsigned minor_required); #else const char *pmemobj_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmemobj_check_versionW(unsigned major_required, unsigned minor_required); #endif /* * Passing NULL to pmemobj_set_funcs() tells libpmemobj to continue to use the * default for that function. The replacement functions must not make calls * back into libpmemobj. 
*/ void pmemobj_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)); typedef int (*pmemobj_constr)(PMEMobjpool *pop, void *ptr, void *arg); /* * (debug helper function) logs notice message if used inside a transaction */ void _pobj_debug_notice(const char *func_name, const char *file, int line); #ifndef _WIN32 const char *pmemobj_errormsg(void); #else const char *pmemobj_errormsgU(void); const wchar_t *pmemobj_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif /* libpmemobj/base.h */
7,415
23.72
80
h
null
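To make the base API above concrete, here is a minimal usage sketch (not part of the repository): it resolves a PMEMoid to a direct pointer and persists a small update using only entry points declared in base.h, assuming the caller already obtained a valid pool and object handle through the pool and allocation headers.

```c
#include <libpmemobj/base.h>

/* Hypothetical helper: copy 'len' bytes into the object identified by 'oid'
 * and make the update persistent. */
static int
update_blob(PMEMobjpool *pop, PMEMoid oid, const void *buf, size_t len)
{
    if (OID_IS_NULL(oid) || len > pmemobj_alloc_usable_size(oid))
        return -1;

    void *dst = pmemobj_direct(oid);    /* OID -> direct pointer */
    if (dst == NULL)
        return -1;

    /* copy and flush to persistence in a single call */
    pmemobj_memcpy_persist(pop, dst, buf, len);

    /* equivalent two-step form: memcpy(dst, buf, len);
     * pmemobj_persist(pop, dst, len); */
    return 0;
}
```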
NearPMSW-main/nearpmMDsync/logging/pmdk/src/include/libpmemobj/tx.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/tx.h -- definitions of libpmemobj transactional macros */ #ifndef LIBPMEMOBJ_TX_H #define LIBPMEMOBJ_TX_H 1 #include <errno.h> #include <string.h> #include <libpmemobj/tx_base.h> #include <libpmemobj/types.h> extern uint64_t waitCycles; extern uint64_t resetCycles; #ifdef __cplusplus extern "C" { #endif #ifdef POBJ_TX_CRASH_ON_NO_ONABORT #define TX_ONABORT_CHECK do {\ if (_stage == TX_STAGE_ONABORT)\ abort();\ } while (0) #else #define TX_ONABORT_CHECK do {} while (0) #endif #define _POBJ_TX_BEGIN(pop, ...)\ {\ jmp_buf _tx_env;\ enum pobj_tx_stage _stage;\ int _pobj_errno;\ if (setjmp(_tx_env)) {\ errno = pmemobj_tx_errno();\ } else {\ _pobj_errno = pmemobj_tx_begin(pop, _tx_env, __VA_ARGS__,\ TX_PARAM_NONE);\ if (_pobj_errno)\ errno = _pobj_errno;\ }\ while ((_stage = pmemobj_tx_stage()) != TX_STAGE_NONE) {\ switch (_stage) {\ case TX_STAGE_WORK: #define TX_BEGIN_PARAM(pop, ...)\ _POBJ_TX_BEGIN(pop, ##__VA_ARGS__) #define TX_BEGIN_LOCK TX_BEGIN_PARAM /* Just to let compiler warn when incompatible function pointer is used */ static inline pmemobj_tx_callback _pobj_validate_cb_sig(pmemobj_tx_callback cb) { return cb; } #define TX_BEGIN_CB(pop, cb, arg, ...) _POBJ_TX_BEGIN(pop, TX_PARAM_CB,\ _pobj_validate_cb_sig(cb), arg, ##__VA_ARGS__) #define TX_BEGIN(pop) _POBJ_TX_BEGIN(pop, TX_PARAM_NONE) #define TX_ONABORT\ pmemobj_tx_process();\ break;\ case TX_STAGE_ONABORT: #define TX_ONCOMMIT\ pmemobj_tx_process();\ break;\ case TX_STAGE_ONCOMMIT: #define TX_FINALLY\ pmemobj_tx_process();\ break;\ case TX_STAGE_FINALLY: #define TX_END\ pmemobj_tx_process();\ break;\ default:\ TX_ONABORT_CHECK;\ pmemobj_tx_process();\ break;\ }\ }\ _pobj_errno = pmemobj_tx_end();\ if (_pobj_errno)\ errno = _pobj_errno;\ } #define TX_ADD(o)\ pmemobj_tx_add_range((o).oid, 0, sizeof(*(o)._type)) #define TX_ADD_FIELD(o, field)\ TX_ADD_DIRECT(&(D_RO(o)->field)) #define TX_ADD_DIRECT(p)\ pmemobj_tx_add_range_direct(p, sizeof(*(p))) #define TX_ADD_FIELD_DIRECT(p, field)\ pmemobj_tx_add_range_direct(&(p)->field, sizeof((p)->field)) #define TX_XADD(o, flags)\ pmemobj_tx_xadd_range((o).oid, 0, sizeof(*(o)._type), flags) #define TX_XADD_FIELD(o, field, flags)\ TX_XADD_DIRECT(&(D_RO(o)->field), flags) #define TX_XADD_DIRECT(p, flags)\ pmemobj_tx_xadd_range_direct(p, sizeof(*(p)), flags) #define TX_XADD_FIELD_DIRECT(p, field, flags)\ pmemobj_tx_xadd_range_direct(&(p)->field, sizeof((p)->field), flags) #define TX_NEW(t)\ ((TOID(t))pmemobj_tx_alloc(sizeof(t), TOID_TYPE_NUM(t))) #define TX_ALLOC(t, size)\ ((TOID(t))pmemobj_tx_alloc(size, TOID_TYPE_NUM(t))) #define TX_ZNEW(t)\ ((TOID(t))pmemobj_tx_zalloc(sizeof(t), TOID_TYPE_NUM(t))) #define TX_ZALLOC(t, size)\ ((TOID(t))pmemobj_tx_zalloc(size, TOID_TYPE_NUM(t))) #define TX_XALLOC(t, size, flags)\ ((TOID(t))pmemobj_tx_xalloc(size, TOID_TYPE_NUM(t), flags)) /* XXX - not available when compiled with VC++ as C code (/TC) */ #if !defined(_MSC_VER) || defined(__cplusplus) #define TX_REALLOC(o, size)\ ((__typeof__(o))pmemobj_tx_realloc((o).oid, size, TOID_TYPE_NUM_OF(o))) #define TX_ZREALLOC(o, size)\ ((__typeof__(o))pmemobj_tx_zrealloc((o).oid, size, TOID_TYPE_NUM_OF(o))) #endif /* !defined(_MSC_VER) || defined(__cplusplus) */ #define TX_STRDUP(s, type_num)\ pmemobj_tx_strdup(s, type_num) #define TX_XSTRDUP(s, type_num, flags)\ pmemobj_tx_xstrdup(s, type_num, flags) #define TX_WCSDUP(s, type_num)\ pmemobj_tx_wcsdup(s, type_num) #define TX_XWCSDUP(s, type_num, flags)\ 
pmemobj_tx_xwcsdup(s, type_num, flags) #define TX_FREE(o)\ pmemobj_tx_free((o).oid) #define TX_XFREE(o, flags)\ pmemobj_tx_xfree((o).oid, flags) #define TX_SET(o, field, value) (\ TX_ADD_FIELD(o, field),\ D_RW(o)->field = (value)) #define TX_SET_DIRECT(p, field, value) (\ TX_ADD_FIELD_DIRECT(p, field),\ (p)->field = (value)) static inline void * TX_MEMCPY(void *dest, const void *src, size_t num) { pmemobj_tx_add_range_direct(dest, num); return memcpy(dest, src, num); } static inline void * TX_MEMSET(void *dest, int c, size_t num) { pmemobj_tx_add_range_direct(dest, num); return memset(dest, c, num); } #ifdef __cplusplus } #endif #endif /* libpmemobj/tx.h */
4,353
22.037037
74
h
null
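As a quick illustration of the transaction macros above, this is a minimal sketch (not from the source tree) of a transactional counter update; the pointer is assumed to reference memory inside the pool.

```c
#include <libpmemobj/tx.h>

/* Hypothetical helper: increment a counter that lives inside the pool 'pop',
 * with rollback on abort or power failure. */
static int
bump_counter(PMEMobjpool *pop, uint64_t *counter)
{
    int ret = 0;

    TX_BEGIN(pop) {
        TX_ADD_DIRECT(counter);   /* snapshot so the store can be rolled back */
        *counter += 1;
    } TX_ONABORT {
        ret = -1;                 /* errno set by the transaction machinery */
    } TX_END

    return ret;
}
```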
NearPMSW-main/nearpmMDsync/logging/pmdk/src/include/libpmemobj/atomic_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/atomic_base.h -- definitions of libpmemobj atomic entry points */ #ifndef LIBPMEMOBJ_ATOMIC_BASE_H #define LIBPMEMOBJ_ATOMIC_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Non-transactional atomic allocations * * Those functions can be used outside transactions. The allocations are always * aligned to the cache-line boundary. */ #define POBJ_XALLOC_VALID_FLAGS (POBJ_XALLOC_ZERO |\ POBJ_XALLOC_CLASS_MASK) /* * Allocates a new object from the pool and calls a constructor function before * returning. It is guaranteed that allocated object is either properly * initialized, or if it's interrupted before the constructor completes, the * memory reserved for the object is automatically reclaimed. */ int pmemobj_alloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num, pmemobj_constr constructor, void *arg); /* * Allocates with flags a new object from the pool. */ int pmemobj_xalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num, uint64_t flags, pmemobj_constr constructor, void *arg); /* * Allocates a new zeroed object from the pool. */ int pmemobj_zalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num); /* * Resizes an existing object. */ int pmemobj_realloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num); /* * Resizes an existing object, if extended new space is zeroed. */ int pmemobj_zrealloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num); /* * Allocates a new object with duplicate of the string s. */ int pmemobj_strdup(PMEMobjpool *pop, PMEMoid *oidp, const char *s, uint64_t type_num); /* * Allocates a new object with duplicate of the wide character string s. */ int pmemobj_wcsdup(PMEMobjpool *pop, PMEMoid *oidp, const wchar_t *s, uint64_t type_num); /* * Frees an existing object. */ void pmemobj_free(PMEMoid *oidp); struct pobj_defrag_result { size_t total; /* number of processed objects */ size_t relocated; /* number of relocated objects */ }; /* * Performs defragmentation on the provided array of objects. */ int pmemobj_defrag(PMEMobjpool *pop, PMEMoid **oidv, size_t oidcnt, struct pobj_defrag_result *result); #ifdef __cplusplus } #endif #endif /* libpmemobj/atomic_base.h */
2,386
24.393617
79
h
null
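The constructor-based allocation described above can be exercised as in the following sketch (not part of the repository); the struct, type number, and helper names are illustrative only.

```c
#include <string.h>
#include <libpmemobj/atomic_base.h>

struct record {                 /* hypothetical persistent record */
    uint64_t id;
    char name[32];
};

/* Constructor runs before the allocation becomes visible; returning
 * non-zero rolls the allocation back. */
static int
record_construct(PMEMobjpool *pop, void *ptr, void *arg)
{
    struct record *r = ptr;
    r->id = *(uint64_t *)arg;
    memset(r->name, 0, sizeof(r->name));
    pmemobj_persist(pop, r, sizeof(*r));
    return 0;
}

static int
record_new(PMEMobjpool *pop, PMEMoid *oidp, uint64_t id)
{
    return pmemobj_alloc(pop, oidp, sizeof(struct record),
        1 /* illustrative type number */, record_construct, &id);
}
```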
NearPMSW-main/nearpmMDsync/logging/pmdk/src/include/libpmemobj/thread.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * libpmemobj/thread.h -- definitions of libpmemobj thread/locking entry points */ #ifndef LIBPMEMOBJ_THREAD_H #define LIBPMEMOBJ_THREAD_H 1 #include <time.h> #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Locking. */ #define _POBJ_CL_SIZE 64 /* cache line size */ typedef union { long long align; char padding[_POBJ_CL_SIZE]; } PMEMmutex; typedef union { long long align; char padding[_POBJ_CL_SIZE]; } PMEMrwlock; typedef union { long long align; char padding[_POBJ_CL_SIZE]; } PMEMcond; void pmemobj_mutex_zero(PMEMobjpool *pop, PMEMmutex *mutexp); int pmemobj_mutex_lock(PMEMobjpool *pop, PMEMmutex *mutexp); int pmemobj_mutex_timedlock(PMEMobjpool *pop, PMEMmutex *__restrict mutexp, const struct timespec *__restrict abs_timeout); int pmemobj_mutex_trylock(PMEMobjpool *pop, PMEMmutex *mutexp); int pmemobj_mutex_unlock(PMEMobjpool *pop, PMEMmutex *mutexp); void pmemobj_rwlock_zero(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_rdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_wrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_timedrdlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp, const struct timespec *__restrict abs_timeout); int pmemobj_rwlock_timedwrlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp, const struct timespec *__restrict abs_timeout); int pmemobj_rwlock_tryrdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_trywrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_unlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); void pmemobj_cond_zero(PMEMobjpool *pop, PMEMcond *condp); int pmemobj_cond_broadcast(PMEMobjpool *pop, PMEMcond *condp); int pmemobj_cond_signal(PMEMobjpool *pop, PMEMcond *condp); int pmemobj_cond_timedwait(PMEMobjpool *pop, PMEMcond *__restrict condp, PMEMmutex *__restrict mutexp, const struct timespec *__restrict abs_timeout); int pmemobj_cond_wait(PMEMobjpool *pop, PMEMcond *condp, PMEMmutex *__restrict mutexp); #ifdef __cplusplus } #endif #endif /* libpmemobj/thread.h */
2,150
28.875
79
h
null
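A short sketch (not from the source tree) of how the pool-resident locks above are typically used; the struct layout is illustrative and is assumed to live inside the pool.

```c
#include <libpmemobj/thread.h>

struct shared {                 /* hypothetical pool-resident structure */
    PMEMmutex lock;             /* padded to one cache line */
    uint64_t value;
};

static int
shared_add(PMEMobjpool *pop, struct shared *s, uint64_t delta)
{
    if (pmemobj_mutex_lock(pop, &s->lock) != 0)
        return -1;

    s->value += delta;
    pmemobj_persist(pop, &s->value, sizeof(s->value));

    return pmemobj_mutex_unlock(pop, &s->lock);
}
```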
NearPMSW-main/nearpmMDsync/logging/pmdk/src/include/libpmemobj/action.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2018, Intel Corporation */ /* * libpmemobj/action.h -- definitions of libpmemobj action interface */ #ifndef LIBPMEMOBJ_ACTION_H #define LIBPMEMOBJ_ACTION_H 1 #include <libpmemobj/action_base.h> #ifdef __cplusplus extern "C" { #endif #define POBJ_RESERVE_NEW(pop, t, act)\ ((TOID(t))pmemobj_reserve(pop, act, sizeof(t), TOID_TYPE_NUM(t))) #define POBJ_RESERVE_ALLOC(pop, t, size, act)\ ((TOID(t))pmemobj_reserve(pop, act, size, TOID_TYPE_NUM(t))) #define POBJ_XRESERVE_NEW(pop, t, act, flags)\ ((TOID(t))pmemobj_xreserve(pop, act, sizeof(t), TOID_TYPE_NUM(t), flags)) #define POBJ_XRESERVE_ALLOC(pop, t, size, act, flags)\ ((TOID(t))pmemobj_xreserve(pop, act, size, TOID_TYPE_NUM(t), flags)) #ifdef __cplusplus } #endif #endif /* libpmemobj/action_base.h */
829
23.411765
73
h
null
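The reservation macros above wrap a reserve-then-publish flow; the sketch below (not part of the repository) shows that flow with the plain entry points, assuming pmemobj_reserve(), pmemobj_publish(), and pmemobj_cancel() are declared in the included libpmemobj/action_base.h.

```c
#include <libpmemobj/action.h>

/* Hypothetical helper: reserve an object, initialize it off to the side,
 * then publish it (or cancel the reservation on failure). */
static PMEMoid
reserve_then_publish(PMEMobjpool *pop, size_t size, uint64_t type_num)
{
    struct pobj_action act;

    PMEMoid oid = pmemobj_reserve(pop, &act, size, type_num);
    if (OID_IS_NULL(oid))
        return OID_NULL;

    /* ... initialize the object via pmemobj_direct(oid) here ... */

    if (pmemobj_publish(pop, &act, 1) != 0) {
        pmemobj_cancel(pop, &act, 1);
        return OID_NULL;
    }

    return oid;
}
```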
NearPMSW-main/nearpmMDsync/logging/pmdk/src/include/libpmemobj/atomic.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * libpmemobj/atomic.h -- definitions of libpmemobj atomic macros */ #ifndef LIBPMEMOBJ_ATOMIC_H #define LIBPMEMOBJ_ATOMIC_H 1 #include <libpmemobj/atomic_base.h> #include <libpmemobj/types.h> #ifdef __cplusplus extern "C" { #endif #define POBJ_NEW(pop, o, t, constr, arg)\ pmemobj_alloc((pop), (PMEMoid *)(o), sizeof(t), TOID_TYPE_NUM(t),\ (constr), (arg)) #define POBJ_ALLOC(pop, o, t, size, constr, arg)\ pmemobj_alloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t),\ (constr), (arg)) #define POBJ_ZNEW(pop, o, t)\ pmemobj_zalloc((pop), (PMEMoid *)(o), sizeof(t), TOID_TYPE_NUM(t)) #define POBJ_ZALLOC(pop, o, t, size)\ pmemobj_zalloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t)) #define POBJ_REALLOC(pop, o, t, size)\ pmemobj_realloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t)) #define POBJ_ZREALLOC(pop, o, t, size)\ pmemobj_zrealloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t)) #define POBJ_FREE(o)\ pmemobj_free((PMEMoid *)(o)) #ifdef __cplusplus } #endif #endif /* libpmemobj/atomic.h */
1,115
23.26087
66
h
null
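To show the typed wrappers above in context, here is a sketch (not from the source tree) of a zeroed atomic allocation; TOID_DECLARE(), TOID(), and D_RW() are assumed to come from the included libpmemobj/types.h, and the type number is arbitrary.

```c
#include <libpmemobj/atomic.h>

struct node {                   /* hypothetical persistent node */
    uint64_t value;
};
TOID_DECLARE(struct node, 100); /* illustrative type number */

static int
node_create(PMEMobjpool *pop, TOID(struct node) *node, uint64_t value)
{
    /* zeroed, cache-line aligned, non-transactional allocation */
    if (POBJ_ZNEW(pop, node, struct node) != 0)
        return -1;

    D_RW(*node)->value = value;
    pmemobj_persist(pop, D_RW(*node), sizeof(struct node));
    return 0;
}
```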
NearPMSW-main/nearpmMDsync/logging/pmdk/src/include/libpmemobj/pool.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * libpmemobj/pool.h -- definitions of libpmemobj pool macros */ #ifndef LIBPMEMOBJ_POOL_H #define LIBPMEMOBJ_POOL_H 1 #include <libpmemobj/pool_base.h> #include <libpmemobj/types.h> #define POBJ_ROOT(pop, t) (\ (TOID(t))pmemobj_root((pop), sizeof(t))) #endif /* libpmemobj/pool.h */
379
20.111111
61
h
null
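POBJ_ROOT() above is typically the entry point into a pool's data; a minimal sketch (not part of the repository), assuming TOID_DECLARE_ROOT(), TOID_IS_NULL(), and D_RW() from the included libpmemobj/types.h:

```c
#include <libpmemobj/pool.h>

struct my_root {                /* hypothetical root structure */
    uint64_t counter;
};
TOID_DECLARE_ROOT(struct my_root);

static int
bump_root_counter(PMEMobjpool *pop)
{
    /* root object is allocated (zeroed) on first access, sized to the type */
    TOID(struct my_root) root = POBJ_ROOT(pop, struct my_root);
    if (TOID_IS_NULL(root))
        return -1;

    D_RW(root)->counter += 1;
    pmemobj_persist(pop, &D_RW(root)->counter, sizeof(uint64_t));
    return 0;
}
```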
NearPMSW-main/nearpmMDsync/logging/pmdk/src/include/libpmemobj/iterator_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/iterator_base.h -- definitions of libpmemobj iterator entry points */ #ifndef LIBPMEMOBJ_ITERATOR_BASE_H #define LIBPMEMOBJ_ITERATOR_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * The following functions allow access to the entire collection of objects. * * Use with conjunction with non-transactional allocations. Pmemobj pool acts * as a generic container (list) of objects that are not assigned to any * user-defined data structures. */ /* * Returns the first object of the specified type number. */ PMEMoid pmemobj_first(PMEMobjpool *pop); /* * Returns the next object of the same type. */ PMEMoid pmemobj_next(PMEMoid oid); #ifdef __cplusplus } #endif #endif /* libpmemobj/iterator_base.h */
855
20.4
80
h
null
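A short sketch (not from the source tree) of a full-pool walk using the iterator entry points above together with the introspection calls from base.h:

```c
#include <stdio.h>
#include <libpmemobj/iterator_base.h>

/* Hypothetical helper: print type number and usable size of every object. */
static void
dump_objects(PMEMobjpool *pop)
{
    for (PMEMoid oid = pmemobj_first(pop); !OID_IS_NULL(oid);
            oid = pmemobj_next(oid)) {
        printf("type %lu, usable size %zu\n",
            (unsigned long)pmemobj_type_num(oid),
            pmemobj_alloc_usable_size(oid));
    }
}
```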
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/magic-install.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2014-2017, Intel Corporation # # magic-install.sh -- Script for installing magic script # set -e if ! grep -q "File: pmdk" /etc/magic then echo "Appending PMDK magic to /etc/magic" cat /usr/share/pmdk/pmdk.magic >> /etc/magic else echo "PMDK magic already exists" fi
343
20.5
56
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/md2man.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # # md2man.sh -- convert markdown to groff man pages # # usage: md2man.sh file template outfile # # This script converts markdown file into groff man page using pandoc. # It performs some pre- and post-processing for better results: # - uses m4 to preprocess OS-specific directives. See doc/macros.man. # - parse input file for YAML metadata block and read man page title, # section and version # - cut-off metadata block and license # - unindent code blocks # - cut-off windows and web specific parts of documentation # # If the TESTOPTS variable is set, generates a preprocessed markdown file # with the header stripped off for testing purposes. # set -e set -o pipefail filename=$1 template=$2 outfile=$3 title=`sed -n 's/^title:\ _MP(*\([A-Za-z0-9_-]*\).*$/\1/p' $filename` section=`sed -n 's/^title:.*\([0-9]\))$/\1/p' $filename` version=`sed -n 's/^date:\ *\(.*\)$/\1/p' $filename` if [ "$TESTOPTS" != "" ]; then m4 $TESTOPTS macros.man $filename | sed -n -e '/# NAME #/,$p' > $outfile else OPTS= if [ "$WIN32" == 1 ]; then OPTS="$OPTS -DWIN32" else OPTS="$OPTS -UWIN32" fi if [ "$(uname -s)" == "FreeBSD" ]; then OPTS="$OPTS -DFREEBSD" else OPTS="$OPTS -UFREEBSD" fi if [ "$WEB" == 1 ]; then OPTS="$OPTS -DWEB" mkdir -p "$(dirname $outfile)" m4 $OPTS macros.man $filename | sed -n -e '/---/,$p' > $outfile else SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(date +%s)}" COPYRIGHT=$(grep -rwI "\[comment]: <> (Copyright" $filename |\ sed "s/\[comment\]: <> (\([^)]*\))/\1/") dt=$(date -u -d "@$SOURCE_DATE_EPOCH" +%F 2>/dev/null || date -u -r "$SOURCE_DATE_EPOCH" +%F 2>/dev/null || date -u +%F) m4 $OPTS macros.man $filename | sed -n -e '/# NAME #/,$p' |\ pandoc -s -t man -o $outfile --template=$template \ -V title=$title -V section=$section \ -V date="$dt" -V version="$version" \ -V copyright="$COPYRIGHT" fi fi
1,955
27.764706
73
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/check-area.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2020, Intel Corporation # # Finds applicable area name for specified commit id. # if [ -z "$1" ]; then echo "Missing commit id argument." exit 1 fi files=$(git show $1 --format=oneline --name-only | grep -v -e "$1") git show -q $1 | cat echo echo "Modified files:" echo "$files" function categorize() { category=$1 shift cat_files=`echo "$files" | grep $*` if [ -n "${cat_files}" ]; then echo "$category" files=`echo "$files" | grep -v $*` fi } echo echo "Areas computed basing on the list of modified files: (see utils/check-area.sh for full algorithm)" categorize core -e "^src/core/" categorize pmem -e "^src/libpmem/" -e "^src/include/libpmem.h" categorize pmem2 -e "^src/libpmem2/" -e "^src/include/libpmem2.h" categorize rpmem -e "^src/librpmem/" -e "^src/include/librpmem.h" -e "^src/tools/rpmemd/" -e "^src/rpmem_common/" categorize log -e "^src/libpmemlog/" -e "^src/include/libpmemlog.h" categorize blk -e "^src/libpmemblk/" -e "^src/include/libpmemblk.h" categorize obj -e "^src/libpmemobj/" -e "^src/include/libpmemobj.h" -e "^src/include/libpmemobj/" categorize pool -e "^src/libpmempool/" -e "^src/include/libpmempool.h" -e "^src/tools/pmempool/" categorize benchmark -e "^src/benchmarks/" categorize examples -e "^src/examples/" categorize daxio -e "^src/tools/daxio/" categorize pmreorder -e "^src/tools/pmreorder/" categorize test -e "^src/test/" categorize doc -e "^doc/" -e ".md\$" -e "^ChangeLog" -e "README" categorize common -e "^src/common/" \ -e "^utils/" \ -e ".inc\$" \ -e ".yml\$" \ -e ".gitattributes" \ -e ".gitignore" \ -e "^.mailmap\$" \ -e "^src/PMDK.sln\$" \ -e "Makefile\$" \ -e "^src/freebsd/" \ -e "^src/windows/" \ -e "^src/include/pmemcompat.h" echo echo "If the above list contains more than 1 entry, please consider splitting" echo "your change into more commits, unless those changes don't make sense " echo "individually (they do not build, tests do not pass, etc)." echo "For example, it's perfectly fine to use 'obj' prefix for one commit that" echo "changes libpmemobj source code, its tests and documentation." if [ -n "$files" ]; then echo echo "Uncategorized files:" echo "$files" fi
2,340
30.213333
120
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/check-shebang.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2017-2019, Intel Corporation # # utils/check-shebang.sh -- interpreter directive check script # set -e err_count=0 for file in $@ ; do [ ! -f $file ] && continue SHEBANG=`head -n1 $file | cut -d" " -f1` [ "${SHEBANG:0:2}" != "#!" ] && continue if [ "$SHEBANG" != "#!/usr/bin/env" -a $SHEBANG != "#!/bin/sh" ]; then INTERP=`echo $SHEBANG | rev | cut -d"/" -f1 | rev` echo "$file:1: error: invalid interpreter directive:" >&2 echo " (is: \"$SHEBANG\", should be: \"#!/usr/bin/env $INTERP\")" >&2 ((err_count+=1)) fi done if [ "$err_count" == "0" ]; then echo "Interpreter directives are OK." else echo "Found $err_count errors in interpreter directives!" >&2 err_count=1 fi exit $err_count
787
24.419355
71
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/check-commits.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # Used to check whether all the commit messages in a pull request # follow the GIT/PMDK guidelines. # # usage: ./check-commits.sh [range] # if [ -z "$1" ]; then # on CI run this check only for pull requests if [ -n "$CI_REPO_SLUG" ]; then if [[ "$CI_REPO_SLUG" != "$GITHUB_REPO" \ || $CI_EVENT_TYPE != "pull_request" ]]; then echo "SKIP: $0 can only be executed for pull requests to $GITHUB_REPO" exit 0 fi fi # CI_COMMIT_RANGE can be invalid for force pushes - use another # method to determine the list of commits if [[ $(git rev-list $CI_COMMIT_RANGE 2>/dev/null) || -n "$CI_COMMIT_RANGE" ]]; then MERGE_BASE=$(echo $CI_COMMIT_RANGE | cut -d. -f1) [ -z $MERGE_BASE ] && \ MERGE_BASE=$(git log --pretty="%cN:%H" | grep GitHub | head -n1 | cut -d: -f2) RANGE=$MERGE_BASE..$CI_COMMIT else MERGE_BASE=$(git log --pretty="%cN:%H" | grep GitHub | head -n1 | cut -d: -f2) RANGE=$MERGE_BASE..HEAD fi else RANGE="$1" fi COMMITS=$(git log --pretty=%H $RANGE) set -e for commit in $COMMITS; do `dirname $0`/check-commit.sh $commit done
1,174
25.704545
85
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/get_aliases.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2017-2020, Intel Corporation # # # get_aliases.sh -- generate map of manuals functions and libraries # # usage: run from /pmdk/doc/generated location without parameters: # ./../../utils/get_aliases.sh # # This script searches manpages from section 7 then # takes all functions from each section using specified pattern # and at the end to every function it assign real markdown file # representation based on *.gz file content # # Generated libs_map.yml file is used on gh-pages # to handle functions and their aliases # list=("$@") man_child=("$@") function search_aliases { children=$1 parent=$2 for i in ${children[@]} do if [ -e ../$parent/$i ] then echo "Man: $i" content=$(head -c 150 ../$parent/$i) if [[ "$content" == ".so "* ]] ; then content=$(basename ${content#".so"}) i="${i%.*}" echo " $i: $content" >> $map_file else r="${i%.*}" echo " $r: $i" >> $map_file fi fi done } function list_pages { parent="${1%.*}" list=("$@") man_child=("$@") if [ "$parent" == "libpmem" ]; then man_child=($(ls -1 ../libpmem | grep -e ".*\.3$")) echo -n "- $parent: " >> $map_file echo "${man_child[@]}" >> $map_file fi if [ "$parent" == "libpmem2" ]; then man_child=($(ls -1 ../libpmem2 | grep -e ".*\.3$")) echo -n "- $parent: " >> $map_file echo "${man_child[@]}" >> $map_file fi if [ "$parent" == "libpmemblk" ]; then man_child=($(ls -1 ../libpmemblk | grep -e ".*\.3$")) echo -n "- $parent: " >> $map_file echo "${man_child[@]}" >> $map_file fi if [ "$parent" == "libpmemlog" ]; then man_child=($(ls -1 ../libpmemlog | grep -e ".*\.3$")) echo -n "- $parent: " >> $map_file echo "${man_child[@]}" >> $map_file fi if [ "$parent" == "libpmemobj" ]; then man_child=($(ls -1 ../libpmemobj | grep -e ".*\.3$")) echo -n "- $parent: " >> $map_file echo "${man_child[@]}" >> $map_file fi if [ "$parent" == "libpmempool" ]; then man_child=($(ls -1 ../libpmempool | grep -e ".*\.3$")) echo -n "- $parent: " >> $map_file echo "${man_child[@]}" >> $map_file fi if [ "$parent" == "librpmem" ]; then man_child=($(ls -1 ../librpmem | grep -e ".*\.3$")) echo -n "- $parent: " >> $map_file echo "${man_child[@]}" >> $map_file fi if [ ${#man_child[@]} -ne 0 ] then list=${man_child[@]} search_aliases "${list[@]}" "$parent" fi } man7=($(ls -1 ../*/ | grep -e ".*\.7$")) map_file=libs_map.yml [ -e $map_file ] && rm $map_file touch $map_file for i in "${man7[@]}" do echo "Library: $i" list_pages $i done
2,570
22.162162
67
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/copy-source.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2018, Intel Corporation # # utils/copy-source.sh -- copy source files (from HEAD) to 'path_to_dir/pmdk' # directory whether in git repository or not. # # usage: ./copy-source.sh [path_to_dir] [srcversion] set -e DESTDIR="$1" SRCVERSION=$2 if [ -d .git ]; then if [ -n "$(git status --porcelain)" ]; then echo "Error: Working directory is dirty: $(git status --porcelain)" exit 1 fi else echo "Warning: You are not in git repository, working directory might be dirty." fi mkdir -p "$DESTDIR"/pmdk echo -n $SRCVERSION > "$DESTDIR"/pmdk/.version if [ -d .git ]; then git archive HEAD | tar -x -C "$DESTDIR"/pmdk else find . \ -maxdepth 1 \ -not -name $(basename "$DESTDIR") \ -not -name . \ -exec cp -r "{}" "$DESTDIR"/pmdk \; fi
818
21.135135
81
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/check-commit.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # Used to check whether all the commit messages in a pull request # follow the GIT/PMDK guidelines. # # usage: ./check-commit.sh commit # if [ -z "$1" ]; then echo "Usage: check-commit.sh commit-id" exit 1 fi echo "Checking $1" subject=$(git log --format="%s" -n 1 $1) if [[ $subject =~ ^Merge.* ]]; then # skip exit 0 fi if [[ $subject =~ ^Revert.* ]]; then # skip exit 0 fi # valid area names AREAS="pmem\|pmem2\|rpmem\|log\|blk\|obj\|pool\|test\|benchmark\|examples\|doc\|core\|common\|daxio\|pmreorder" prefix=$(echo $subject | sed -n "s/^\($AREAS\)\:.*/\1/p") if [ "$prefix" = "" ]; then echo "FAIL: subject line in commit message does not contain valid area name" echo `dirname $0`/check-area.sh $1 exit 1 fi commit_len=$(git log --format="%s%n%b" -n 1 $1 | wc -L) if [ $commit_len -gt 73 ]; then echo "FAIL: commit message exceeds 72 chars per line (commit_len)" echo git log -n 1 $1 | cat exit 1 fi
1,035
19.313725
111
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/build-rpm.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2014-2019, Intel Corporation # # build-rpm.sh - Script for building rpm packages # set -e SCRIPT_DIR=$(dirname $0) source $SCRIPT_DIR/pkg-common.sh check_tool rpmbuild check_file $SCRIPT_DIR/pkg-config.sh source $SCRIPT_DIR/pkg-config.sh # # usage -- print usage message and exit # usage() { [ "$1" ] && echo Error: $1 cat >&2 <<EOF Usage: $0 [ -h ] -t version-tag -s source-dir -w working-dir -o output-dir [ -d distro ] [ -e build-experimental ] [ -c run-check ] [ -r build-rpmem ] [ -n with-ndctl ] [ -f testconfig-file ] [ -p build-libpmem2 ] -h print this help message -t version-tag source version tag -s source-dir source directory -w working-dir working directory -o output-dir output directory -d distro Linux distro name -e build-experimental build experimental packages -c run-check run package check -r build-rpmem build librpmem and rpmemd packages -n with-ndctl build with libndctl -f testconfig-file custom testconfig.sh -p build-libpmem2 build libpmem2 packages EOF exit 1 } # # command-line argument processing... # args=`getopt he:c:r:n:t:d:s:w:o:f:p: $*` [ $? != 0 ] && usage set -- $args for arg do receivetype=auto case "$arg" in -e) EXPERIMENTAL="$2" shift 2 ;; -c) BUILD_PACKAGE_CHECK="$2" shift 2 ;; -f) TEST_CONFIG_FILE="$2" shift 2 ;; -r) BUILD_RPMEM="$2" shift 2 ;; -n) NDCTL_ENABLE="$2" shift 2 ;; -t) PACKAGE_VERSION_TAG="$2" shift 2 ;; -s) SOURCE="$2" shift 2 ;; -w) WORKING_DIR="$2" shift 2 ;; -o) OUT_DIR="$2" shift 2 ;; -d) DISTRO="$2" shift 2 ;; -p) PMEM2_INSTALL="$2" shift 2 ;; --) shift break ;; esac done # check for mandatory arguments if [ -z "$PACKAGE_VERSION_TAG" -o -z "$SOURCE" -o -z "$WORKING_DIR" -o -z "$OUT_DIR" ] then error "Mandatory arguments missing" usage fi # detected distro or defined in cmd if [ -z "${DISTRO}" ] then OS=$(get_os) if [ "$OS" != "1" ] then echo "Detected OS: $OS" DISTRO=$OS else error "Unknown distribution" exit 1 fi fi if [ "$EXTRA_CFLAGS_RELEASE" = "" ]; then export EXTRA_CFLAGS_RELEASE="-ggdb -fno-omit-frame-pointer" fi LIBFABRIC_MIN_VERSION=1.4.2 NDCTL_MIN_VERSION=60.1 RPMBUILD_OPTS=( ) PACKAGE_VERSION=$(get_version $PACKAGE_VERSION_TAG) if [ -z "$PACKAGE_VERSION" ] then error "Can not parse version from '${PACKAGE_VERSION_TAG}'" exit 1 fi PACKAGE_SOURCE=${PACKAGE_NAME}-${PACKAGE_VERSION} SOURCE=$PACKAGE_NAME PACKAGE_TARBALL=$PACKAGE_SOURCE.tar.gz RPM_SPEC_FILE=$PACKAGE_SOURCE/$PACKAGE_NAME.spec MAGIC_INSTALL=$PACKAGE_SOURCE/utils/magic-install.sh MAGIC_UNINSTALL=$PACKAGE_SOURCE/utils/magic-uninstall.sh OLDPWD=$PWD [ -d $WORKING_DIR ] || mkdir -v $WORKING_DIR [ -d $OUT_DIR ] || mkdir $OUT_DIR cd $WORKING_DIR check_dir $SOURCE mv $SOURCE $PACKAGE_SOURCE if [ "$DISTRO" = "SLES_like" ] then RPM_LICENSE="BSD-3-Clause" RPM_GROUP_SYS_BASE="System\/Base" RPM_GROUP_SYS_LIBS="System\/Libraries" RPM_GROUP_DEV_LIBS="Development\/Libraries\/C and C++" RPM_PKG_NAME_SUFFIX="1" RPM_MAKE_FLAGS="BINDIR=""%_bindir"" NORPATH=1" RPM_MAKE_INSTALL="%fdupes %{buildroot}\/%{_prefix}" else RPM_LICENSE="BSD" RPM_GROUP_SYS_BASE="System Environment\/Base" RPM_GROUP_SYS_LIBS="System Environment\/Libraries" RPM_GROUP_DEV_LIBS="Development\/Libraries" RPM_PKG_NAME_SUFFIX="" RPM_MAKE_FLAGS="NORPATH=1" RPM_MAKE_INSTALL="" fi # # Create parametrized spec file required by rpmbuild. # Most of variables are set in pkg-config.sh file in order to # keep descriptive values separately from this script. 
# sed -e "s/__VERSION__/$PACKAGE_VERSION/g" \ -e "s/__LICENSE__/$RPM_LICENSE/g" \ -e "s/__PACKAGE_MAINTAINER__/$PACKAGE_MAINTAINER/g" \ -e "s/__PACKAGE_SUMMARY__/$PACKAGE_SUMMARY/g" \ -e "s/__GROUP_SYS_BASE__/$RPM_GROUP_SYS_BASE/g" \ -e "s/__GROUP_SYS_LIBS__/$RPM_GROUP_SYS_LIBS/g" \ -e "s/__GROUP_DEV_LIBS__/$RPM_GROUP_DEV_LIBS/g" \ -e "s/__PKG_NAME_SUFFIX__/$RPM_PKG_NAME_SUFFIX/g" \ -e "s/__MAKE_FLAGS__/$RPM_MAKE_FLAGS/g" \ -e "s/__MAKE_INSTALL_FDUPES__/$RPM_MAKE_INSTALL/g" \ -e "s/__LIBFABRIC_MIN_VER__/$LIBFABRIC_MIN_VERSION/g" \ -e "s/__NDCTL_MIN_VER__/$NDCTL_MIN_VERSION/g" \ $OLDPWD/$SCRIPT_DIR/pmdk.spec.in > $RPM_SPEC_FILE if [ "$DISTRO" = "SLES_like" ] then sed -i '/^#.*bugzilla.redhat/d' $RPM_SPEC_FILE fi # do not split on space IFS=$'\n' # experimental features if [ "${EXPERIMENTAL}" = "y" ] then # no experimental features for now RPMBUILD_OPTS+=( ) fi # libpmem2 if [ "${PMEM2_INSTALL}" == "y" ] then RPMBUILD_OPTS+=(--define "_pmem2_install 1") fi # librpmem & rpmemd if [ "${BUILD_RPMEM}" = "y" ] then RPMBUILD_OPTS+=(--with fabric) else RPMBUILD_OPTS+=(--without fabric) fi # daxio & RAS if [ "${NDCTL_ENABLE}" = "n" ] then RPMBUILD_OPTS+=(--without ndctl) else RPMBUILD_OPTS+=(--with ndctl) fi # use specified testconfig file or default if [[( -n "${TEST_CONFIG_FILE}") && ( -f "$TEST_CONFIG_FILE" ) ]] then echo "Test config file: $TEST_CONFIG_FILE" RPMBUILD_OPTS+=(--define "_testconfig $TEST_CONFIG_FILE") else echo -e "Test config file $TEST_CONFIG_FILE does not exist.\n"\ "Default test config will be used." fi # run make check or not if [ "${BUILD_PACKAGE_CHECK}" == "n" ] then RPMBUILD_OPTS+=(--define "_skip_check 1") fi tar zcf $PACKAGE_TARBALL $PACKAGE_SOURCE # Create directory structure for rpmbuild mkdir -v BUILD SPECS echo "opts: ${RPMBUILD_OPTS[@]}" rpmbuild --define "_topdir `pwd`"\ --define "_rpmdir ${OUT_DIR}"\ --define "_srcrpmdir ${OUT_DIR}"\ -ta $PACKAGE_TARBALL \ ${RPMBUILD_OPTS[@]} echo "Building rpm packages done" exit 0
5,618
19.966418
86
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/pkg-common.sh
# SPDX-License-Identifier: BSD-3-Clause # Copyright 2014-2019, Intel Corporation # # pkg-common.sh - common functions and variables for building packages # export LC_ALL="C" function error() { echo -e "error: $@" } function check_dir() { if [ ! -d $1 ] then error "Directory '$1' does not exist." exit 1 fi } function check_file() { if [ ! -f $1 ] then error "File '$1' does not exist." exit 1 fi } function check_tool() { local tool=$1 if [ -z "$(which $tool 2>/dev/null)" ] then error "'${tool}' not installed or not in PATH" exit 1 fi } function get_version() { echo -n $1 | sed "s/-rc/~rc/" } function get_os() { if [ -f /etc/os-release ] then local OS=$(cat /etc/os-release | grep -m1 -o -P '(?<=NAME=).*($)') [[ "$OS" =~ SLES|openSUSE ]] && echo -n "SLES_like" || ([[ "$OS" =~ "Fedora"|"Red Hat"|"CentOS" ]] && echo -n "RHEL_like" || echo 1) else echo 1 fi } REGEX_DATE_AUTHOR="([a-zA-Z]{3} [a-zA-Z]{3} [0-9]{2} [0-9]{4})\s*(.*)" REGEX_MESSAGE_START="\s*\*\s*(.*)" REGEX_MESSAGE="\s*(\S.*)"
1,042
17.298246
79
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/build-dpkg.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2014-2020, Intel Corporation # # build-dpkg.sh - Script for building deb packages # set -e SCRIPT_DIR=$(dirname $0) source $SCRIPT_DIR/pkg-common.sh # # usage -- print usage message and exit # usage() { [ "$1" ] && echo Error: $1 cat >&2 <<EOF Usage: $0 [ -h ] -t version-tag -s source-dir -w working-dir -o output-dir [ -e build-experimental ] [ -c run-check ] [ -n with-ndctl ] [ -f testconfig-file ] [ -p build-libpmem2 ] -h print this help message -t version-tag source version tag -s source-dir source directory -w working-dir working directory -o output-dir output directory -e build-experimental build experimental packages -c run-check run package check -n with-ndctl build with libndctl -f testconfig-file custom testconfig.sh -p build-libpmem2 build libpmem2 packages EOF exit 1 } # # command-line argument processing... # args=`getopt he:c:r:n:t:d:s:w:o:f:p: $*` [ $? != 0 ] && usage set -- $args for arg do receivetype=auto case "$arg" in -e) EXPERIMENTAL="$2" shift 2 ;; -c) BUILD_PACKAGE_CHECK="$2" shift 2 ;; -f) TEST_CONFIG_FILE="$2" shift 2 ;; -r) BUILD_RPMEM="$2" shift 2 ;; -n) NDCTL_ENABLE="$2" shift 2 ;; -t) PACKAGE_VERSION_TAG="$2" shift 2 ;; -s) SOURCE="$2" shift 2 ;; -w) WORKING_DIR="$2" shift 2 ;; -o) OUT_DIR="$2" shift 2 ;; -p) PMEM2_INSTALL="$2" shift 2 ;; --) shift break ;; esac done # check for mandatory arguments if [ -z "$PACKAGE_VERSION_TAG" -o -z "$SOURCE" -o -z "$WORKING_DIR" -o -z "$OUT_DIR" ] then error "Mandatory arguments missing" usage fi PREFIX=usr LIB_DIR=$PREFIX/lib/$(dpkg-architecture -qDEB_HOST_MULTIARCH) INC_DIR=$PREFIX/include MAN1_DIR=$PREFIX/share/man/man1 MAN3_DIR=$PREFIX/share/man/man3 MAN5_DIR=$PREFIX/share/man/man5 MAN7_DIR=$PREFIX/share/man/man7 DOC_DIR=$PREFIX/share/doc if [ "$EXTRA_CFLAGS_RELEASE" = "" ]; then export EXTRA_CFLAGS_RELEASE="-ggdb -fno-omit-frame-pointer" fi LIBFABRIC_MIN_VERSION=1.4.2 NDCTL_MIN_VERSION=60.1 function convert_changelog() { while read line do if [[ $line =~ $REGEX_DATE_AUTHOR ]] then DATE="${BASH_REMATCH[1]}" AUTHOR="${BASH_REMATCH[2]}" echo " * ${DATE} ${AUTHOR}" elif [[ $line =~ $REGEX_MESSAGE_START ]] then MESSAGE="${BASH_REMATCH[1]}" echo " - ${MESSAGE}" elif [[ $line =~ $REGEX_MESSAGE ]] then MESSAGE="${BASH_REMATCH[1]}" echo " ${MESSAGE}" fi done < $1 } function rpmem_install_triggers_overrides() { cat << EOF > debian/librpmem.install $LIB_DIR/librpmem.so.* EOF cat << EOF > debian/librpmem.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug librpmem: package-name-doesnt-match-sonames EOF cat << EOF > debian/librpmem-dev.install $LIB_DIR/pmdk_debug/librpmem.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/librpmem.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/librpmem.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/librpmem.so $LIB_DIR/pkgconfig/librpmem.pc $INC_DIR/librpmem.h $MAN7_DIR/librpmem.7 $MAN3_DIR/rpmem_*.3 EOF cat << EOF > debian/librpmem-dev.triggers interest man-db EOF cat << EOF > debian/librpmem-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # https://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* EOF cat << EOF > debian/rpmemd.install usr/bin/rpmemd $MAN1_DIR/rpmemd.1 EOF cat << EOF > debian/rpmemd.triggers interest man-db EOF cat << EOF > debian/rpmemd.lintian-overrides $ITP_BUG_EXCUSE 
new-package-should-close-itp-bug EOF } function append_rpmem_control() { cat << EOF >> $CONTROL_FILE Package: librpmem Architecture: any Depends: \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory remote access support library librpmem provides low-level support for remote access to persistent memory (pmem) utilizing RDMA-capable RNICs. The library can be used to replicate remotely a memory region over RDMA protocol. It utilizes appropriate persistency mechanism based on remote node’s platform capabilities. The librpmem utilizes the ssh client to authenticate a user on remote node and for encryption of connection’s out-of-band configuration data. . This library is for applications that use remote persistent memory directly, without the help of any library-supplied transactions or memory allocation. Higher-level libraries that build on libpmem are available and are recommended for most applications. Package: librpmem-dev Section: libdevel Architecture: any Depends: librpmem (=\${binary:Version}), libpmem-dev, \${shlibs:Depends}, \${misc:Depends} Description: Development files for librpmem librpmem provides low-level support for remote access to persistent memory (pmem) utilizing RDMA-capable RNICs. . This package contains libraries and header files used for linking programs against librpmem. Package: rpmemd Section: misc Architecture: any Priority: optional Depends: \${shlibs:Depends}, \${misc:Depends} Description: rpmem daemon Daemon for Remote Persistent Memory support. EOF } function libpmem2_install_triggers_overrides() { cat << EOF > debian/libpmem2.install $LIB_DIR/libpmem2.so.* EOF cat << EOF > debian/libpmem2.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug libpmem2: package-name-doesnt-match-sonames EOF cat << EOF > debian/libpmem2-dev.install $LIB_DIR/pmdk_debug/libpmem2.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmem2.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmem2.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/libpmem2.so $LIB_DIR/pkgconfig/libpmem2.pc $INC_DIR/libpmem2.h $MAN7_DIR/libpmem2.7 $MAN3_DIR/pmem2_*.3 EOF cat << EOF > debian/libpmem2-dev.triggers interest man-db EOF cat << EOF > debian/libpmem2-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # https://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* EOF } function append_libpmem2_control() { cat << EOF >> $CONTROL_FILE Package: libpmem2 Architecture: any Depends: \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory low level support library libpmem2 provides low level persistent memory support. In particular, support for the persistent memory instructions for flushing changes to pmem is provided. (EXPERIMENTAL) Package: libpmem2-dev Section: libdevel Architecture: any Depends: libpmem2 (=\${binary:Version}), \${shlibs:Depends}, \${misc:Depends} Description: Development files for libpmem2 libpmem2 provides low level persistent memory support. In particular, support for the persistent memory instructions for flushing changes to pmem is provided. 
(EXPERIMENTAL) EOF } function daxio_install_triggers_overrides() { cat << EOF > debian/daxio.install usr/bin/daxio $MAN1_DIR/daxio.1 EOF cat << EOF > debian/daxio.triggers interest man-db EOF cat << EOF > debian/daxio.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug EOF } function append_daxio_control() { cat << EOF >> $CONTROL_FILE Package: daxio Section: misc Architecture: any Priority: optional Depends: libpmem (=\${binary:Version}), \${shlibs:Depends}, \${misc:Depends} Description: dd-like tool to read/write to a devdax device The daxio utility performs I/O on Device DAX devices or zeroes a Device DAX device. Since the standard I/O APIs (read/write) cannot be used with Device DAX, data transfer is performed on a memory-mapped device. The daxio may be used to dump Device DAX data to a file, restore data from a backup copy, move/copy data to another device or to erase data from a device. EOF } if [ "${BUILD_PACKAGE_CHECK}" == "y" ] then CHECK_CMD=" override_dh_auto_test: dh_auto_test if [ -f $TEST_CONFIG_FILE ]; then\ cp $TEST_CONFIG_FILE src/test/testconfig.sh;\ else\ echo 'PMEM_FS_DIR=/tmp' > src/test/testconfig.sh; \ echo 'PMEM_FS_DIR_FORCE_PMEM=1' >> src/test/testconfig.sh; \ echo 'TEST_BUILD=\"debug nondebug\"' >> src/test/testconfig.sh; \ echo 'TEST_FS=\"pmem any none\"' >> src/test/testconfig.sh; \ fi make pcheck ${PCHECK_OPTS} " else CHECK_CMD=" override_dh_auto_test: " fi check_tool debuild check_tool dch check_file $SCRIPT_DIR/pkg-config.sh source $SCRIPT_DIR/pkg-config.sh PACKAGE_VERSION=$(get_version $PACKAGE_VERSION_TAG) PACKAGE_RELEASE=1 PACKAGE_SOURCE=${PACKAGE_NAME}-${PACKAGE_VERSION} PACKAGE_TARBALL_ORIG=${PACKAGE_NAME}_${PACKAGE_VERSION}.orig.tar.gz MAGIC_INSTALL=utils/magic-install.sh MAGIC_UNINSTALL=utils/magic-uninstall.sh CONTROL_FILE=debian/control [ -d $WORKING_DIR ] || mkdir $WORKING_DIR [ -d $OUT_DIR ] || mkdir $OUT_DIR OLD_DIR=$PWD cd $WORKING_DIR check_dir $SOURCE mv $SOURCE $PACKAGE_SOURCE tar zcf $PACKAGE_TARBALL_ORIG $PACKAGE_SOURCE cd $PACKAGE_SOURCE rm -rf debian mkdir debian # Generate compat file cat << EOF > debian/compat 9 EOF # Generate control file cat << EOF > $CONTROL_FILE Source: $PACKAGE_NAME Maintainer: $PACKAGE_MAINTAINER Section: libs Priority: optional Standards-version: 4.1.4 Build-Depends: debhelper (>= 9) Homepage: https://pmem.io/pmdk/ Package: libpmem Architecture: any Depends: \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory low level support library libpmem provides low level persistent memory support. In particular, support for the persistent memory instructions for flushing changes to pmem is provided. Package: libpmem-dev Section: libdevel Architecture: any Depends: libpmem (=\${binary:Version}), \${shlibs:Depends}, \${misc:Depends} Description: Development files for libpmem libpmem provides low level persistent memory support. In particular, support for the persistent memory instructions for flushing changes to pmem is provided. Package: libpmemblk Architecture: any Depends: libpmem (=\${binary:Version}), \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory block array support library libpmemblk implements a pmem-resident array of blocks, all the same size, where a block is updated atomically with respect to power failure or program interruption (no torn blocks). 
Package: libpmemblk-dev Section: libdevel Architecture: any Depends: libpmemblk (=\${binary:Version}), libpmem-dev, \${shlibs:Depends}, \${misc:Depends} Description: Development files for libpmemblk libpmemblk implements a pmem-resident array of blocks, all the same size, where a block is updated atomically with respect to power failure or program interruption (no torn blocks). Package: libpmemlog Architecture: any Depends: libpmem (=\${binary:Version}), \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory log file support library libpmemlog implements a pmem-resident log file. Package: libpmemlog-dev Section: libdevel Architecture: any Depends: libpmemlog (=\${binary:Version}), libpmem-dev, \${shlibs:Depends}, \${misc:Depends} Description: Development files for libpmemlog libpmemlog implements a pmem-resident log file. Package: libpmemobj Architecture: any Depends: libpmem (=\${binary:Version}), \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory object store support library libpmemobj turns a persistent memory file into a flexible object store, supporting transactions, memory management, locking, lists, and a number of other features. Package: libpmemobj-dev Section: libdevel Architecture: any Depends: libpmemobj (=\${binary:Version}), libpmem-dev, \${shlibs:Depends}, \${misc:Depends} Description: Development files for libpmemobj libpmemobj turns a persistent memory file into a flexible object store, supporting transactions, memory management, locking, lists, and a number of other features. . This package contains libraries and header files used for linking programs against libpmemobj. Package: libpmempool Architecture: any Depends: libpmem (=\${binary:Version}), \${shlibs:Depends}, \${misc:Depends} Description: Persistent Memory pool management support library libpmempool provides a set of utilities for management, diagnostics and repair of persistent memory pools. A pool in this context means a pmemobj pool, pmemblk pool, pmemlog pool or BTT layout, independent of the underlying storage. The libpmempool is for applications that need high reliability or built-in troubleshooting. It may be useful for testing and debugging purposes also. Package: libpmempool-dev Section: libdevel Architecture: any Depends: libpmempool (=\${binary:Version}), libpmem-dev, \${shlibs:Depends}, \${misc:Depends} Description: Development files for libpmempool libpmempool provides a set of utilities for management, diagnostics and repair of persistent memory pools. . This package contains libraries and header files used for linking programs against libpmempool. Package: $PACKAGE_NAME-dbg Section: debug Priority: optional Architecture: any Depends: libpmem (=\${binary:Version}), libpmemblk (=\${binary:Version}), libpmemlog (=\${binary:Version}), libpmemobj (=\${binary:Version}), libpmempool (=\${binary:Version}), \${misc:Depends} Description: Debug symbols for PMDK libraries Debug symbols for all PMDK libraries. Package: pmempool Section: misc Architecture: any Priority: optional Depends: \${shlibs:Depends}, \${misc:Depends} Description: utility for management and off-line analysis of PMDK memory pools This utility is a standalone tool that manages Persistent Memory pools created by PMDK libraries. It provides a set of utilities for administration and diagnostics of Persistent Memory pools. Pmempool may be useful for troubleshooting by system administrators and users of the applications based on PMDK libraries. 
Package: pmreorder Section: misc Architecture: any Priority: optional Depends: \${shlibs:Depends}, \${misc:Depends} Description: tool to parse and replay pmemcheck logs Pmreorder is tool that parses and replays log of operations collected by pmemcheck -- a atandalone tool which is a collection of python scripts designed to parse and replay operations logged by pmemcheck - a persistent memory checking tool. Pmreorder performs the store reordering between persistent memory barriers - a sequence of flush-fence operations. It uses a consistency checking routine provided in the command line options to check whether files are in a consistent state. EOF cp LICENSE debian/copyright if [ -n "$NDCTL_ENABLE" ]; then pass_ndctl_enable="NDCTL_ENABLE=$NDCTL_ENABLE" else pass_ndctl_enable="" fi cat << EOF > debian/rules #!/usr/bin/make -f #export DH_VERBOSE=1 %: dh \$@ override_dh_strip: dh_strip --dbg-package=$PACKAGE_NAME-dbg override_dh_auto_build: dh_auto_build -- EXPERIMENTAL=${EXPERIMENTAL} prefix=/$PREFIX libdir=/$LIB_DIR includedir=/$INC_DIR docdir=/$DOC_DIR man1dir=/$MAN1_DIR man3dir=/$MAN3_DIR man5dir=/$MAN5_DIR man7dir=/$MAN7_DIR sysconfdir=/etc bashcompdir=/usr/share/bash-completion/completions NORPATH=1 ${pass_ndctl_enable} SRCVERSION=$SRCVERSION PMEM2_INSTALL=${PMEM2_INSTALL} override_dh_auto_install: dh_auto_install -- EXPERIMENTAL=${EXPERIMENTAL} prefix=/$PREFIX libdir=/$LIB_DIR includedir=/$INC_DIR docdir=/$DOC_DIR man1dir=/$MAN1_DIR man3dir=/$MAN3_DIR man5dir=/$MAN5_DIR man7dir=/$MAN7_DIR sysconfdir=/etc bashcompdir=/usr/share/bash-completion/completions NORPATH=1 ${pass_ndctl_enable} SRCVERSION=$SRCVERSION PMEM2_INSTALL=${PMEM2_INSTALL} find -path './debian/*usr/share/man/man*/*.gz' -exec gunzip {} \; override_dh_install: mkdir -p debian/tmp/usr/share/pmdk/ cp utils/pmdk.magic debian/tmp/usr/share/pmdk/ dh_install ${CHECK_CMD} EOF chmod +x debian/rules mkdir debian/source ITP_BUG_EXCUSE="# This is our first package but we do not want to upload it yet. # Please refer to Debian Developer's Reference section 5.1 (New packages) for details: # https://www.debian.org/doc/manuals/developers-reference/pkgs.html#newpackage" cat << EOF > debian/source/format 3.0 (quilt) EOF cat << EOF > debian/libpmem.install $LIB_DIR/libpmem.so.* usr/share/pmdk/pmdk.magic $MAN5_DIR/poolset.5 EOF cat $MAGIC_INSTALL > debian/libpmem.postinst sed -i '1s/.*/\#\!\/bin\/bash/' debian/libpmem.postinst echo $'\n#DEBHELPER#\n' >> debian/libpmem.postinst cat $MAGIC_UNINSTALL > debian/libpmem.prerm sed -i '1s/.*/\#\!\/bin\/bash/' debian/libpmem.prerm echo $'\n#DEBHELPER#\n' >> debian/libpmem.prerm cat << EOF > debian/libpmem.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug libpmem: package-name-doesnt-match-sonames EOF cat << EOF > debian/libpmem-dev.install $LIB_DIR/pmdk_debug/libpmem.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmem.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmem.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/libpmem.so $LIB_DIR/pkgconfig/libpmem.pc $INC_DIR/libpmem.h $MAN7_DIR/libpmem.7 $MAN3_DIR/pmem_*.3 EOF cat << EOF > debian/libpmem-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # https://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* # pmdk provides second set of libraries for debugging. # These are in /usr/lib/$arch/pmdk_dbg/, but still trigger ldconfig. 
# Related issue: https://github.com/pmem/issues/issues/841 libpmem-dev: package-has-unnecessary-activation-of-ldconfig-trigger EOF cat << EOF > debian/libpmemblk.install $LIB_DIR/libpmemblk.so.* EOF cat << EOF > debian/libpmemblk.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug libpmemblk: package-name-doesnt-match-sonames EOF cat << EOF > debian/libpmemblk-dev.install $LIB_DIR/pmdk_debug/libpmemblk.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmemblk.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmemblk.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/libpmemblk.so $LIB_DIR/pkgconfig/libpmemblk.pc $INC_DIR/libpmemblk.h $MAN7_DIR/libpmemblk.7 $MAN3_DIR/pmemblk_*.3 EOF cat << EOF > debian/libpmemblk-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # https://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* # pmdk provides second set of libraries for debugging. # These are in /usr/lib/$arch/pmdk_dbg/, but still trigger ldconfig. # Related issue: https://github.com/pmem/issues/issues/841 libpmemblk-dev: package-has-unnecessary-activation-of-ldconfig-trigger EOF cat << EOF > debian/libpmemlog.install $LIB_DIR/libpmemlog.so.* EOF cat << EOF > debian/libpmemlog.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug libpmemlog: package-name-doesnt-match-sonames EOF cat << EOF > debian/libpmemlog-dev.install $LIB_DIR/pmdk_debug/libpmemlog.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmemlog.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmemlog.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/libpmemlog.so $LIB_DIR/pkgconfig/libpmemlog.pc $INC_DIR/libpmemlog.h $MAN7_DIR/libpmemlog.7 $MAN3_DIR/pmemlog_*.3 EOF cat << EOF > debian/libpmemlog-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # https://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* # pmdk provides second set of libraries for debugging. # These are in /usr/lib/$arch/pmdk_dbg/, but still trigger ldconfig. 
# Related issue: https://github.com/pmem/issues/issues/841 libpmemlog-dev: package-has-unnecessary-activation-of-ldconfig-trigger EOF cat << EOF > debian/libpmemobj.install $LIB_DIR/libpmemobj.so.* EOF cat << EOF > debian/libpmemobj.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug libpmemobj: package-name-doesnt-match-sonames EOF cat << EOF > debian/libpmemobj-dev.install $LIB_DIR/pmdk_debug/libpmemobj.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmemobj.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmemobj.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/libpmemobj.so $LIB_DIR/pkgconfig/libpmemobj.pc $INC_DIR/libpmemobj.h $INC_DIR/libpmemobj/*.h $MAN7_DIR/libpmemobj.7 $MAN3_DIR/pmemobj_*.3 $MAN3_DIR/pobj_*.3 $MAN3_DIR/oid_*.3 $MAN3_DIR/toid*.3 $MAN3_DIR/direct_*.3 $MAN3_DIR/d_r*.3 $MAN3_DIR/tx_*.3 EOF cat << EOF > debian/libpmemobj-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # https://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* # pmdk provides second set of libraries for debugging. # These are in /usr/lib/$arch/pmdk_dbg/, but still trigger ldconfig. # Related issue: https://github.com/pmem/issues/issues/841 libpmemobj-dev: package-has-unnecessary-activation-of-ldconfig-trigger EOF cat << EOF > debian/libpmempool.install $LIB_DIR/libpmempool.so.* EOF cat << EOF > debian/libpmempool.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug libpmempool: package-name-doesnt-match-sonames EOF cat << EOF > debian/libpmempool-dev.install $LIB_DIR/pmdk_debug/libpmempool.a $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmempool.so $LIB_DIR/pmdk_dbg/ $LIB_DIR/pmdk_debug/libpmempool.so.* $LIB_DIR/pmdk_dbg/ $LIB_DIR/libpmempool.so $LIB_DIR/pkgconfig/libpmempool.pc $INC_DIR/libpmempool.h $MAN7_DIR/libpmempool.7 $MAN3_DIR/pmempool_*.3 EOF cat << EOF > debian/libpmempool-dev.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug # The following warnings are triggered by a bug in debhelper: # https://bugs.debian.org/204975 postinst-has-useless-call-to-ldconfig postrm-has-useless-call-to-ldconfig # We do not want to compile with -O2 for debug version hardening-no-fortify-functions $LIB_DIR/pmdk_dbg/* # pmdk provides second set of libraries for debugging. # These are in /usr/lib/$arch/pmdk_dbg/, but still trigger ldconfig. 
# Related issue: https://github.com/pmem/issues/issues/841 libpmempool-dev: package-has-unnecessary-activation-of-ldconfig-trigger EOF cat << EOF > debian/$PACKAGE_NAME-dbg.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug EOF cat << EOF > debian/pmempool.install usr/bin/pmempool $MAN1_DIR/pmempool.1 $MAN1_DIR/pmempool-*.1 usr/share/bash-completion/completions/pmempool EOF cat << EOF > debian/pmempool.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug EOF cat << EOF > debian/pmreorder.install usr/bin/pmreorder usr/share/pmreorder/*.py $MAN1_DIR/pmreorder.1 EOF cat << EOF > debian/pmreorder.lintian-overrides $ITP_BUG_EXCUSE new-package-should-close-itp-bug EOF # librpmem & rpmemd if [ "${BUILD_RPMEM}" = "y" -a "${RPMEM_DPKG}" = "y" ] then append_rpmem_control; rpmem_install_triggers_overrides; fi # libpmem2 if [ "${PMEM2_INSTALL}" == "y" ] then append_libpmem2_control; libpmem2_install_triggers_overrides; fi # daxio if [ "${NDCTL_ENABLE}" != "n" ] then append_daxio_control; daxio_install_triggers_overrides; fi # Convert ChangeLog to debian format CHANGELOG_TMP=changelog.tmp dch --create --empty --package $PACKAGE_NAME -v $PACKAGE_VERSION-$PACKAGE_RELEASE -M -c $CHANGELOG_TMP touch debian/changelog head -n1 $CHANGELOG_TMP >> debian/changelog echo "" >> debian/changelog convert_changelog ChangeLog >> debian/changelog echo "" >> debian/changelog tail -n1 $CHANGELOG_TMP >> debian/changelog rm $CHANGELOG_TMP # This is our first release but we do debuild --preserve-envvar=EXTRA_CFLAGS_RELEASE \ --preserve-envvar=EXTRA_CFLAGS_DEBUG \ --preserve-envvar=EXTRA_CFLAGS \ --preserve-envvar=EXTRA_CXXFLAGS \ --preserve-envvar=EXTRA_LDFLAGS \ --preserve-envvar=NDCTL_ENABLE \ -us -uc -b cd $OLD_DIR find $WORKING_DIR -name "*.deb"\ -or -name "*.dsc"\ -or -name "*.changes"\ -or -name "*.orig.tar.gz"\ -or -name "*.debian.tar.gz" | while read FILE do mv -v $FILE $OUT_DIR/ done exit 0
24,325
27.925089
347
sh
null
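A minimal sketch of how the generated debian/ metadata above is typically exercised; the exported values below are assumptions mirroring the --preserve-envvar flags passed to debuild, not settings taken from the repository.
# Hypothetical environment, then an unsigned binary-only package build;
# debuild drops the resulting .deb/.changes files one directory up.
export EXTRA_CFLAGS="-O2"
export NDCTL_ENABLE=y
debuild -us -uc -b
ls ../*.deb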
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/check-os.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2017-2019, Intel Corporation # # Used to check if there are no banned functions in .o file # # usage: ./check-os.sh [os.h path] [.o file] [.c file] EXCLUDE="os_posix|os_thread_posix" if [[ $2 =~ $EXCLUDE ]]; then echo "skip $2" exit 0 fi symbols=$(nm --demangle --undefined-only --format=posix $2 | sed 's/ U *//g') functions=$(cat $1 | tr '\n' '|') functions=${functions%?} # remove trailing | character out=$( for sym in $symbols do grep -wE $functions <<<"$sym" done | sed 's/$/\(\)/g') [[ ! -z $out ]] && echo -e "`pwd`/$3:1: non wrapped function(s):\n$out\nplease use os wrappers" && rm -f $2 && # remove .o file as it doesn't match requirements exit 1 exit 0
750
23.225806
80
sh
null
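A usage sketch of check-os.sh above; the object file, source file, and banned-symbol list paths are hypothetical.
# The first argument lists banned symbols (one per line), the second is the
# freshly built object file, the third is its source file (used in the message).
nm --demangle --undefined-only --format=posix build/foo.o | head
./check-os.sh src/core/os.h build/foo.o src/core/foo.c \
	|| echo "foo.c calls a banned function directly; use the os_* wrappers"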
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/magic-uninstall.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2014-2017, Intel Corporation # # magic-uninstall.sh -- Script for uninstalling magic script # set -e HDR_LOCAL=$(grep "File: pmdk" /etc/magic) HDR_PKG=$(grep "File: pmdk" /usr/share/pmdk/pmdk.magic) if [[ $HDR_LOCAL == $HDR_PKG ]] then echo "Removing PMDK magic from /etc/magic" HDR_LINE=$(grep -n "File: pmdk" /etc/magic | cut -f1 -d:) HDR_PKG_LINE=$(grep -n "File: pmdk" /usr/share/pmdk/pmdk.magic | cut -f1 -d:) HDR_LINES=$(cat /usr/share/pmdk/pmdk.magic | wc -l) HDR_FIRST=$(($HDR_LINE - $HDR_PKG_LINE + 1)) HDR_LAST=$(($HDR_FIRST + $HDR_LINES)) sed -i "${HDR_FIRST},${HDR_LAST}d" /etc/magic fi
680
29.954545
78
sh
null
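A small sketch illustrating the line arithmetic used by magic-uninstall.sh above; the numbers are assumed example values, not taken from any real /etc/magic.
# If "File: pmdk" sits on line 120 of /etc/magic and on line 3 of the packaged
# pmdk.magic (10 lines long), the deleted block spans lines 118..128.
HDR_LINE=120; HDR_PKG_LINE=3; HDR_LINES=10
HDR_FIRST=$(($HDR_LINE - $HDR_PKG_LINE + 1))   # 118
HDR_LAST=$(($HDR_FIRST + $HDR_LINES))          # 128
echo "sed range: ${HDR_FIRST},${HDR_LAST}d"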
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/pkg-config.sh
# SPDX-License-Identifier: BSD-3-Clause # Copyright 2014-2020, Intel Corporation # Name of package PACKAGE_NAME="pmdk" # Name and email of package maintainer PACKAGE_MAINTAINER="Piotr Balcer <[email protected]>" # Brief description of the package PACKAGE_SUMMARY="Persistent Memory Development Kit" # Full description of the package PACKAGE_DESCRIPTION="The collection of libraries and utilities for Persistent Memory Programming" # Website PACKAGE_URL="https://pmem.io/pmdk"
486
26.055556
97
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/style_check.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # utils/style_check.sh -- common style checking script # set -e ARGS=("$@") CSTYLE_ARGS=() CLANG_ARGS=() FLAKE8_ARGS=() CHECK_TYPE=$1 [ -z "$clang_format_bin" ] && which clang-format-9 >/dev/null && clang_format_bin=clang-format-9 [ -z "$clang_format_bin" ] && which clang-format >/dev/null && clang_format_bin=clang-format [ -z "$clang_format_bin" ] && clang_format_bin=clang-format # # print script usage # function usage() { echo "$0 <check|format> [C/C++ files]" } # # require clang-format version 9.0 # function check_clang_version() { set +e which ${clang_format_bin} &> /dev/null && ${clang_format_bin} --version |\ grep "version 9\.0"\ &> /dev/null if [ $? -ne 0 ]; then echo "SKIP: requires clang-format version 9.0" exit 0 fi set -e } # # run old cstyle check # function run_cstyle() { if [ $# -eq 0 ]; then return fi ${cstyle_bin} -pP $@ } # # generate diff with clang-format rules # function run_clang_check() { if [ $# -eq 0 ]; then return fi check_clang_version for file in $@ do LINES=$(${clang_format_bin} -style=file $file |\ git diff --no-index $file - | wc -l) if [ $LINES -ne 0 ]; then ${clang_format_bin} -style=file $file | git diff --no-index $file - fi done } # # in-place format according to clang-format rules # function run_clang_format() { if [ $# -eq 0 ]; then return fi check_clang_version ${clang_format_bin} -style=file -i $@ } function run_flake8() { if [ $# -eq 0 ]; then return fi ${flake8_bin} --exclude=testconfig.py,envconfig.py $@ } for ((i=1; i<$#; i++)) { IGNORE="$(dirname ${ARGS[$i]})/.cstyleignore" if [ -e $IGNORE ]; then if grep -q ${ARGS[$i]} $IGNORE ; then echo "SKIP ${ARGS[$i]}" continue fi fi case ${ARGS[$i]} in *.[ch]pp) CLANG_ARGS+="${ARGS[$i]} " ;; *.[ch]) CSTYLE_ARGS+="${ARGS[$i]} " ;; *.py) FLAKE8_ARGS+="${ARGS[$i]} " ;; *) echo "Unknown argument" exit 1 ;; esac } case $CHECK_TYPE in check) run_cstyle ${CSTYLE_ARGS} run_clang_check ${CLANG_ARGS} run_flake8 ${FLAKE8_ARGS} ;; format) run_clang_format ${CLANG_ARGS} ;; *) echo "Invalid parameters" usage exit 1 ;; esac
2,274
15.485507
75
sh
null
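A usage sketch of style_check.sh above; the file names are illustrative only.
# "check" only reports cstyle/clang-format/flake8 differences;
# "format" rewrites C++ sources in place with clang-format.
./utils/style_check.sh check src/libpmemobj/tx.c src/test/foo.cpp src/test/bar.py
./utils/style_check.sh format src/test/foo.cpp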
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/version.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2017-2020, Intel Corporation # # utils/version.sh -- determine project's version # set -e if [ -f "$1/VERSION" ]; then cat "$1/VERSION" exit 0 fi if [ -f $1/GIT_VERSION ]; then echo -n "\$Format:%h\$" | cmp -s $1/GIT_VERSION - && true if [ $? -eq 0 ]; then PARSE_GIT_VERSION=0 else PARSE_GIT_VERSION=1 fi else PARSE_GIT_VERSION=0 fi LATEST_RELEASE=$(cat $1/ChangeLog | grep "* Version" | cut -d " " -f 3 | sort -rd | head -n1) if [ $PARSE_GIT_VERSION -eq 1 ]; then GIT_VERSION_HASH=$(cat $1/GIT_VERSION) if [ -n "$GIT_VERSION_HASH" ]; then echo "$LATEST_RELEASE+git.$GIT_VERSION_HASH" exit 0 fi fi cd "$1" GIT_DESCRIBE=$(git describe 2>/dev/null) && true if [ -n "$GIT_DESCRIBE" ]; then # 1.5-19-gb8f78a329 -> 1.5+git19.gb8f78a329 # 1.5-rc1-19-gb8f78a329 -> 1.5-rc1+git19.gb8f78a329 echo "$GIT_DESCRIBE" | sed "s/\([0-9.]*\)-rc\([0-9]*\)-\([0-9]*\)-\([0-9a-g]*\)/\1-rc\2+git\3.\4/" | sed "s/\([0-9.]*\)-\([0-9]*\)-\([0-9a-g]*\)/\1+git\2.\3/" exit 0 fi # try commit it, git describe can fail when there are no tags (e.g. with shallow clone, like on Travis) GIT_COMMIT=$(git log -1 --format=%h) && true if [ -n "$GIT_COMMIT" ]; then echo "$LATEST_RELEASE+git.$GIT_COMMIT" exit 0 fi cd - >/dev/null # If nothing works, try to get version from directory name VER=$(basename `realpath "$1"` | sed 's/pmdk[-]*\([0-9a-z.+-]*\).*/\1/') if [ -n "$VER" ]; then echo "$VER" exit 0 fi exit 1
1,489
22.650794
159
sh
null
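A stand-alone illustration of the git-describe rewrite performed by version.sh above, using the sample describe strings from its own comments.
# 1.5-19-gb8f78a329     -> 1.5+git19.gb8f78a329
echo "1.5-19-gb8f78a329" | \
	sed "s/\([0-9.]*\)-\([0-9]*\)-\([0-9a-g]*\)/\1+git\2.\3/"
# 1.5-rc1-19-gb8f78a329 -> 1.5-rc1+git19.gb8f78a329
echo "1.5-rc1-19-gb8f78a329" | \
	sed "s/\([0-9.]*\)-rc\([0-9]*\)-\([0-9]*\)-\([0-9a-g]*\)/\1-rc\2+git\3.\4/"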
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/check_license/file-exceptions.sh
#!/bin/sh -e # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # file-exceptions.sh - filter out files not checked for copyright and license grep -v -E -e '/queue.h$' -e '/getopt.h$' -e '/getopt.c$' -e 'src/core/valgrind/' -e '/testconfig\...$'
278
33.875
103
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/check_license/check-headers.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # check-headers.sh - check copyright and license in source files SELF=$0 function usage() { echo "Usage: $SELF <source_root_path> <license_tag> [-h|-v|-a]" echo " -h, --help this help message" echo " -v, --verbose verbose mode" echo " -a, --all check all files (only modified files are checked by default)" } if [ "$#" -lt 2 ]; then usage >&2 exit 2 fi SOURCE_ROOT=$1 shift LICENSE=$1 shift PATTERN=`mktemp` TMP=`mktemp` TMP2=`mktemp` TEMPFILE=`mktemp` rm -f $PATTERN $TMP $TMP2 if [ "$1" == "-h" -o "$1" == "--help" ]; then usage exit 0 fi export GIT="git -C ${SOURCE_ROOT}" $GIT rev-parse || exit 1 if [ -f $SOURCE_ROOT/.git/shallow ]; then SHALLOW_CLONE=1 echo echo "Warning: This is a shallow clone. Checking dates in copyright headers" echo " will be skipped in case of files that have no history." echo else SHALLOW_CLONE=0 fi VERBOSE=0 CHECK_ALL=0 while [ "$1" != "" ]; do case $1 in -v|--verbose) VERBOSE=1 ;; -a|--all) CHECK_ALL=1 ;; esac shift done if [ $CHECK_ALL -eq 0 ]; then CURRENT_COMMIT=$($GIT log --pretty=%H -1) MERGE_BASE=$($GIT merge-base HEAD origin/master 2>/dev/null) [ -z $MERGE_BASE ] && \ MERGE_BASE=$($GIT log --pretty="%cN:%H" | grep GitHub | head -n1 | cut -d: -f2) [ -z $MERGE_BASE -o "$CURRENT_COMMIT" = "$MERGE_BASE" ] && \ CHECK_ALL=1 fi if [ $CHECK_ALL -eq 1 ]; then echo "Checking copyright headers of all files..." GIT_COMMAND="ls-tree -r --name-only HEAD" else if [ $VERBOSE -eq 1 ]; then echo echo "Warning: will check copyright headers of modified files only," echo " in order to check all files issue the following command:" echo " $ $SELF <source_root_path> <license_tag> -a" echo " (e.g.: $ $SELF $SOURCE_ROOT $LICENSE -a)" echo fi echo "Checking copyright headers of modified files only..." GIT_COMMAND="diff --name-only $MERGE_BASE $CURRENT_COMMIT" fi FILES=$($GIT $GIT_COMMAND | ${SOURCE_ROOT}/utils/check_license/file-exceptions.sh | \ grep -E -e '*\.[chs]$' -e '*\.[ch]pp$' -e '*\.sh$' \ -e '*\.py$' -e '*\.link$' -e 'Makefile*' -e 'TEST*' \ -e '/common.inc$' -e '/match$' -e '/check_whitespace$' \ -e 'LICENSE$' -e 'CMakeLists.txt$' -e '*\.cmake$' | \ xargs) RV=0 for file in $FILES ; do # The src_path is a path which should be used in every command except git. # git is called with -C flag so filepaths should be relative to SOURCE_ROOT src_path="${SOURCE_ROOT}/$file" [ ! -f $src_path ] && continue # ensure that file is UTF-8 encoded ENCODING=`file -b --mime-encoding $src_path` iconv -f $ENCODING -t "UTF-8" $src_path > $TEMPFILE if ! grep -q "SPDX-License-Identifier: $LICENSE" $src_path; then echo >&2 "$src_path:1: no $LICENSE SPDX tag found " RV=1 fi if [ $SHALLOW_CLONE -eq 0 ]; then $GIT log --no-merges --format="%ai %aE" -- $file | sort > $TMP else # mark the grafted commits (commits with no parents) $GIT log --no-merges --format="%ai %aE grafted-%p-commit" -- $file | sort > $TMP fi # skip checking dates for non-Intel commits [[ ! 
$(tail -n1 $TMP) =~ "@intel.com" ]] && continue # skip checking dates for new files [ $(cat $TMP | wc -l) -le 1 ] && continue # grep out the grafted commits (commits with no parents) # and skip checking dates for non-Intel commits grep -v -e "grafted--commit" $TMP | grep -e "@intel.com" > $TMP2 [ $(cat $TMP2 | wc -l) -eq 0 ] && continue FIRST=`head -n1 $TMP2` LAST=` tail -n1 $TMP2` YEARS=`sed ' /Copyright [0-9-]\+.*, Intel Corporation/!d s/.*Copyright \([0-9]\+\)-\([0-9]\+\),.*/\1-\2/ s/.*Copyright \([0-9]\+\),.*/\1-\1/' $src_path` if [ -z "$YEARS" ]; then echo >&2 "$src_path:1: No copyright years found" RV=1 continue fi HEADER_FIRST=`echo $YEARS | cut -d"-" -f1` HEADER_LAST=` echo $YEARS | cut -d"-" -f2` COMMIT_FIRST=`echo $FIRST | cut -d"-" -f1` COMMIT_LAST=` echo $LAST | cut -d"-" -f1` if [ "$COMMIT_FIRST" != "" -a "$COMMIT_LAST" != "" ]; then if [ $HEADER_LAST -lt $COMMIT_LAST ]; then if [ $HEADER_FIRST -lt $COMMIT_FIRST ]; then COMMIT_FIRST=$HEADER_FIRST fi COMMIT_LAST=`date +%G` if [ $COMMIT_FIRST -eq $COMMIT_LAST ]; then NEW=$COMMIT_LAST else NEW=$COMMIT_FIRST-$COMMIT_LAST fi echo "$file:1: error: wrong copyright date: (is: $YEARS, should be: $NEW)" >&2 RV=1 fi else echo "$file:1: unknown commit dates" >&2 RV=1 fi done rm -f $TMP $TMP2 $TEMPFILE $(dirname "$0")/check-ms-license.pl $FILES # check if error found if [ $RV -eq 0 ]; then echo "Copyright headers are OK." else echo "Error(s) in copyright headers found!" >&2 fi exit $RV
4,703
25.426966
87
sh
null
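A usage sketch of check-headers.sh above, assuming it is run from the root of a PMDK checkout with the BSD-3-Clause tag used throughout this tree.
# default: only files modified since the merge base are checked
./utils/check_license/check-headers.sh . BSD-3-Clause -v
# -a forces a scan of every tracked file
./utils/check_license/check-headers.sh . BSD-3-Clause -a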
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/build-local.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2017-2020, Intel Corporation # # build-local.sh - runs a Docker container from a Docker image with environment # prepared for building PMDK project and starts building PMDK # # this script is for building PMDK locally (not on Travis) # # Notes: # - run this script from its location or set the variable 'HOST_WORKDIR' to # where the root of the PMDK project is on the host machine, # - set variables 'OS' and 'OS_VER' properly to a system you want to build PMDK # on (for proper values take a look on the list of Dockerfiles at the # utils/docker/images directory), eg. OS=ubuntu, OS_VER=16.04. # - set 'KEEP_TEST_CONFIG' variable to 1 if you do not want the tests to be # reconfigured (your current test configuration will be preserved and used) # - tests with Device Dax are not supported by pcheck yet, so do not provide # these devices in your configuration # set -e # Environment variables that can be customized (default values are after dash): export KEEP_CONTAINER=${KEEP_CONTAINER:-0} export KEEP_TEST_CONFIG=${KEEP_TEST_CONFIG:-0} export TEST_BUILD=${TEST_BUILD:-all} export REMOTE_TESTS=${REMOTE_TESTS:-1} export MAKE_PKG=${MAKE_PKG:-0} export EXTRA_CFLAGS=${EXTRA_CFLAGS} export EXTRA_CXXFLAGS=${EXTRA_CXXFLAGS:-} export PMDK_CC=${PMDK_CC:-gcc} export PMDK_CXX=${PMDK_CXX:-g++} export EXPERIMENTAL=${EXPERIMENTAL:-n} export VALGRIND=${VALGRIND:-1} export DOCKERHUB_REPO=${DOCKERHUB_REPO:-pmem/pmdk} export GITHUB_REPO=${GITHUB_REPO:-pmem/pmdk} if [[ -z "$OS" || -z "$OS_VER" ]]; then echo "ERROR: The variables OS and OS_VER have to be set " \ "(eg. OS=ubuntu, OS_VER=16.04)." exit 1 fi if [[ -z "$HOST_WORKDIR" ]]; then HOST_WORKDIR=$(readlink -f ../..) fi if [[ "$KEEP_CONTAINER" != "1" ]]; then RM_SETTING=" --rm" fi imageName=${DOCKERHUB_REPO}:1.9-${OS}-${OS_VER}-${CI_CPU_ARCH} containerName=pmdk-${OS}-${OS_VER} if [[ $MAKE_PKG -eq 1 ]] ; then command="./run-build-package.sh" else command="./run-build.sh" fi if [ -n "$DNS_SERVER" ]; then DNS_SETTING=" --dns=$DNS_SERVER "; fi if [ -z "$NDCTL_ENABLE" ]; then ndctl_enable=; else ndctl_enable="--env NDCTL_ENABLE=$NDCTL_ENABLE"; fi WORKDIR=/pmdk SCRIPTSDIR=$WORKDIR/utils/docker # Check if we are running on a CI (Travis or GitHub Actions) [ -n "$GITHUB_ACTIONS" -o -n "$TRAVIS" ] && CI_RUN="YES" || CI_RUN="NO" echo Building ${OS}-${OS_VER} # Run a container with # - environment variables set (--env) # - host directory containing PMDK source mounted (-v) # - a tmpfs /tmp with the necessary size and permissions (--tmpfs)* # - working directory set (-w) # # * We need a tmpfs /tmp inside docker but we cannot run it with --privileged # and do it from inside, so we do using this docker-run option. # By default --tmpfs add nosuid,nodev,noexec to the mount flags, we don't # want that and just to make sure we add the usually default rw,relatime just # in case docker change the defaults. 
docker run --name=$containerName -ti \ $RM_SETTING \ $DNS_SETTING \ --env http_proxy=$http_proxy \ --env https_proxy=$https_proxy \ --env CC=$PMDK_CC \ --env CXX=$PMDK_CXX \ --env VALGRIND=$VALGRIND \ --env EXTRA_CFLAGS=$EXTRA_CFLAGS \ --env EXTRA_CXXFLAGS=$EXTRA_CXXFLAGS \ --env EXTRA_LDFLAGS=$EXTRA_LDFLAGS \ --env REMOTE_TESTS=$REMOTE_TESTS \ --env CONFIGURE_TESTS=$CONFIGURE_TESTS \ --env TEST_BUILD=$TEST_BUILD \ --env WORKDIR=$WORKDIR \ --env EXPERIMENTAL=$EXPERIMENTAL \ --env SCRIPTSDIR=$SCRIPTSDIR \ --env KEEP_TEST_CONFIG=$KEEP_TEST_CONFIG \ --env CI_RUN=$CI_RUN \ --env BLACKLIST_FILE=$BLACKLIST_FILE \ $ndctl_enable \ --tmpfs /tmp:rw,relatime,suid,dev,exec,size=6G \ -v $HOST_WORKDIR:$WORKDIR \ -v /etc/localtime:/etc/localtime \ $DAX_SETTING \ -w $SCRIPTSDIR \ $imageName $command
3,812
33.044643
103
sh
null
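An example (assumed values) of driving build-local.sh above from a local checkout.
# OS/OS_VER must match one of the Dockerfiles under utils/docker/images;
# HOST_WORKDIR points at the root of the PMDK tree on the host.
export OS=ubuntu OS_VER=16.04
export HOST_WORKDIR=$(pwd)
export KEEP_TEST_CONFIG=1      # keep an existing src/test/testconfig.sh
cd utils/docker && ./build-local.sh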
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/run-build.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # run-build.sh - is called inside a Docker container; prepares the environment # and starts a build of PMDK project. # set -e # Prepare build environment ./prepare-for-build.sh # Build all and run tests cd $WORKDIR if [ "$SRC_CHECKERS" != "0" ]; then make -j$(nproc) check-license make -j$(nproc) cstyle fi make -j$(nproc) make -j$(nproc) test # do not change -j2 to -j$(nproc) in case of tests (make check/pycheck) make -j2 pcheck TEST_BUILD=$TEST_BUILD # do not change -j2 to -j$(nproc) in case of tests (make check/pycheck) make -j2 pycheck make -j$(nproc) DESTDIR=/tmp source # Create PR with generated docs if [[ "$AUTO_DOC_UPDATE" == "1" ]]; then echo "Running auto doc update" ./utils/docker/run-doc-update.sh fi
848
23.257143
78
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/prepare-for-build.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # prepare-for-build.sh - is called inside a Docker container; prepares # the environment inside a Docker container for # running build of PMDK project. # set -e # This should be run only on CIs if [ "$CI_RUN" == "YES" ]; then # Make sure $WORKDIR has correct access rights # - set them to the current UID and GID echo $USERPASS | sudo -S chown -R $(id -u).$(id -g) $WORKDIR fi # Configure tests (e.g. ssh for remote tests) unless the current configuration # should be preserved KEEP_TEST_CONFIG=${KEEP_TEST_CONFIG:-0} if [[ "$KEEP_TEST_CONFIG" == 0 ]]; then ./configure-tests.sh fi
739
27.461538
78
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/set-ci-vars.sh
#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020, Intel Corporation # # set-ci-vars.sh -- set CI variables common for both: # Travis and GitHub Actions CIs # set -e function get_commit_range_from_last_merge { # get commit id of the last merge LAST_MERGE=$(git log --merges --pretty=%H -1) LAST_COMMIT=$(git log --pretty=%H -1) if [ "$LAST_MERGE" == "$LAST_COMMIT" ]; then # GitHub Actions commits its own merge in case of pull requests # so the first merge commit has to be skipped. LAST_MERGE=$(git log --merges --pretty=%H -2 | tail -n1) fi if [ "$LAST_MERGE" == "" ]; then # possible in case of shallow clones # or new repos with no merge commits yet # - pick up the first commit LAST_MERGE=$(git log --pretty=%H | tail -n1) fi COMMIT_RANGE="$LAST_MERGE..HEAD" # make sure it works now if ! git rev-list $COMMIT_RANGE >/dev/null; then COMMIT_RANGE="" fi echo $COMMIT_RANGE } COMMIT_RANGE_FROM_LAST_MERGE=$(get_commit_range_from_last_merge) if [ -n "$TRAVIS" ]; then CI_COMMIT=$TRAVIS_COMMIT CI_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" CI_BRANCH=$TRAVIS_BRANCH CI_EVENT_TYPE=$TRAVIS_EVENT_TYPE CI_REPO_SLUG=$TRAVIS_REPO_SLUG # CI_COMMIT_RANGE is usually invalid for force pushes - fix it when used # with non-upstream repository if [ -n "$CI_COMMIT_RANGE" -a "$CI_REPO_SLUG" != "$GITHUB_REPO" ]; then if ! git rev-list $CI_COMMIT_RANGE; then CI_COMMIT_RANGE=$COMMIT_RANGE_FROM_LAST_MERGE fi fi case "$TRAVIS_CPU_ARCH" in "amd64") CI_CPU_ARCH="x86_64" ;; *) CI_CPU_ARCH=$TRAVIS_CPU_ARCH ;; esac elif [ -n "$GITHUB_ACTIONS" ]; then CI_COMMIT=$GITHUB_SHA CI_COMMIT_RANGE=$COMMIT_RANGE_FROM_LAST_MERGE CI_BRANCH=$(echo $GITHUB_REF | cut -d'/' -f3) CI_REPO_SLUG=$GITHUB_REPOSITORY CI_CPU_ARCH="x86_64" # GitHub Actions supports only x86_64 case "$GITHUB_EVENT_NAME" in "schedule") CI_EVENT_TYPE="cron" ;; *) CI_EVENT_TYPE=$GITHUB_EVENT_NAME ;; esac else CI_COMMIT=$(git log --pretty=%H -1) CI_COMMIT_RANGE=$COMMIT_RANGE_FROM_LAST_MERGE CI_CPU_ARCH="x86_64" fi export CI_COMMIT=$CI_COMMIT export CI_COMMIT_RANGE=$CI_COMMIT_RANGE export CI_BRANCH=$CI_BRANCH export CI_EVENT_TYPE=$CI_EVENT_TYPE export CI_REPO_SLUG=$CI_REPO_SLUG export CI_CPU_ARCH=$CI_CPU_ARCH echo CI_COMMIT=$CI_COMMIT echo CI_COMMIT_RANGE=$CI_COMMIT_RANGE echo CI_BRANCH=$CI_BRANCH echo CI_EVENT_TYPE=$CI_EVENT_TYPE echo CI_REPO_SLUG=$CI_REPO_SLUG echo CI_CPU_ARCH=$CI_CPU_ARCH
2,481
24.587629
73
sh
null
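A minimal sketch of consuming set-ci-vars.sh above outside a CI job; the printed values depend entirely on the local git history.
source utils/docker/set-ci-vars.sh
# CI_COMMIT_RANGE falls back to "<last merge>..HEAD" when no CI variables are set
[ -n "$CI_COMMIT_RANGE" ] && git rev-list --count "$CI_COMMIT_RANGE"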
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/build-CI.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # build-CI.sh - runs a Docker container from a Docker image with environment # prepared for building PMDK project and starts building PMDK # # this script is used for building PMDK on Travis and GitHub Actions CIs # set -e source $(dirname $0)/set-ci-vars.sh source $(dirname $0)/set-vars.sh source $(dirname $0)/valid-branches.sh if [[ "$CI_EVENT_TYPE" != "cron" && "$CI_BRANCH" != "coverity_scan" \ && "$COVERITY" -eq 1 ]]; then echo "INFO: Skip Coverity scan job if build is triggered neither by " \ "'cron' nor by a push to 'coverity_scan' branch" exit 0 fi if [[ ( "$CI_EVENT_TYPE" == "cron" || "$CI_BRANCH" == "coverity_scan" )\ && "$COVERITY" -ne 1 ]]; then echo "INFO: Skip regular jobs if build is triggered either by 'cron'" \ " or by a push to 'coverity_scan' branch" exit 0 fi if [[ -z "$OS" || -z "$OS_VER" ]]; then echo "ERROR: The variables OS and OS_VER have to be set properly " \ "(eg. OS=ubuntu, OS_VER=16.04)." exit 1 fi if [[ -z "$HOST_WORKDIR" ]]; then echo "ERROR: The variable HOST_WORKDIR has to contain a path to " \ "the root of the PMDK project on the host machine" exit 1 fi if [[ -z "$TEST_BUILD" ]]; then TEST_BUILD=all fi imageName=${DOCKERHUB_REPO}:1.9-${OS}-${OS_VER}-${CI_CPU_ARCH} containerName=pmdk-${OS}-${OS_VER} if [[ $MAKE_PKG -eq 0 ]] ; then command="./run-build.sh"; fi if [[ $MAKE_PKG -eq 1 ]] ; then command="./run-build-package.sh"; fi if [[ $COVERAGE -eq 1 ]] ; then command="./run-coverage.sh"; ci_env=`bash <(curl -s https://codecov.io/env)`; fi if [[ ( "$CI_EVENT_TYPE" == "cron" || "$CI_BRANCH" == "coverity_scan" )\ && "$COVERITY" -eq 1 ]]; then command="./run-coverity.sh" fi if [ -n "$DNS_SERVER" ]; then DNS_SETTING=" --dns=$DNS_SERVER "; fi if [[ -f $CI_FILE_SKIP_BUILD_PKG_CHECK ]]; then BUILD_PACKAGE_CHECK=n; else BUILD_PACKAGE_CHECK=y; fi if [ -z "$NDCTL_ENABLE" ]; then ndctl_enable=; else ndctl_enable="--env NDCTL_ENABLE=$NDCTL_ENABLE"; fi if [[ $UBSAN -eq 1 ]]; then for x in C CPP LD; do declare EXTRA_${x}FLAGS=-fsanitize=undefined; done; fi # Only run doc update on $GITHUB_REPO master or stable branch if [[ -z "${CI_BRANCH}" || -z "${TARGET_BRANCHES[${CI_BRANCH}]}" || "$CI_EVENT_TYPE" == "pull_request" || "$CI_REPO_SLUG" != "${GITHUB_REPO}" ]]; then AUTO_DOC_UPDATE=0 fi # Check if we are running on a CI (Travis or GitHub Actions) [ -n "$GITHUB_ACTIONS" -o -n "$TRAVIS" ] && CI_RUN="YES" || CI_RUN="NO" # We have a blacklist only for ppc64le arch if [[ "$CI_CPU_ARCH" == ppc64le ]] ; then BLACKLIST_FILE=../../utils/docker/ppc64le.blacklist; fi # docker on travis + ppc64le runs inside an LXD container and for security # limits what can be done inside it, and as such, `docker run` fails with # > the input device is not a TTY # when using -t because of limited permissions to /dev imposed by LXD. if [[ -n "$TRAVIS" && "$CI_CPU_ARCH" == ppc64le ]] || [[ -n "$GITHUB_ACTIONS" ]]; then TTY='' else TTY='-t' fi WORKDIR=/pmdk SCRIPTSDIR=$WORKDIR/utils/docker # Run a container with # - environment variables set (--env) # - host directory containing PMDK source mounted (-v) # - a tmpfs /tmp with the necessary size and permissions (--tmpfs)* # - working directory set (-w) # # * We need a tmpfs /tmp inside docker but we cannot run it with --privileged # and do it from inside, so we do using this docker-run option. 
# By default --tmpfs add nosuid,nodev,noexec to the mount flags, we don't # want that and just to make sure we add the usually default rw,relatime just # in case docker change the defaults. docker run --rm --name=$containerName -i $TTY \ $DNS_SETTING \ $ci_env \ --env http_proxy=$http_proxy \ --env https_proxy=$https_proxy \ --env AUTO_DOC_UPDATE=$AUTO_DOC_UPDATE \ --env CC=$PMDK_CC \ --env CXX=$PMDK_CXX \ --env VALGRIND=$VALGRIND \ --env EXTRA_CFLAGS=$EXTRA_CFLAGS \ --env EXTRA_CXXFLAGS=$EXTRA_CXXFLAGS \ --env EXTRA_LDFLAGS=$EXTRA_LDFLAGS \ --env REMOTE_TESTS=$REMOTE_TESTS \ --env TEST_BUILD=$TEST_BUILD \ --env WORKDIR=$WORKDIR \ --env EXPERIMENTAL=$EXPERIMENTAL \ --env BUILD_PACKAGE_CHECK=$BUILD_PACKAGE_CHECK \ --env SCRIPTSDIR=$SCRIPTSDIR \ --env TRAVIS=$TRAVIS \ --env CI_COMMIT_RANGE=$CI_COMMIT_RANGE \ --env CI_COMMIT=$CI_COMMIT \ --env CI_REPO_SLUG=$CI_REPO_SLUG \ --env CI_BRANCH=$CI_BRANCH \ --env CI_EVENT_TYPE=$CI_EVENT_TYPE \ --env DOC_UPDATE_GITHUB_TOKEN=$DOC_UPDATE_GITHUB_TOKEN \ --env COVERITY_SCAN_TOKEN=$COVERITY_SCAN_TOKEN \ --env COVERITY_SCAN_NOTIFICATION_EMAIL=$COVERITY_SCAN_NOTIFICATION_EMAIL \ --env FAULT_INJECTION=$FAULT_INJECTION \ --env GITHUB_ACTION=$GITHUB_ACTION \ --env GITHUB_HEAD_REF=$GITHUB_HEAD_REF \ --env GITHUB_REPO=$GITHUB_REPO \ --env GITHUB_REPOSITORY=$GITHUB_REPOSITORY \ --env GITHUB_REF=$GITHUB_REF \ --env GITHUB_RUN_ID=$GITHUB_RUN_ID \ --env GITHUB_SHA=$GITHUB_SHA \ --env CI_RUN=$CI_RUN \ --env SRC_CHECKERS=$SRC_CHECKERS \ --env BLACKLIST_FILE=$BLACKLIST_FILE \ $ndctl_enable \ --tmpfs /tmp:rw,relatime,suid,dev,exec,size=6G \ -v $HOST_WORKDIR:$WORKDIR \ -v /etc/localtime:/etc/localtime \ -w $SCRIPTSDIR \ $imageName $command
5,193
35.069444
150
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/run-build-package.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2019, Intel Corporation # # run-build-package.sh - is called inside a Docker container; prepares # the environment and starts a build of PMDK project. # set -e # Prepare build environment ./prepare-for-build.sh # Create fake tag, so that package has proper 'version' field git config user.email "[email protected]" git config user.name "test package" git tag -a 1.4.99 -m "1.4" HEAD~1 || true # Build all and run tests cd $WORKDIR export PCHECK_OPTS="-j2 BLACKLIST_FILE=${BLACKLIST_FILE}" make -j$(nproc) $PACKAGE_MANAGER # Install packages if [[ "$PACKAGE_MANAGER" == "dpkg" ]]; then cd $PACKAGE_MANAGER echo $USERPASS | sudo -S dpkg --install *.deb else RPM_ARCH=$(uname -m) cd $PACKAGE_MANAGER/$RPM_ARCH echo $USERPASS | sudo -S rpm --install *.rpm fi # Compile and run standalone test cd $WORKDIR/utils/docker/test_package make -j$(nproc) LIBPMEMOBJ_MIN_VERSION=1.4 ./test_package testfile1 # Use pmreorder installed in the system pmreorder_version="$(pmreorder -v)" pmreorder_pattern="pmreorder\.py .+$" (echo "$pmreorder_version" | grep -Ev "$pmreorder_pattern") && echo "pmreorder version failed" && exit 1 touch testfile2 touch logfile1 pmreorder -p testfile2 -l logfile1
1,293
25.958333
104
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/run-doc-update.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2019-2020, Intel Corporation set -e source `dirname $0`/valid-branches.sh BOT_NAME="pmem-bot" USER_NAME="pmem" REPO_NAME="pmdk" ORIGIN="https://${DOC_UPDATE_GITHUB_TOKEN}@github.com/${BOT_NAME}/${REPO_NAME}" UPSTREAM="https://github.com/${USER_NAME}/${REPO_NAME}" # master or stable-* branch TARGET_BRANCH=${CI_BRANCH} VERSION=${TARGET_BRANCHES[$TARGET_BRANCH]} if [ -z $VERSION ]; then echo "Target location for branch $TARGET_BRANCH is not defined." exit 1 fi # Clone bot repo git clone ${ORIGIN} cd ${REPO_NAME} git remote add upstream ${UPSTREAM} git config --local user.name ${BOT_NAME} git config --local user.email "[email protected]" git remote update git checkout -B ${TARGET_BRANCH} upstream/${TARGET_BRANCH} # Copy man & PR web md cd ./doc make -j$(nproc) web cd .. mv ./doc/web_linux ../ mv ./doc/web_windows ../ mv ./doc/generated/libs_map.yml ../ # Checkout gh-pages and copy docs GH_PAGES_NAME="gh-pages-for-${TARGET_BRANCH}" git checkout -B $GH_PAGES_NAME upstream/gh-pages git clean -dfx rsync -a ../web_linux/ ./manpages/linux/${VERSION}/ rsync -a ../web_windows/ ./manpages/windows/${VERSION}/ \ --exclude='librpmem' \ --exclude='rpmemd' --exclude='pmreorder' \ --exclude='daxio' rm -r ../web_linux rm -r ../web_windows if [ $TARGET_BRANCH = "master" ]; then [ ! -d _data ] && mkdir _data cp ../libs_map.yml _data fi # Add and push changes. # git commit command may fail if there is nothing to commit. # In that case we want to force push anyway (there might be open pull request # with changes which were reverted). git add -A git commit -m "doc: automatic gh-pages docs update" && true git push -f ${ORIGIN} $GH_PAGES_NAME GITHUB_TOKEN=${DOC_UPDATE_GITHUB_TOKEN} hub pull-request -f \ -b ${USER_NAME}:gh-pages \ -h ${BOT_NAME}:${GH_PAGES_NAME} \ -m "doc: automatic gh-pages docs update" && true exit 0
1,924
24
79
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/pull-or-rebuild-image.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # pull-or-rebuild-image.sh - rebuilds the Docker image used in the # current Travis build if necessary. # # The script rebuilds the Docker image if the Dockerfile for the current # OS version (Dockerfile.${OS}-${OS_VER}) or any .sh script from the directory # with Dockerfiles were modified and committed. # # If the Travis build is not of the "pull_request" type (i.e. in case of # merge after pull_request) and it succeed, the Docker image should be pushed # to the Docker Hub repository. An empty file is created to signal that to # further scripts. # # If the Docker image does not have to be rebuilt, it will be pulled from # Docker Hub. # set -e source $(dirname $0)/set-ci-vars.sh source $(dirname $0)/set-vars.sh if [[ "$CI_EVENT_TYPE" != "cron" && "$CI_BRANCH" != "coverity_scan" \ && "$COVERITY" -eq 1 ]]; then echo "INFO: Skip Coverity scan job if build is triggered neither by " \ "'cron' nor by a push to 'coverity_scan' branch" exit 0 fi if [[ ( "$CI_EVENT_TYPE" == "cron" || "$CI_BRANCH" == "coverity_scan" )\ && "$COVERITY" -ne 1 ]]; then echo "INFO: Skip regular jobs if build is triggered either by 'cron'" \ " or by a push to 'coverity_scan' branch" exit 0 fi if [[ -z "$OS" || -z "$OS_VER" ]]; then echo "ERROR: The variables OS and OS_VER have to be set properly " \ "(eg. OS=ubuntu, OS_VER=16.04)." exit 1 fi if [[ -z "$HOST_WORKDIR" ]]; then echo "ERROR: The variable HOST_WORKDIR has to contain a path to " \ "the root of the PMDK project on the host machine" exit 1 fi # Find all the commits for the current build if [ -n "$CI_COMMIT_RANGE" ]; then commits=$(git rev-list $CI_COMMIT_RANGE) else commits=$CI_COMMIT fi echo "Commits in the commit range:" for commit in $commits; do echo $commit; done # Get the list of files modified by the commits files=$(for commit in $commits; do git diff-tree --no-commit-id --name-only \ -r $commit; done | sort -u) echo "Files modified within the commit range:" for file in $files; do echo $file; done # Path to directory with Dockerfiles and image building scripts images_dir_name=images base_dir=utils/docker/$images_dir_name # Check if committed file modifications require the Docker image to be rebuilt for file in $files; do # Check if modified files are relevant to the current build if [[ $file =~ ^($base_dir)\/Dockerfile\.($OS)-($OS_VER)$ ]] \ || [[ $file =~ ^($base_dir)\/.*\.sh$ ]] then # Rebuild Docker image for the current OS version echo "Rebuilding the Docker image for the Dockerfile.$OS-$OS_VER" pushd $images_dir_name ./build-image.sh ${OS}-${OS_VER} ${CI_CPU_ARCH} popd # Check if the image has to be pushed to Docker Hub # (i.e. the build is triggered by commits to the $GITHUB_REPO # repository's stable-* or master branch, and the Travis build is not # of the "pull_request" type). In that case, create the empty # file. if [[ "$CI_REPO_SLUG" == "$GITHUB_REPO" \ && ($CI_BRANCH == stable-* || $CI_BRANCH == devel-* || $CI_BRANCH == master) \ && $CI_EVENT_TYPE != "pull_request" \ && $PUSH_IMAGE == "1" ]] then echo "The image will be pushed to Docker Hub" touch $CI_FILE_PUSH_IMAGE_TO_REPO else echo "Skip pushing the image to Docker Hub" fi if [[ $PUSH_IMAGE == "1" ]] then echo "Skip build package check if image has to be pushed" touch $CI_FILE_SKIP_BUILD_PKG_CHECK fi exit 0 fi done # Getting here means rebuilding the Docker image is not required. # Pull the image from Docker Hub. docker pull ${DOCKERHUB_REPO}:1.9-${OS}-${OS_VER}-${CI_CPU_ARCH}
3,681
31.584071
81
sh
null
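A stand-alone illustration (assumed file names) of the path test that decides whether pull-or-rebuild-image.sh above rebuilds the Docker image instead of pulling it.
OS=ubuntu OS_VER=16.04 base_dir=utils/docker/images
for file in utils/docker/images/Dockerfile.ubuntu-16.04 src/libpmem/pmem.c; do
	if [[ $file =~ ^($base_dir)\/Dockerfile\.($OS)-($OS_VER)$ ]] \
		|| [[ $file =~ ^($base_dir)\/.*\.sh$ ]]; then
		echo "$file -> rebuild the image"
	else
		echo "$file -> pull from Docker Hub"
	fi
done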
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/valid-branches.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2020, Intel Corporation declare -A TARGET_BRANCHES=( \ ["master"]="master" \ ["stable-1.5"]="v1.5" \ ["stable-1.6"]="v1.6" \ ["stable-1.7"]="v1.7" \ ["stable-1.8"]="v1.8" \ ["stable-1.9"]="v1.9" \ )
291
21.461538
40
sh
null
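A sketch of how the TARGET_BRANCHES map above is consumed (mirroring run-doc-update.sh): the branch name selects the docs directory version.
source utils/docker/valid-branches.sh
TARGET_BRANCH=stable-1.8
echo "${TARGET_BRANCHES[$TARGET_BRANCH]}"   # -> v1.8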
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/configure-tests.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # configure-tests.sh - is called inside a Docker container; configures tests # and ssh server for use during build of PMDK project. # set -e # Configure tests cat << EOF > $WORKDIR/src/test/testconfig.sh LONGDIR=LoremipsumdolorsitametconsecteturadipiscingelitVivamuslacinianibhattortordictumsollicitudinNullamvariusvestibulumligulaetegestaselitsemperidMaurisultriciesligulaeuipsumtinciduntluctusMorbimaximusvariusdolorid # this path is ~3000 characters long DIRSUFFIX="$LONGDIR/$LONGDIR/$LONGDIR/$LONGDIR/$LONGDIR" NON_PMEM_FS_DIR=/tmp PMEM_FS_DIR=/tmp PMEM_FS_DIR_FORCE_PMEM=1 TEST_BUILD="debug nondebug" ENABLE_SUDO_TESTS=y TM=1 EOF # Configure remote tests if [[ $REMOTE_TESTS -eq 1 ]]; then echo "Configuring remote tests" cat << EOF >> $WORKDIR/src/test/testconfig.sh NODE[0]=127.0.0.1 NODE_WORKING_DIR[0]=/tmp/node0 NODE_ADDR[0]=127.0.0.1 NODE_ENV[0]="PMEM_IS_PMEM_FORCE=1" NODE[1]=127.0.0.1 NODE_WORKING_DIR[1]=/tmp/node1 NODE_ADDR[1]=127.0.0.1 NODE_ENV[1]="PMEM_IS_PMEM_FORCE=1" NODE[2]=127.0.0.1 NODE_WORKING_DIR[2]=/tmp/node2 NODE_ADDR[2]=127.0.0.1 NODE_ENV[2]="PMEM_IS_PMEM_FORCE=1" NODE[3]=127.0.0.1 NODE_WORKING_DIR[3]=/tmp/node3 NODE_ADDR[3]=127.0.0.1 NODE_ENV[3]="PMEM_IS_PMEM_FORCE=1" TEST_BUILD="debug nondebug" TEST_PROVIDERS=sockets EOF mkdir -p ~/.ssh/cm cat << EOF >> ~/.ssh/config Host 127.0.0.1 StrictHostKeyChecking no ControlPath ~/.ssh/cm/%r@%h:%p ControlMaster auto ControlPersist 10m EOF if [ ! -f /etc/ssh/ssh_host_rsa_key ] then (echo $USERPASS | sudo -S ssh-keygen -t rsa -C $USER@$HOSTNAME -P '' -f /etc/ssh/ssh_host_rsa_key) fi echo $USERPASS | sudo -S sh -c 'cat /etc/ssh/ssh_host_rsa_key.pub >> /etc/ssh/authorized_keys' ssh-keygen -t rsa -C $USER@$HOSTNAME -P '' -f ~/.ssh/id_rsa cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys chmod -R 700 ~/.ssh chmod 640 ~/.ssh/authorized_keys chmod 600 ~/.ssh/config # Start ssh service echo $USERPASS | sudo -S $START_SSH_COMMAND ssh 127.0.0.1 exit 0 else echo "Skipping remote tests" echo echo "Removing all libfabric.pc files in order to simulate that libfabric is not installed:" find /usr -name "libfabric.pc" 2>/dev/null || true echo $USERPASS | sudo -S sh -c 'find /usr -name "libfabric.pc" -exec rm -f {} + 2>/dev/null' fi # Configure python tests cat << EOF >> $WORKDIR/src/test/testconfig.py config = { 'unittest_log_level': 1, 'cacheline_fs_dir': '/tmp', 'force_cacheline': True, 'page_fs_dir': '/tmp', 'force_page': False, 'byte_fs_dir': '/tmp', 'force_byte': True, 'tm': True, 'test_type': 'check', 'granularity': 'all', 'fs_dir_force_pmem': 0, 'keep_going': False, 'timeout': '3m', 'build': ['debug', 'release'], 'force_enable': None, 'device_dax_path': [], 'fail_on_skip': False, 'enable_admin_tests': True } EOF
2,886
26.235849
216
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/set-vars.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2019, Intel Corporation # # set-vars.sh - set required environment variables # set -e export CI_FILE_PUSH_IMAGE_TO_REPO=/tmp/push_image_to_repo_flag export CI_FILE_SKIP_BUILD_PKG_CHECK=/tmp/skip_build_package_check
290
21.384615
65
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/run-coverity.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2017-2020, Intel Corporation # # run-coverity.sh - runs the Coverity scan build # set -e if [[ "$CI_REPO_SLUG" != "$GITHUB_REPO" \ && ( "$COVERITY_SCAN_NOTIFICATION_EMAIL" == "" \ || "$COVERITY_SCAN_TOKEN" == "" ) ]]; then echo echo "Skipping Coverity build:"\ "COVERITY_SCAN_TOKEN=\"$COVERITY_SCAN_TOKEN\" or"\ "COVERITY_SCAN_NOTIFICATION_EMAIL="\ "\"$COVERITY_SCAN_NOTIFICATION_EMAIL\" is not set" exit 0 fi # Prepare build environment ./prepare-for-build.sh CERT_FILE=/etc/ssl/certs/ca-certificates.crt TEMP_CF=$(mktemp) cp $CERT_FILE $TEMP_CF # Download Coverity certificate echo -n | openssl s_client -connect scan.coverity.com:443 | \ sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | \ tee -a $TEMP_CF echo $USERPASS | sudo -S mv $TEMP_CF $CERT_FILE export COVERITY_SCAN_PROJECT_NAME="$CI_REPO_SLUG" [[ "$CI_EVENT_TYPE" == "cron" ]] \ && export COVERITY_SCAN_BRANCH_PATTERN="master" \ || export COVERITY_SCAN_BRANCH_PATTERN="coverity_scan" export COVERITY_SCAN_BUILD_COMMAND="make -j$(nproc) all" cd $WORKDIR # # Run the Coverity scan # # The 'travisci_build_coverity_scan.sh' script requires the following # environment variables to be set: # - TRAVIS_BRANCH - has to contain the name of the current branch # - TRAVIS_PULL_REQUEST - has to be set to 'true' in case of pull requests # export TRAVIS_BRANCH=${CI_BRANCH} [ "${CI_EVENT_TYPE}" == "pull_request" ] && export TRAVIS_PULL_REQUEST="true" # XXX: Patch the Coverity script. # Recently, this script regularly exits with an error, even though # the build is successfully submitted. Probably because the status code # is missing in response, or it's not 201. # Changes: # 1) change the expected status code to 200 and # 2) print the full response string. # # This change should be reverted when the Coverity script is fixed. # # The previous version was: # curl -s https://scan.coverity.com/scripts/travisci_build_coverity_scan.sh | bash wget https://scan.coverity.com/scripts/travisci_build_coverity_scan.sh patch < utils/docker/0001-travis-fix-travisci_build_coverity_scan.sh.patch bash ./travisci_build_coverity_scan.sh
2,196
29.513889
82
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/run-coverage.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2017-2019, Intel Corporation # # run-coverage.sh - is called inside a Docker container; runs the coverage # test # set -e # Get and prepare PMDK source ./prepare-for-build.sh # Hush error messages, mainly from Valgrind export UT_DUMP_LINES=0 # Skip printing mismatched files for tests with Valgrind export UT_VALGRIND_SKIP_PRINT_MISMATCHED=1 # Build all and run tests cd $WORKDIR make -j$(nproc) COVERAGE=1 make -j$(nproc) test COVERAGE=1 # XXX: unfortunately valgrind raports issues in coverage instrumentation # which we have to ignore (-k flag), also there is dependency between # local and remote tests (which cannot be easily removed) we have to # run local and remote tests separately cd src/test # do not change -j2 to -j$(nproc) in case of tests (make check/pycheck) make -kj2 pcheck-local-quiet TEST_BUILD=debug || true make check-remote-quiet TEST_BUILD=debug || true # do not change -j2 to -j$(nproc) in case of tests (make check/pycheck) make -j2 pycheck TEST_BUILD=debug || true cd ../.. bash <(curl -s https://codecov.io/bash)
1,138
28.973684
74
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/images/install-valgrind.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # install-valgrind.sh - installs valgrind for persistent memory # set -e OS=$1 install_upstream_from_distro() { case "$OS" in fedora) dnf install -y valgrind ;; ubuntu) apt-get install -y --no-install-recommends valgrind ;; *) return 1 ;; esac } install_upstream_3_16_1() { git clone git://sourceware.org/git/valgrind.git cd valgrind # valgrind v3.16.1 upstream git checkout VALGRIND_3_16_BRANCH ./autogen.sh ./configure make -j$(nproc) make -j$(nproc) install cd .. rm -rf valgrind } install_custom-pmem_from_source() { git clone https://github.com/pmem/valgrind.git cd valgrind # valgrind v3.15 with pmemcheck # 2020.04.01 Merge pull request #78 from marcinslusarz/opt3 git checkout 759686fd66cc0105df8311cfe676b0b2f9e89196 ./autogen.sh ./configure make -j$(nproc) make -j$(nproc) install cd .. rm -rf valgrind } ARCH=$(uname -m) case "$ARCH" in ppc64le) install_upstream_3_16_1 ;; *) install_custom-pmem_from_source ;; esac
1,099
19.754717
66
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/images/build-image.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # build-image.sh <OS-VER> <ARCH> - prepares a Docker image with <OS>-based # environment intended for the <ARCH> CPU architecture # designed for building PMDK project, according to # the Dockerfile.<OS-VER> file located in the same directory. # # The script can be run locally. # set -e OS_VER=$1 CPU_ARCH=$2 function usage { echo "Usage:" echo " build-image.sh <OS-VER> <ARCH>" echo "where:" echo " <OS-VER> - can be for example 'ubuntu-19.10' provided "\ "a Dockerfile named 'Dockerfile.ubuntu-19.10' "\ "exists in the current directory and" echo " <ARCH> - is a CPU architecture, for example 'x86_64'" } # Check if two first arguments are not empty if [[ -z "$2" ]]; then usage exit 1 fi # Check if the file Dockerfile.OS-VER exists if [[ ! -f "Dockerfile.$OS_VER" ]]; then echo "Error: Dockerfile.$OS_VER does not exist." echo usage exit 1 fi if [[ -z "${DOCKERHUB_REPO}" ]]; then echo "Error: DOCKERHUB_REPO environment variable is not set" exit 1 fi # Build a Docker image tagged with ${DOCKERHUB_REPO}:OS-VER-ARCH tag=${DOCKERHUB_REPO}:1.9-${OS_VER}-${CPU_ARCH} docker build -t $tag \ --build-arg http_proxy=$http_proxy \ --build-arg https_proxy=$https_proxy \ -f Dockerfile.$OS_VER .
1,373
24.444444
76
sh
null
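A hypothetical invocation of build-image.sh above, run from utils/docker/images where the Dockerfiles live.
export DOCKERHUB_REPO=pmem/pmdk
./build-image.sh ubuntu-19.10 x86_64
docker images "$DOCKERHUB_REPO"   # expect a 1.9-ubuntu-19.10-x86_64 tag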
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/images/install-libfabric.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # install-libfabric.sh - installs a customized version of libfabric # set -e OS=$1 # Keep in sync with requirements in src/common.inc. libfabric_ver=1.4.2 libfabric_url=https://github.com/ofiwg/libfabric/archive libfabric_dir=libfabric-$libfabric_ver libfabric_tarball=v${libfabric_ver}.zip wget "${libfabric_url}/${libfabric_tarball}" unzip $libfabric_tarball cd $libfabric_dir # XXX HACK HACK HACK # Disable use of spin locks in libfabric. # # spinlocks do not play well (IOW at all) with cpu-constrained environments, # like GitHub Actions, and this leads to timeouts of some PMDK's tests. # This change speeds up pmempool_sync_remote/TEST28-31 by a factor of 20-30. # perl -pi -e 's/have_spinlock=1/have_spinlock=0/' configure.ac # XXX HACK HACK HACK ./autogen.sh ./configure --prefix=/usr --enable-sockets make -j$(nproc) make -j$(nproc) install cd .. rm -f ${libfabric_tarball} rm -rf ${libfabric_dir}
1,019
23.878049
76
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/images/install-libndctl.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2017-2019, Intel Corporation # # install-libndctl.sh - installs libndctl # set -e OS=$2 echo "==== clone ndctl repo ====" git clone https://github.com/pmem/ndctl.git cd ndctl git checkout $1 if [ "$OS" = "fedora" ]; then echo "==== setup rpmbuild tree ====" rpmdev-setuptree RPMDIR=$HOME/rpmbuild/ VERSION=$(./git-version) SPEC=./rhel/ndctl.spec echo "==== create source tarball =====" git archive --format=tar --prefix="ndctl-${VERSION}/" HEAD | gzip > "$RPMDIR/SOURCES/ndctl-${VERSION}.tar.gz" echo "==== build ndctl ====" ./autogen.sh ./configure --disable-docs make -j$(nproc) echo "==== build ndctl packages ====" rpmbuild -ba $SPEC echo "==== install ndctl packages ====" RPM_ARCH=$(uname -m) rpm -i $RPMDIR/RPMS/$RPM_ARCH/*.rpm echo "==== cleanup ====" rm -rf $RPMDIR else echo "==== build ndctl ====" ./autogen.sh ./configure --disable-docs make -j$(nproc) echo "==== install ndctl ====" make -j$(nproc) install echo "==== cleanup ====" fi cd .. rm -rf ndctl
1,057
16.344262
109
sh
null
NearPMSW-main/nearpmMDsync/logging/pmdk/utils/docker/images/push-image.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # push-image.sh - pushes the Docker image to the Docker Hub. # # The script utilizes $DOCKERHUB_USER and $DOCKERHUB_PASSWORD variables # to log in to Docker Hub. The variables can be set in the Travis project's # configuration for automated builds. # set -e source $(dirname $0)/../set-ci-vars.sh if [[ -z "$OS" ]]; then echo "OS environment variable is not set" exit 1 fi if [[ -z "$OS_VER" ]]; then echo "OS_VER environment variable is not set" exit 1 fi if [[ -z "$CI_CPU_ARCH" ]]; then echo "CI_CPU_ARCH environment variable is not set" exit 1 fi if [[ -z "${DOCKERHUB_REPO}" ]]; then echo "DOCKERHUB_REPO environment variable is not set" exit 1 fi TAG="1.9-${OS}-${OS_VER}-${CI_CPU_ARCH}" # Check if the image tagged with pmdk/OS-VER exists locally if [[ ! $(docker images -a | awk -v pattern="^${DOCKERHUB_REPO}:${TAG}\$" \ '$1":"$2 ~ pattern') ]] then echo "ERROR: Docker image tagged ${DOCKERHUB_REPO}:${TAG} does not exist locally." exit 1 fi # Log in to the Docker Hub docker login -u="$DOCKERHUB_USER" -p="$DOCKERHUB_PASSWORD" # Push the image to the repository docker push ${DOCKERHUB_REPO}:${TAG}
1,236
22.788462
84
sh
null
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/run.sh
sudo mount -o dax /dev/pmem0 /mnt/pmem sudo rm -rf /mnt/pmem/* sudo chown oem /mnt/pmem #./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 #sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --num=10000 --engine=cmap --benchmarks=fillseq,fillrandom,overwrite > out make bench sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --num=10000 --engine=cmap --benchmarks=fillseq > out #grep "timecp" out > time #awk '{sum+= $3;} END{print sum;}' time redo=$(grep -w "redo" out | tail -1 | awk '{print $NF/2000000000}') redoclob=$(grep -w "redoclob" out | tail -1 | awk '{print $NF/2000000000}') ulog=$(grep timelog out | tail -1 | awk '{print $NF}') sumulog=$(echo $ulog $redo $redoclob| awk '{print $1 + $2 + $3}') echo "" echo $1'log' $sumulog tottime=$(tail -1 out | awk '{print $NF}' | cut -d "," -f10|awk '{print $1/100}') echo $1'tottime' $tottime
875
47.666667
135
sh
null
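A stand-alone illustration of the time extraction in run.sh above, fed fabricated log lines purely to show the arithmetic; the division by 2000000000 presumably converts a 2 GHz cycle counter to seconds.
printf 'redo 4000000000\ntimelog 1.5\n' > out
redo=$(grep -w "redo" out | tail -1 | awk '{print $NF/2000000000}')   # 2
ulog=$(grep timelog out | tail -1 | awk '{print $NF}')                # 1.5
echo $ulog $redo | awk '{print $1 + $2}'                              # 3.5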
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/runtest.sh
sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillrandom sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,overwrite sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,readseq sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,readrandom sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,readmissing sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,deleteseq sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,deleterandom sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,readwhilewriting sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,readrandomwriterandom sudo ./pmemkv_bench --db=/mnt/pmem/pmemkv --db_size_in_gb=1 --engine=tree3 --benchmarks=fillseq,txfillrandom
1,176
89.538462
117
sh
null
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/setup.sh
sudo mkfs.ext4 /dev/pmem0 sudo mount -o dax /dev/pmem0 /mnt/pmem sudo chown oem /mnt/pmem #cd Research/Research/pmdk/src/examples/libpmemobj/map/ #cat run.sh
159
25.666667
55
sh
null
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/entrypoint.sh
#!/bin/sh -l # # SPDX-License-Identifier: Apache-2.0 # Copyright 2021, Intel Corporation set -e set -x echo "$1" project_dir=${WORKDIR:-/pmemkv-bench} echo "run basic test" pytest-3 -v ${project_dir}/tests/test.py
219
12.75
40
sh
null
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/db_bench.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. // This source code is licensed under the Apache 2.0 License // (found in the LICENSE file in the root directory). // SPDX-License-Identifier: Apache-2.0 // Copyright 2017-2021, Intel Corporation #include <chrono> #include <cstdio> #include <cstdlib> #include <ctime> #include <inttypes.h> #include <iomanip> #include <iostream> #include <memory> #include <sstream> #include <string> #include <sys/types.h> #include <sys/stat.h> #include <vector> #include "csv.h" #include "histogram.h" #include "leveldb/env.h" #include "libpmemkv.hpp" #include "libpmempool.h" #include "mutexlock.h" #include "port/port_posix.h" #include "random.h" #include "testutil.h" static const std::string USAGE = "pmemkv_bench\n" "--engine=<name> (storage engine name, default: cmap)\n" "--db=<location> (path to persistent pool, default: /dev/shm/pmemkv)\n" " (note: file on DAX filesystem, DAX device, or poolset file)\n" "--db_size_in_gb=<integer> (size of persistent pool to create in GB, default: 0)\n" " (note: for existing poolset or device DAX configs use 0 or leave default value)\n" " (note: when pool path is non-existing, value should be > 0)\n" "--histogram=<0|1> (show histograms when reporting latencies)\n" "--num=<integer> (number of keys to place in database, default: 1000000)\n" "--reads=<integer> (number of read operations, default: 1000000)\n" "--threads=<integer> (number of concurrent threads, default: 1)\n" "--key_size=<integer> (size of keys in bytes, default: 16)\n" "--value_size=<integer> (size of values in bytes, default: 100)\n" "--readwritepercent=<integer> (Ratio of reads to reads/writes (expressed " "as percentage) for the ReadRandomWriteRandom workload. The default value " "90 means 90% operations out of all reads and writes operations are reads. " "In other words, 9 gets for every 1 put.) type: int32 default: 90\n" "--tx_size=<integer> (number of elements to insert in a single tx, there will be" "num/tx_size transactions per thread in total, the last tx might be smaller, default: 10)\n" "--disjoint=<0|1> (specifies whether each thread works on disjoint set of keys. " "0 means that all threads read/write to the db using any key between 0 and `num`, so that " "number of ops is `threads` * `num`. 1 means that each thread performs reads/writes using " "only [`thread_id` * `num` / `threads`, (`thread_id` + 1) * `num` / `threads`) subset of keys, " "so that total number of ops is `num`. 
The default value is 0.)\n" "--benchmarks=<name>, (comma-separated list of benchmarks to run)\n" " fillseq (load N values in sequential key order)\n" " fillrandom (load N values in random key order)\n" " overwrite (replace N values in random key order)\n" " readseq (read N values in sequential key order)\n" " readrandom (read N values in random key order)\n" " readmissing (read N missing values in random key order)\n" " deleteseq (delete N values in sequential key order)\n" " deleterandom (delete N values in random key order)\n" " readwhilewriting (1 writer, N threads doing random reads)\n" " readrandomwriterandom (N threads doing random-read, random-write)\n" " txfillrandom (load N values in random key order transactionally)\n"; // Number of key/values to place in database static int FLAGS_num = 1000000; static bool FLAGS_disjoint = false; // Number of read operations to do. If negative, do FLAGS_num reads. static int FLAGS_reads = -1; // Number of concurrent threads to run. static int FLAGS_threads = 1; static int FLAGS_key_size = 16; // Size of each value static int FLAGS_value_size = 100; // Print histogram of operation timings static bool FLAGS_histogram = false; // Use the db with the following name. static const char *FLAGS_db = "/dev/shm/pmemkv"; // Use following size when opening the database. static int FLAGS_db_size_in_gb = 0; static const double FLAGS_compression_ratio = 1.0; static const int FLAGS_ops_between_duration_checks = 1000; static const int FLAGS_duration = 0; static int FLAGS_readwritepercent = 90; static int FLAGS_tx_size = 10; using namespace leveldb; leveldb::Env *g_env = NULL; #if defined(__linux) static Slice TrimSpace(Slice s) { size_t start = 0; while (start < s.size() && isspace(s[start])) { start++; } size_t limit = s.size(); while (limit > start && isspace(s[limit - 1])) { limit--; } return Slice(s.data() + start, limit - start); } #endif // Helper for quickly generating random data. class RandomGenerator { private: std::string data_; unsigned int pos_; public: RandomGenerator() { // We use a limited amount of data over and over again and ensure // that it is larger than the compression window (32KB), and also // large enough to serve all typical value sizes we want to write. Random rnd(301); std::string piece; while (data_.size() < (unsigned)std::max(1048576, FLAGS_value_size)) { // Add a short fragment that is as compressible as specified // by FLAGS_compression_ratio. 
test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece); data_.append(piece); } pos_ = 0; } Slice Generate(unsigned int len) { assert(len <= data_.size()); if (pos_ + len > data_.size()) { pos_ = 0; } pos_ += len; return Slice(data_.data() + pos_ - len, len); } Slice GenerateWithTTL(unsigned int len) { assert(len <= data_.size()); if (pos_ + len > data_.size()) { pos_ = 0; } pos_ += len; return Slice(data_.data() + pos_ - len, len); } }; static void AppendWithSpace(std::string *str, Slice msg) { if (msg.empty()) return; if (!str->empty()) { str->push_back(' '); } str->append(msg.data(), msg.size()); } enum OperationType : unsigned char { kRead = 0, kWrite, kDelete, kSeek, kMerge, kUpdate, }; class BenchmarkLogger { private: struct hist { int id; std::string name; std::string histogram; }; int id = 0; std::vector<hist> histograms; CSV<int> csv = CSV<int>("id"); public: void insert(std::string name, Histogram histogram) { histograms.push_back({id, name, histogram.ToString()}); std::vector<double> percentiles = {50, 75, 90, 99.9, 99.99}; for (double &percentile : percentiles) { csv.insert(id, "Percentilie P" + std::to_string(percentile) + " [micros/op]", histogram.Percentile(percentile)); } csv.insert(id, "Median [micros/op]", histogram.Median()); } template <typename T> void insert(std::string column, T data) { csv.insert(id, column, data); } void insert(std::string column, std::time_t time) { std::ostringstream time_stream; time_stream << std::put_time(std::localtime(&time), "%D %T"); insert(column, time_stream.str()); } void print_histogram() { std::cout << "------------------------------------------------" << std::endl; for (auto &histogram : histograms) { std::cout << "benchmark: " << histogram.id << ", " << histogram.name << std::endl << histogram.histogram << std::endl; } } void print() { csv.print(); } void next_benchmark() { id++; } }; class Stats { private: double start_; double finish_; double seconds_; int done_; int next_report_; int64_t bytes_; double last_op_finish_; Histogram hist_; std::string message_; bool exclude_from_merge_; public: Stats() { Start(); } void Start() { next_report_ = 100; last_op_finish_ = start_; hist_.Clear(); done_ = 0; bytes_ = 0; seconds_ = 0; start_ = g_env->NowMicros(); finish_ = start_; message_.clear(); // When set, stats from this thread won't be merged with others. 
exclude_from_merge_ = false; } void Merge(const Stats &other) { if (other.exclude_from_merge_) return; hist_.Merge(other.hist_); done_ += other.done_; bytes_ += other.bytes_; seconds_ += other.seconds_; if (other.start_ < start_) start_ = other.start_; if (other.finish_ > finish_) finish_ = other.finish_; // Just keep the messages from one thread if (message_.empty()) message_ = other.message_; } void Stop() { finish_ = g_env->NowMicros(); seconds_ = (finish_ - start_) * 1e-6; } void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); } void SetExcludeFromMerge() { exclude_from_merge_ = true; } void FinishedSingleOp() { if (FLAGS_histogram) { double now = g_env->NowMicros(); double micros = now - last_op_finish_; hist_.Add(micros); if (micros > 20000) { fprintf(stderr, "long op: %.1f micros%30s\r", micros, ""); fflush(stderr); } last_op_finish_ = now; } done_++; if (done_ >= next_report_) { if (next_report_ < 1000) next_report_ += 100; else if (next_report_ < 5000) next_report_ += 500; else if (next_report_ < 10000) next_report_ += 1000; else if (next_report_ < 50000) next_report_ += 5000; else if (next_report_ < 100000) next_report_ += 10000; else if (next_report_ < 500000) next_report_ += 50000; else next_report_ += 100000; fprintf(stderr, "... finished %d ops%30s\r", done_, ""); fflush(stderr); } } void AddBytes(int64_t n) { bytes_ += n; } float get_micros_per_op() { // Pretend at least one op was done in case we are running a benchmark // that does not call FinishedSingleOp(). if (done_ < 1) done_ = 1; return seconds_ * 1e6 / done_; } float get_ops_per_sec() { // Pretend at least one op was done in case we are running a benchmark // that does not call FinishedSingleOp(). if (done_ < 1) done_ = 1; double elapsed = (finish_ - start_) * 1e-6; return done_ / elapsed; } float get_throughput() { // Rate and ops/sec is computed on actual elapsed time, not the sum of per-thread // elapsed times. double elapsed = (finish_ - start_) * 1e-6; return (bytes_ / 1048576.0) / elapsed; } std::string get_extra_data() { return message_; } Histogram &get_histogram() { return hist_; } }; // State shared by all concurrent executions of the same benchmark. struct SharedState { port::Mutex mu; port::CondVar cv; int total; // Each thread goes through the following states: // (1) initializing // (2) waiting for others to be initialized // (3) running // (4) done int num_initialized; int num_done; bool start; SharedState() : cv(&mu) { } }; // Per-thread state for concurrent executions of the same benchmark. struct ThreadState { int tid; // 0..n-1 when running in n threads Random rand; // Has different seeds for different threads Stats stats; SharedState *shared; ThreadState(int index) : tid(index), rand(1000 + index) { } }; class Duration { typedef std::chrono::high_resolution_clock::time_point time_point; public: Duration(uint64_t max_seconds, int64_t max_ops, int64_t ops_per_stage = 0) { max_seconds_ = max_seconds; max_ops_ = max_ops; ops_per_stage_ = (ops_per_stage > 0) ? 
ops_per_stage : max_ops; ops_ = 0; start_at_ = std::chrono::high_resolution_clock::now(); } int64_t GetStage() { return std::min(ops_, max_ops_ - 1) / ops_per_stage_; } bool Done(int64_t increment) { if (increment <= 0) increment = 1; // avoid Done(0) and infinite loops ops_ += increment; if (max_seconds_) { // Recheck every appx 1000 ops (exact iff increment is factor of 1000) auto granularity = FLAGS_ops_between_duration_checks; if ((ops_ / granularity) != ((ops_ - increment) / granularity)) { time_point now = std::chrono::high_resolution_clock::now(); return std::chrono::duration_cast<std::chrono::milliseconds>(now - start_at_) .count() >= max_seconds_; } else { return false; } } else { return ops_ > max_ops_; } } private: uint64_t max_seconds_; int64_t max_ops_; int64_t ops_per_stage_; int64_t ops_; time_point start_at_; }; class Benchmark { private: pmem::kv::db *kv_; int num_; int tx_size_; int value_size_; int key_size_; int reads_; int64_t readwrites_; BenchmarkLogger &logger; Slice name; int n; const char *engine; void (Benchmark::*method)(ThreadState *) = NULL; void PrintHeader() { PrintEnvironment(); logger.insert("Path", FLAGS_db); logger.insert("Engine", engine); logger.insert("Keys [bytes each]", FLAGS_key_size); logger.insert("Values [bytes each]", FLAGS_value_size); logger.insert("Entries", num_); logger.insert("RawSize [MB (estimated)]", ((static_cast<int64_t>(FLAGS_key_size + FLAGS_value_size) * num_) / 1048576.0)); PrintWarnings(); } void PrintWarnings() { #if defined(__GNUC__) && !defined(__OPTIMIZE__) fprintf(stdout, "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); #endif #ifndef NDEBUG fprintf(stdout, "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); #endif } void PrintEnvironment() { #if defined(__linux) auto now = std::time(NULL); logger.insert("Date:", now); FILE *cpuinfo = fopen("/proc/cpuinfo", "r"); if (cpuinfo != NULL) { char line[1000]; int num_cpus = 0; std::string cpu_type; std::string cache_size; while (fgets(line, sizeof(line), cpuinfo) != NULL) { const char *sep = strchr(line, ':'); if (sep == NULL) { continue; } Slice key = TrimSpace(Slice(line, sep - 1 - line)); Slice val = TrimSpace(Slice(sep + 1)); if (key == "model name") { ++num_cpus; cpu_type = val.ToString(); } else if (key == "cache size") { cache_size = val.ToString(); } } fclose(cpuinfo); logger.insert("CPU", std::to_string(num_cpus)); logger.insert("CPU model", cpu_type); logger.insert("CPUCache", cache_size); } #endif } public: Benchmark(Slice name, pmem::kv::db *&kv, int num_threads, const char *engine, BenchmarkLogger &logger) : kv_(kv), num_(FLAGS_num), tx_size_(FLAGS_tx_size), value_size_(FLAGS_value_size), key_size_(FLAGS_key_size), reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads), readwrites_(FLAGS_reads < 0 ? 
FLAGS_num : FLAGS_reads), logger(logger), n(num_threads), name(name), engine(engine) { fprintf(stderr, "running %s \n", name.ToString().c_str()); bool fresh_db = false; if (name == Slice("fillseq")) { fresh_db = true; method = &Benchmark::WriteSeq; } else if (name == Slice("fillrandom")) { fresh_db = true; method = &Benchmark::WriteRandom; } else if (name == Slice("txfillrandom")) { fresh_db = true; method = &Benchmark::TxFillRandom; } else if (name == Slice("overwrite")) { method = &Benchmark::WriteRandom; } else if (name == Slice("readseq")) { method = &Benchmark::ReadSeq; } else if (name == Slice("readrandom")) { method = &Benchmark::ReadRandom; } else if (name == Slice("readmissing")) { method = &Benchmark::ReadMissing; } else if (name == Slice("deleteseq")) { method = &Benchmark::DeleteSeq; } else if (name == Slice("deleterandom")) { method = &Benchmark::DeleteRandom; } else if (name == Slice("readwhilewriting")) { ++num_threads; method = &Benchmark::ReadWhileWriting; } else if (name == Slice("readrandomwriterandom")) { method = &Benchmark::ReadRandomWriteRandom; } else { throw std::runtime_error("unknown benchmark: " + name.ToString()); } logger.next_benchmark(); logger.insert("Benchmark", name.ToString()); PrintHeader(); if (fresh_db) { if (kv_ != nullptr) { kv_->close(); delete kv_; kv_ = nullptr; } Create(name.ToString()); kv = kv_; } } Slice AllocateKey(std::unique_ptr<const char[]> &key_guard) { const char *tmp = new char[key_size_]; key_guard.reset(tmp); return Slice(key_guard.get(), key_size_); } void GenerateKeyFromInt(uint64_t v, int64_t num_keys, Slice *key, bool missing = false) { char *start = const_cast<char *>(key->data()); char *pos = start; int bytes_to_fill = std::min(key_size_, 8); if (missing) { int64_t v1 = -v; memcpy(pos, static_cast<void *>(&v1), bytes_to_fill); } else memcpy(pos, static_cast<void *>(&v), bytes_to_fill); pos += bytes_to_fill; if (key_size_ > pos - start) { memset(pos, '0', key_size_ - (pos - start)); } } void Run() { SharedState shared; shared.total = n; shared.num_initialized = 0; shared.num_done = 0; shared.start = false; ThreadArg *arg = new ThreadArg[n]; for (int i = 0; i < n; i++) { arg[i].bm = this; arg[i].method = method; arg[i].shared = &shared; arg[i].thread = new ThreadState(i); arg[i].thread->shared = &shared; g_env->StartThread(ThreadBody, &arg[i]); } shared.mu.Lock(); while (shared.num_initialized < n) { shared.cv.Wait(); } shared.start = true; shared.cv.SignalAll(); while (shared.num_done < n) { shared.cv.Wait(); } shared.mu.Unlock(); for (int i = 1; i < n; i++) { arg[0].thread->stats.Merge(arg[i].thread->stats); } auto thread_stats = arg[0].thread->stats; logger.insert("micros/op (avarage)", thread_stats.get_micros_per_op()); logger.insert("ops/sec", thread_stats.get_ops_per_sec()); logger.insert("throughput [MB/s]", thread_stats.get_throughput()); logger.insert("extra_data", thread_stats.get_extra_data()); if (FLAGS_histogram) { logger.insert(name.ToString(), thread_stats.get_histogram()); } for (int i = 0; i < n; i++) { delete arg[i].thread; } delete[] arg; } private: struct ThreadArg { Benchmark *bm; SharedState *shared; ThreadState *thread; void (Benchmark::*method)(ThreadState *); }; struct DbInserter { DbInserter(pmem::kv::db *db) : db(db) { } pmem::kv::status put(pmem::kv::string_view key, pmem::kv::string_view value) { return db->put(key, value); } pmem::kv::status commit() { return pmem::kv::status::OK; } private: pmem::kv::db *db; }; struct TxInserter { TxInserter(pmem::kv::db *db) : tx(db->tx_begin().get_value()) { } 
pmem::kv::status put(pmem::kv::string_view key, pmem::kv::string_view value) { return tx.put(key, value); } pmem::kv::status commit() { return tx.commit(); } private: pmem::kv::tx tx; }; static void ThreadBody(void *v) { ThreadArg *arg = reinterpret_cast<ThreadArg *>(v); SharedState *shared = arg->shared; ThreadState *thread = arg->thread; { MutexLock l(&shared->mu); shared->num_initialized++; if (shared->num_initialized >= shared->total) { shared->cv.SignalAll(); } while (!shared->start) { shared->cv.Wait(); } } thread->stats.Start(); (arg->bm->*(arg->method))(thread); thread->stats.Stop(); { MutexLock l(&shared->mu); shared->num_done++; if (shared->num_done >= shared->total) { shared->cv.SignalAll(); } } } void Create(std::string name) { assert(kv_ == nullptr); auto start = g_env->NowMicros(); auto size = 512ULL * 1024ULL * 1024ULL * FLAGS_db_size_in_gb; pmem::kv::config cfg; auto cfg_s = cfg.put_string("path", FLAGS_db); if (cfg_s != pmem::kv::status::OK) throw std::runtime_error("putting 'path' to config failed"); cfg_s = cfg.put_uint64("force_create", 1); if (cfg_s != pmem::kv::status::OK) throw std::runtime_error("putting 'force_create' to config failed"); cfg_s = cfg.put_uint64("size", size); if (cfg_s != pmem::kv::status::OK) throw std::runtime_error("putting 'size' to config failed"); /* Check if the path is a directory or a file * (we don't pass filename in case of memkind * based engines, only dir). If it is a file, * remove the previous file with the same name. */ struct stat info; if (stat(FLAGS_db, &info) == 0 && !(info.st_mode & S_IFDIR)) { auto start = g_env->NowMicros(); /* Remove pool file. This should be * implemented using libpmempool for backward * compatibility. */ if (pmempool_rm(FLAGS_db, PMEMPOOL_RM_FORCE) != 0) { throw std::runtime_error(std::string("Cannot remove pool: ") + FLAGS_db); } logger.insert("Remove [millis/op]", ((g_env->NowMicros() - start) * 1e-3)); } kv_ = new pmem::kv::db; auto s = kv_->open(engine, std::move(cfg)); if (s != pmem::kv::status::OK) { fprintf(stderr, "Cannot start engine (%s) for path (%s) with %i GB capacity\n%s\n\nUSAGE: %s", engine, FLAGS_db, FLAGS_db_size_in_gb, pmem::kv::errormsg().c_str(), USAGE.c_str()); exit(-42); } logger.insert("Open [millis/op]", ((g_env->NowMicros() - start) * 1e-3)); } template <typename Inserter = DbInserter> void DoWrite(ThreadState *thread, bool seq) { if (num_ != FLAGS_num) { char msg[100]; snprintf(msg, sizeof(msg), "(%d ops)", num_); thread->stats.AddMessage(msg); } std::unique_ptr<const char[]> key_guard; Slice key = AllocateKey(key_guard); auto num = FLAGS_disjoint ? num_ / FLAGS_threads : num_; auto start = FLAGS_disjoint ? thread->tid * num : 0; auto end = FLAGS_disjoint ? (thread->tid + 1) * num : num_; pmem::kv::status s; int64_t bytes = 0; auto batch_size = std::is_same<Inserter, TxInserter>::value ? tx_size_ : 1; for (int n = start; n < end; n += batch_size) { Inserter inserter(kv_); for (int i = n; i < n + batch_size; i++) { const int k = seq ? 
i : (thread->rand.Next() % num) + start; GenerateKeyFromInt(k, FLAGS_num, &key); std::string value = std::string(); value.append(value_size_, 'X'); s = inserter.put(key.ToString(), value); bytes += value_size_ + key.size(); if (s != pmem::kv::status::OK) { fprintf(stdout, "Out of space at key %i\n", i); exit(1); } } s = inserter.commit(); thread->stats.FinishedSingleOp(); if (s != pmem::kv::status::OK) { fprintf(stdout, "Commit failed at batch %i\n", n); exit(1); } } thread->stats.AddBytes(bytes); } void WriteSeq(ThreadState *thread) { DoWrite<DbInserter>(thread, true); } void WriteRandom(ThreadState *thread) { DoWrite<DbInserter>(thread, false); } void DoRead(ThreadState *thread, bool seq, bool missing) { pmem::kv::status s; int64_t bytes = 0; int found = 0; std::unique_ptr<const char[]> key_guard; Slice key = AllocateKey(key_guard); auto num = FLAGS_disjoint ? reads_ / FLAGS_threads : reads_; auto start = FLAGS_disjoint ? thread->tid * num : 0; auto end = FLAGS_disjoint ? (thread->tid + 1) * num : reads_; for (int i = start; i < end; i++) { const int k = seq ? i : (thread->rand.Next() % num) + start; GenerateKeyFromInt(k, FLAGS_num, &key, missing); std::string value; if (kv_->get(key.ToString(), &value) == pmem::kv::status::OK) found++; thread->stats.FinishedSingleOp(); bytes += value.length() + key.size(); } thread->stats.AddBytes(bytes); char msg[100]; snprintf(msg, sizeof(msg), "(%d of %d found by one thread)", found, reads_); thread->stats.AddMessage(msg); } void ReadSeq(ThreadState *thread) { DoRead(thread, true, false); } void ReadRandom(ThreadState *thread) { DoRead(thread, false, false); } void ReadMissing(ThreadState *thread) { DoRead(thread, false, true); } void DoDelete(ThreadState *thread, bool seq) { std::unique_ptr<const char[]> key_guard; Slice key = AllocateKey(key_guard); for (int i = 0; i < num_; i++) { const int k = seq ? i : (thread->rand.Next() % FLAGS_num); GenerateKeyFromInt(k, FLAGS_num, &key); kv_->remove(key.ToString()); thread->stats.FinishedSingleOp(); } } void DeleteSeq(ThreadState *thread) { DoDelete(thread, true); } void DeleteRandom(ThreadState *thread) { DoDelete(thread, false); } void BGWriter(ThreadState *thread, enum OperationType write_merge) { // Special thread that keeps writing until other threads are done. RandomGenerator gen; int64_t bytes = 0; // Don't merge stats from this thread with the readers. 
thread->stats.SetExcludeFromMerge(); std::unique_ptr<const char[]> key_guard; Slice key = AllocateKey(key_guard); uint32_t written = 0; bool hint_printed = false; while (true) { { MutexLock l(&thread->shared->mu); if (thread->shared->num_done + 1 >= thread->shared->num_initialized) { // Finish the write immediately break; } } GenerateKeyFromInt(thread->rand.Next() % FLAGS_num, FLAGS_num, &key); pmem::kv::status s; if (write_merge == kWrite) { s = kv_->put(key.ToString(), gen.Generate(value_size_).ToString()); } else { fprintf(stderr, "Merge operation not supported\n"); exit(1); } written++; if (s != pmem::kv::status::OK) { fprintf(stderr, "Put error\n"); exit(1); } bytes += key.size() + value_size_; } thread->stats.AddBytes(bytes); } void ReadWhileWriting(ThreadState *thread) { if (thread->tid > 0) { ReadRandom(thread); } else { BGWriter(thread, kWrite); } } void ReadRandomWriteRandom(ThreadState *thread) { RandomGenerator gen; std::string value; int64_t found = 0; int get_weight = 0; int put_weight = 0; int64_t reads_done = 0; int64_t writes_done = 0; int64_t bytes = 0; Duration duration(FLAGS_duration, readwrites_); std::unique_ptr<const char[]> key_guard; Slice key = AllocateKey(key_guard); // the number of iterations is the larger of read_ or write_ while (!duration.Done(1)) { GenerateKeyFromInt(thread->rand.Next() % FLAGS_num, FLAGS_num, &key); if (get_weight == 0 && put_weight == 0) { // one batch completed, reinitialize for next batch get_weight = FLAGS_readwritepercent; put_weight = 100 - get_weight; } if (get_weight > 0) { value.clear(); pmem::kv::status s = kv_->get(key.ToString(), &value); if (s == pmem::kv::status::OK) { found++; } else if (s != pmem::kv::status::NOT_FOUND) { fprintf(stderr, "get error\n"); } bytes += value.length() + key.size(); get_weight--; reads_done++; thread->stats.FinishedSingleOp(); } else if (put_weight > 0) { // then do all the corresponding number of puts // for all the gets we have done earlier pmem::kv::status s = kv_->put(key.ToString(), gen.Generate(value_size_).ToString()); if (s != pmem::kv::status::OK) { fprintf(stderr, "put error\n"); exit(1); } bytes += key.size() + value_size_; put_weight--; writes_done++; thread->stats.FinishedSingleOp(); } } thread->stats.AddBytes(bytes); char msg[100]; snprintf(msg, sizeof(msg), "(reads:%" PRIu64 " writes:%" PRIu64 " total:%" PRIu64 " found:%" PRIu64 ")", reads_done, writes_done, readwrites_, found); thread->stats.AddMessage(msg); } void TxFillRandom(ThreadState *thread) { DoWrite<TxInserter>(thread, false); } }; int main(int argc, char **argv) { // Default list of comma-separated operations to run static const char *FLAGS_benchmarks = "fillseq,fillrandom,overwrite,readseq,readrandom,readmissing,deleteseq,deleterandom,readwhilewriting,readrandomwriterandom"; // Default engine name static const char *FLAGS_engine = "cmap"; // Print usage statement if necessary if (argc != 1) { if ((strcmp(argv[1], "?") == 0) || (strcmp(argv[1], "-?") == 0) || (strcmp(argv[1], "h") == 0) || (strcmp(argv[1], "-h") == 0) || (strcmp(argv[1], "-help") == 0) || (strcmp(argv[1], "--help") == 0)) { fprintf(stderr, "%s", USAGE.c_str()); exit(1); } } // Parse command-line arguments for (int i = 1; i < argc; i++) { int n; char junk; if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) { FLAGS_benchmarks = argv[i] + strlen("--benchmarks="); } else if (strncmp(argv[i], "--engine=", 9) == 0) { FLAGS_engine = argv[i] + 9; } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_histogram = n; } 
else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) { FLAGS_num = n; } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) { FLAGS_reads = n; } else if (sscanf(argv[i], "--threads=%d%c", &n, &junk) == 1) { FLAGS_threads = n; } else if (sscanf(argv[i], "--key_size=%d%c", &n, &junk) == 1) { FLAGS_key_size = n; } else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) { FLAGS_value_size = n; } else if (sscanf(argv[i], "--readwritepercent=%d%c", &n, &junk) == 1) { FLAGS_readwritepercent = n; } else if (strncmp(argv[i], "--db=", 5) == 0) { FLAGS_db = argv[i] + 5; } else if (sscanf(argv[i], "--db_size_in_gb=%d%c", &n, &junk) == 1) { FLAGS_db_size_in_gb = n; } else if (sscanf(argv[i], "--tx_size=%d%c", &n, &junk) == 1) { FLAGS_tx_size = n; } else if (sscanf(argv[i], "--disjoint=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_disjoint = n; } else { fprintf(stderr, "Invalid flag '%s'\n", argv[i]); exit(1); } } // Run benchmark against default environment g_env = leveldb::Env::Default(); BenchmarkLogger logger = BenchmarkLogger(); int return_value = 0; pmem::kv::db *kv = NULL; const char *benchmarks = FLAGS_benchmarks; while (benchmarks != NULL) { const char *sep = strchr(benchmarks, ','); Slice name; if (sep == NULL) { name = benchmarks; benchmarks = NULL; } else { name = Slice(benchmarks, sep - benchmarks); benchmarks = sep + 1; } try { auto benchmark = Benchmark(name, kv, FLAGS_threads, FLAGS_engine, logger); benchmark.Run(); } catch (std::exception &e) { std::cerr << e.what() << std::endl; return_value = 1; break; } } if (kv != NULL) { kv->close(); delete kv; } logger.print(); if (FLAGS_histogram) { logger.print_histogram(); } return return_value; }
29,881
25.632799
126
cc
null
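The benchmark's Create()/DoWrite()/DoRead() paths above reduce to a small set of libpmemkv calls (config put_string/put_uint64, db::open, put, get, errormsg). A minimal standalone sketch of that sequence follows; it assumes libpmemkv's C++ header <libpmemkv.hpp>, and the pool path and size are illustrative, not the benchmark's defaults.

// Sketch only: open a "cmap" engine the way Benchmark::Create() does,
// then issue one put/get. Path and size below are illustrative assumptions.
#include <libpmemkv.hpp>
#include <iostream>

int main()
{
	pmem::kv::config cfg;
	if (cfg.put_string("path", "/dev/shm/pmemkv") != pmem::kv::status::OK ||
	    cfg.put_uint64("force_create", 1) != pmem::kv::status::OK ||
	    cfg.put_uint64("size", 512ULL * 1024 * 1024) != pmem::kv::status::OK) {
		std::cerr << "config setup failed" << std::endl;
		return 1;
	}

	pmem::kv::db kv;
	if (kv.open("cmap", std::move(cfg)) != pmem::kv::status::OK) {
		std::cerr << pmem::kv::errormsg() << std::endl;
		return 1;
	}

	kv.put("key1", "value1");            // same call the write benchmarks use
	std::string value;
	if (kv.get("key1", &value) == pmem::kv::status::OK)
		std::cout << "key1 -> " << value << std::endl;

	kv.close();
	return 0;
}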
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/util/csv.h
// SPDX-License-Identifier: Apache-2.0 /* Copyright 2020-2021, Intel Corporation */ #pragma once #include <iostream> #include <map> #include <ostream> #include <set> #include <string> template <typename IdType> class CSV { private: /* Hold data in two-dimensional map of strings: data_matrix[row][column] */ std::map<IdType, std::map<std::string, std::string>> data_matrix; /* List of all columns, which is filled during inserts. Needed for * printing header and data in the same order. * */ std::set<std::string> columns; std::string id_name; public: CSV(std::string id_column_name) : id_name(id_column_name){}; void insert(IdType row, std::string column, std::string data) { columns.insert(column); data_matrix[row][column] = data; } void insert(IdType row, std::string column, const char *data) { insert(row, column, std::string(data)); } template <typename T> void insert(IdType row, std::string column, T data) { insert(row, column, std::to_string(data)); } void print() { // Print first column name std::cout << id_name; for (auto &column : columns) { std::cout << "," << column; } std::cout << "\r\n" << std::flush; for (auto &row : data_matrix) { std::cout << row.first; for (auto &column : columns) { std::cout << "," << data_matrix[row.first][column]; } std::cout << "\r\n" << std::flush; } } };
1,381
21.290323
73
h
null
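A short usage sketch of the CSV<> helper above; the include path is assumed from the file's location under bench/util/, and the row ids and cell values are illustrative.

// Sketch: fill a few cells keyed by an integer row id, then emit CSV.
#include "util/csv.h"   // assumed include path for the header above

int main()
{
	CSV<int> csv("id");                    // first column is named "id"
	csv.insert(0, "Benchmark", "fillseq");
	csv.insert(0, "ops/sec", 123456.0);    // numeric overload goes through std::to_string
	csv.insert(1, "Benchmark", "readrandom");
	csv.print();                           // columns print in std::set (alphabetical) order;
	                                       // cells never inserted come out as empty strings
	return 0;
}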
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/util/env.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. #include "leveldb/env.h" namespace leveldb { Env::~Env() { } Status Env::NewAppendableFile(const std::string &fname, WritableFile **result) { return Status::NotSupported("NewAppendableFile", fname); } SequentialFile::~SequentialFile() { } RandomAccessFile::~RandomAccessFile() { } WritableFile::~WritableFile() { } Logger::~Logger() { } FileLock::~FileLock() { } void Log(Logger *info_log, const char *format, ...) { if (info_log != NULL) { va_list ap; va_start(ap, format); info_log->Logv(format, ap); va_end(ap); } } static Status DoWriteStringToFile(Env *env, const Slice &data, const std::string &fname, bool should_sync) { WritableFile *file; Status s = env->NewWritableFile(fname, &file); if (!s.ok()) { return s; } s = file->Append(data); if (s.ok() && should_sync) { s = file->Sync(); } if (s.ok()) { s = file->Close(); } delete file; // Will auto-close if we did not close above if (!s.ok()) { env->DeleteFile(fname); } return s; } Status WriteStringToFile(Env *env, const Slice &data, const std::string &fname) { return DoWriteStringToFile(env, data, fname, false); } Status WriteStringToFileSync(Env *env, const Slice &data, const std::string &fname) { return DoWriteStringToFile(env, data, fname, true); } Status ReadFileToString(Env *env, const std::string &fname, std::string *data) { data->clear(); SequentialFile *file; Status s = env->NewSequentialFile(fname, &file); if (!s.ok()) { return s; } static const int kBufferSize = 8192; char *space = new char[kBufferSize]; while (true) { Slice fragment; s = file->Read(kBufferSize, &fragment, space); if (!s.ok()) { break; } data->append(fragment.data(), fragment.size()); if (fragment.empty()) { break; } } delete[] space; delete file; return s; } EnvWrapper::~EnvWrapper() { } } // namespace leveldb
2,069
17.648649
106
cc
null
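The free helpers above compose with the default Env as in this sketch; the file name and payload are illustrative.

// Sketch: round-trip a string through Env::Default() using the helpers above.
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include <iostream>

int main()
{
	leveldb::Env *env = leveldb::Env::Default();
	leveldb::Status s = leveldb::WriteStringToFileSync(env, leveldb::Slice("hello"),
							   "/tmp/env_demo.txt");
	if (!s.ok()) {
		std::cerr << s.ToString() << std::endl;
		return 1;
	}
	std::string data;
	s = leveldb::ReadFileToString(env, "/tmp/env_demo.txt", &data);
	std::cout << (s.ok() ? data : s.ToString()) << std::endl;
	env->DeleteFile("/tmp/env_demo.txt");   // best-effort cleanup
	return 0;
}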
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/util/logging.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 /* Copyright 2020, Intel Corporation */ #include "util/logging.h" #include "leveldb/env.h" #include "leveldb/slice.h" #include <errno.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> namespace leveldb { void AppendNumberTo(std::string *str, uint64_t num) { char buf[30]; snprintf(buf, sizeof(buf), "%llu", (unsigned long long)num); str->append(buf); } void AppendEscapedStringTo(std::string *str, const Slice &value) { for (size_t i = 0; i < value.size(); i++) { char c = value[i]; if (c >= ' ' && c <= '~') { str->push_back(c); } else { char buf[10]; snprintf(buf, sizeof(buf), "\\x%02x", static_cast<unsigned int>(c) & 0xff); str->append(buf); } } } std::string NumberToString(uint64_t num) { std::string r; AppendNumberTo(&r, num); return r; } std::string EscapeString(const Slice &value) { std::string r; AppendEscapedStringTo(&r, value); return r; } bool ConsumeDecimalNumber(Slice *in, uint64_t *val) { uint64_t v = 0; int digits = 0; while (!in->empty()) { char c = (*in)[0]; if (c >= '0' && c <= '9') { ++digits; // |delta| intentionally unit64_t to avoid Android crash (see log). const uint64_t delta = (c - '0'); static const uint64_t kMaxUint64 = ~static_cast<uint64_t>(0); if (v > kMaxUint64 / 10 || (v == kMaxUint64 / 10 && delta > kMaxUint64 % 10)) { // Overflow return false; } v = (v * 10) + delta; in->remove_prefix(1); } else { break; } } *val = v; return (digits > 0); } } // namespace leveldb
1,775
20.925926
82
cc
null
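A sketch exercising the two main helpers above; the sample inputs are made up.

// Sketch: escape a binary value and parse a decimal prefix with the helpers above.
#include "util/logging.h"
#include "leveldb/slice.h"
#include <iostream>

int main()
{
	std::string out;
	leveldb::AppendEscapedStringTo(&out, leveldb::Slice("ab\x01"));
	std::cout << out << std::endl;                 // prints: ab\x01

	leveldb::Slice in("123abc");
	uint64_t v = 0;
	if (leveldb::ConsumeDecimalNumber(&in, &v))    // consumes "123", leaves "abc"
		std::cout << v << " rest=" << in.ToString() << std::endl;
	return 0;
}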
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/util/logging.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // Must not be included from any .h files to avoid polluting the namespace // with macros. #ifndef STORAGE_LEVELDB_UTIL_LOGGING_H_ #define STORAGE_LEVELDB_UTIL_LOGGING_H_ #include "port/port_posix.h" #include <stdint.h> #include <stdio.h> #include <string> namespace leveldb { class Slice; class WritableFile; // Append a human-readable printout of "num" to *str extern void AppendNumberTo(std::string *str, uint64_t num); // Append a human-readable printout of "value" to *str. // Escapes any non-printable characters found in "value". extern void AppendEscapedStringTo(std::string *str, const Slice &value); // Return a human-readable printout of "num" extern std::string NumberToString(uint64_t num); // Return a human-readable version of "value". // Escapes any non-printable characters found in "value". extern std::string EscapeString(const Slice &value); // Parse a human-readable number from "*in" into *value. On success, // advances "*in" past the consumed number and sets "*val" to the // numeric value. Otherwise, returns false and leaves *in in an // unspecified state. extern bool ConsumeDecimalNumber(Slice *in, uint64_t *val); } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_LOGGING_H_
1,519
30.666667
81
h
null
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/util/status.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 /* Copyright 2020, Intel Corporation */ #include "leveldb/status.h" #include "port/port_posix.h" #include <stdio.h> namespace leveldb { const char *Status::CopyState(const char *state) { uint32_t size; memcpy(&size, state, sizeof(size)); char *result = new char[size + 5]; memcpy(result, state, size + 5); return result; } Status::Status(Code code, const Slice &msg, const Slice &msg2) { assert(code != kOk); const uint32_t len1 = msg.size(); const uint32_t len2 = msg2.size(); const uint32_t size = len1 + (len2 ? (2 + len2) : 0); char *result = new char[size + 5]; memcpy(result, &size, sizeof(size)); result[4] = static_cast<char>(code); memcpy(result + 5, msg.data(), len1); if (len2) { result[5 + len1] = ':'; result[6 + len1] = ' '; memcpy(result + 7 + len1, msg2.data(), len2); } state_ = result; } std::string Status::ToString() const { if (state_ == NULL) { return "OK"; } else { char tmp[30]; const char *type; switch (code()) { case kOk: type = "OK"; break; case kNotFound: type = "NotFound: "; break; case kCorruption: type = "Corruption: "; break; case kNotSupported: type = "Not implemented: "; break; case kInvalidArgument: type = "Invalid argument: "; break; case kIOError: type = "IO error: "; break; default: snprintf(tmp, sizeof(tmp), "Unknown code(%d): ", static_cast<int>(code())); type = tmp; break; } std::string result(type); uint32_t length; memcpy(&length, state_, sizeof(length)); result.append(state_ + 5, length); return result; } } } // namespace leveldb
1,877
21.902439
81
cc
null
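A sketch of building and printing a Status with the code above; the message text is illustrative.

// Sketch: construct OK and error statuses and render them via ToString().
#include "leveldb/status.h"
#include <iostream>

int main()
{
	leveldb::Status ok = leveldb::Status::OK();
	leveldb::Status err = leveldb::Status::IOError("db_bench", "disk full");
	std::cout << ok.ToString() << std::endl;    // "OK"
	std::cout << err.ToString() << std::endl;   // "IO error: db_bench: disk full"
	std::cout << err.ok() << std::endl;         // 0
	return 0;
}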
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/util/testutil.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_ #define STORAGE_LEVELDB_UTIL_TESTUTIL_H_ #include "leveldb/env.h" #include "leveldb/slice.h" #include "util/random.h" namespace leveldb { namespace test { // Store in *dst a random string of length "len" and return a Slice that // references the generated data. Slice RandomString(Random *rnd, int len, std::string *dst); // Return a random key with the specified length that may contain interesting // characters (e.g. \x00, \xff, etc.). std::string RandomKey(Random *rnd, int len); // Store in *dst a string of length "len" that will compress to // "N*compressed_fraction" bytes and return a Slice that references // the generated data. Slice CompressibleString(Random *rnd, double compressed_fraction, size_t len, std::string *dst); // A wrapper that allows injection of errors. class ErrorEnv : public EnvWrapper { public: bool writable_file_error_; int num_writable_file_errors_; ErrorEnv() : EnvWrapper(Env::Default()), writable_file_error_(false), num_writable_file_errors_(0) { } virtual Status NewWritableFile(const std::string &fname, WritableFile **result) { if (writable_file_error_) { ++num_writable_file_errors_; *result = nullptr; return Status::IOError(fname, "fake error"); } return target()->NewWritableFile(fname, result); } virtual Status NewAppendableFile(const std::string &fname, WritableFile **result) { if (writable_file_error_) { ++num_writable_file_errors_; *result = nullptr; return Status::IOError(fname, "fake error"); } return target()->NewAppendableFile(fname, result); } }; } // namespace test } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_TESTUTIL_H_
1,984
28.191176
99
h
null
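RandomGenerator in the benchmark feeds these helpers; a sketch with an illustrative compression ratio, length, and key size follows.

// Sketch: generate ~50%-compressible data, as RandomGenerator does in db_bench.
#include "util/testutil.h"
#include "util/random.h"
#include <iostream>

int main()
{
	leveldb::Random rnd(301);
	std::string buf;
	leveldb::Slice s = leveldb::test::CompressibleString(&rnd, 0.5, 4096, &buf);
	std::cout << "generated " << s.size() << " bytes, "
		  << leveldb::test::RandomKey(&rnd, 16).size() << "-byte random key"
		  << std::endl;
	return 0;
}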
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/util/mutexlock.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_ #define STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_ #include "port/port_posix.h" #include "port/thread_annotations.h" namespace leveldb { // Helper class that locks a mutex on construction and unlocks the mutex when // the destructor of the MutexLock object is invoked. // // Typical usage: // // void MyClass::MyMethod() { // MutexLock l(&mu_); // mu_ is an instance variable // ... some complex code, possibly with multiple return paths ... // } class SCOPED_LOCKABLE MutexLock { public: explicit MutexLock(port::Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) { this->mu_->Lock(); } ~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); } private: port::Mutex *const mu_; // No copying allowed MutexLock(const MutexLock &); void operator=(const MutexLock &); }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
1,202
24.0625
81
h
null
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/util/histogram.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2017-2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_HISTOGRAM_H_ #define STORAGE_LEVELDB_UTIL_HISTOGRAM_H_ #include <string> namespace leveldb { class Histogram { public: Histogram() { } ~Histogram() { } void Clear(); void Add(double value); void Merge(const Histogram &other); std::string ToString() const; double Median() const; double Percentile(double p) const; double Average() const; double StandardDeviation() const; private: double min_; double max_; double num_; double sum_; double sum_squares_; enum { kNumBuckets = 154 }; static const double kBucketLimit[kNumBuckets]; double buckets_[kNumBuckets]; }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_HISTOGRAM_H_
993
18.490196
81
h
null
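A sketch of the Histogram interface above, mirroring how Stats::FinishedSingleOp() feeds it; the sample latencies are made up.

// Sketch: record a few per-op latencies (in micros) and query percentiles.
#include "util/histogram.h"
#include <iostream>

int main()
{
	leveldb::Histogram hist;
	hist.Clear();                        // members are not initialized by the constructor
	const double samples[] = {12.0, 15.0, 19.0, 250.0, 1300.0};
	for (double micros : samples)
		hist.Add(micros);
	std::cout << "P99 ~= " << hist.Percentile(99) << " micros" << std::endl;
	std::cout << hist.ToString();        // same block of text db_bench prints per benchmark
	return 0;
}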
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/util/testutil.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. #include "util/testutil.h" #include "util/random.h" namespace leveldb { namespace test { Slice RandomString(Random *rnd, int len, std::string *dst) { dst->resize(len); for (int i = 0; i < len; i++) { (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95)); // ' ' .. '~' } return Slice(*dst); } std::string RandomKey(Random *rnd, int len) { // Make sure to generate a wide variety of characters so we // test the boundary conditions for short-key optimizations. static const char kTestChars[] = {'\0', '\1', 'a', 'b', 'c', 'd', 'e', '\xfd', '\xfe', '\xff'}; std::string result; for (int i = 0; i < len; i++) { result += kTestChars[rnd->Uniform(sizeof(kTestChars))]; } return result; } Slice CompressibleString(Random *rnd, double compressed_fraction, size_t len, std::string *dst) { int raw = static_cast<int>(len * compressed_fraction); if (raw < 1) raw = 1; std::string raw_data; RandomString(rnd, raw, &raw_data); // Duplicate the random data until we have filled "len" bytes dst->clear(); while (dst->size() < len) { dst->append(raw_data); } dst->resize(len); return Slice(*dst); } } // namespace test } // namespace leveldb
1,384
24.648148
96
cc
null
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/util/histogram.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 /* Copyright 2020, Intel Corporation */ #include "util/histogram.h" #include "port/port_posix.h" #include <math.h> #include <stdio.h> namespace leveldb { // clang-format off const double Histogram::kBucketLimit[kNumBuckets] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400, 450, 500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 2000, 2500, 3000, 3500, 4000, 4500, 5000, 6000, 7000, 8000, 9000, 10000, 12000, 14000, 16000, 18000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 60000, 70000, 80000, 90000, 100000, 120000, 140000, 160000, 180000, 200000, 250000, 300000, 350000, 400000, 450000, 500000, 600000, 700000, 800000, 900000, 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2500000, 3000000, 3500000, 4000000, 4500000, 5000000, 6000000, 7000000, 8000000, 9000000, 10000000, 12000000, 14000000, 16000000, 18000000, 20000000, 25000000, 30000000, 35000000, 40000000, 45000000, 50000000, 60000000, 70000000, 80000000, 90000000, 100000000, 120000000, 140000000, 160000000, 180000000, 200000000, 250000000, 300000000, 350000000, 400000000, 450000000, 500000000, 600000000, 700000000, 800000000, 900000000, 1000000000, 1200000000, 1400000000, 1600000000, 1800000000, 2000000000, 2500000000.0, 3000000000.0, 3500000000.0, 4000000000.0, 4500000000.0, 5000000000.0, 6000000000.0, 7000000000.0, 8000000000.0, 9000000000.0, 1e200, }; // clang-format on void Histogram::Clear() { min_ = kBucketLimit[kNumBuckets - 1]; max_ = 0; num_ = 0; sum_ = 0; sum_squares_ = 0; for (int i = 0; i < kNumBuckets; i++) { buckets_[i] = 0; } } void Histogram::Add(double value) { // Linear search is fast enough for our usage in db_bench int b = 0; while (b < kNumBuckets - 1 && kBucketLimit[b] <= value) { b++; } buckets_[b] += 1.0; if (min_ > value) min_ = value; if (max_ < value) max_ = value; num_++; sum_ += value; sum_squares_ += (value * value); } void Histogram::Merge(const Histogram &other) { if (other.min_ < min_) min_ = other.min_; if (other.max_ > max_) max_ = other.max_; num_ += other.num_; sum_ += other.sum_; sum_squares_ += other.sum_squares_; for (int b = 0; b < kNumBuckets; b++) { buckets_[b] += other.buckets_[b]; } } double Histogram::Median() const { return Percentile(50.0); } double Histogram::Percentile(double p) const { double threshold = num_ * (p / 100.0); double sum = 0; for (int b = 0; b < kNumBuckets; b++) { sum += buckets_[b]; if (sum >= threshold) { // Scale linearly within this bucket double left_point = (b == 0) ? 
0 : kBucketLimit[b - 1]; double right_point = kBucketLimit[b]; double left_sum = sum - buckets_[b]; double right_sum = sum; double pos = (threshold - left_sum) / (right_sum - left_sum); double r = left_point + (right_point - left_point) * pos; if (r < min_) r = min_; if (r > max_) r = max_; return r; } } return max_; } double Histogram::Average() const { if (num_ == 0.0) return 0; return sum_ / num_; } double Histogram::StandardDeviation() const { if (num_ == 0.0) return 0; double variance = (sum_squares_ * num_ - sum_ * sum_) / (num_ * num_); return sqrt(variance); } std::string Histogram::ToString() const { std::string r; char buf[200]; snprintf(buf, sizeof(buf), "Count: %.0f Average: %.4f StdDev: %.2f\n", num_, Average(), StandardDeviation()); r.append(buf); snprintf(buf, sizeof(buf), "Min: %.4f Median: %.4f Max: %.4f\n", (num_ == 0.0 ? 0.0 : min_), Median(), max_); r.append(buf); snprintf(buf, sizeof(buf), "Percentiles: P50: %.2f P75: %.2f P99: %.2f P99.9: %.2f P99.99: %.2f\n", Percentile(50), Percentile(75), Percentile(99), Percentile(99.9), Percentile(99.99)); r.append(buf); r.append("------------------------------------------------------\n"); const double mult = 100.0 / num_; double sum = 0; for (int b = 0; b < kNumBuckets; b++) { if (buckets_[b] <= 0.0) continue; sum += buckets_[b]; snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ", ((b == 0) ? 0.0 : kBucketLimit[b - 1]), // left kBucketLimit[b], // right buckets_[b], // count mult * buckets_[b], // percentage mult * sum); // cumulative percentage r.append(buf); // Add hash marks based on percentage; 20 marks for 100%. int marks = static_cast<int>(20 * (buckets_[b] / num_) + 0.5); r.append(marks, '#'); r.push_back('\n'); } return r; } } // namespace leveldb
4,793
28.411043
100
cc
null
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/util/env_posix.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 /* Copyright 2020, Intel Corporation */ #include <deque> #include <dirent.h> #include <errno.h> #include <fcntl.h> #include <limits> #include <pthread.h> #include <set> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/mman.h> #include <sys/resource.h> #include <sys/stat.h> #include <sys/time.h> #include <sys/types.h> #include <time.h> #include <unistd.h> #include "leveldb/env.h" #include "leveldb/slice.h" #include "port/port_posix.h" #include "util/env_posix_test_helper.h" #include "util/logging.h" #include "util/mutexlock.h" #include "util/posix_logger.h" namespace leveldb { namespace { static int open_read_only_file_limit = -1; static int mmap_limit = -1; static const size_t kBufSize = 65536; static Status PosixError(const std::string &context, int err_number) { if (err_number == ENOENT) { return Status::NotFound(context, strerror(err_number)); } else { return Status::IOError(context, strerror(err_number)); } } // Helper class to limit resource usage to avoid exhaustion. // Currently used to limit read-only file descriptors and mmap file usage // so that we do not end up running out of file descriptors, virtual memory, // or running into kernel performance problems for very large databases. class Limiter { public: // Limit maximum number of resources to |n|. Limiter(intptr_t n) { SetAllowed(n); } // If another resource is available, acquire it and return true. // Else return false. bool Acquire() { if (GetAllowed() <= 0) { return false; } MutexLock l(&mu_); intptr_t x = GetAllowed(); if (x <= 0) { return false; } else { SetAllowed(x - 1); return true; } } // Release a resource acquired by a previous call to Acquire() that returned // true. void Release() { MutexLock l(&mu_); SetAllowed(GetAllowed() + 1); } private: port::Mutex mu_; port::AtomicPointer allowed_; intptr_t GetAllowed() const { return reinterpret_cast<intptr_t>(allowed_.Acquire_Load()); } // REQUIRES: mu_ must be held void SetAllowed(intptr_t v) { allowed_.Release_Store(reinterpret_cast<void *>(v)); } Limiter(const Limiter &); void operator=(const Limiter &); }; class PosixSequentialFile : public SequentialFile { private: std::string filename_; int fd_; public: PosixSequentialFile(const std::string &fname, int fd) : filename_(fname), fd_(fd) { } virtual ~PosixSequentialFile() { close(fd_); } virtual Status Read(size_t n, Slice *result, char *scratch) { Status s; while (true) { ssize_t r = read(fd_, scratch, n); if (r < 0) { if (errno == EINTR) { continue; // Retry } s = PosixError(filename_, errno); break; } *result = Slice(scratch, r); break; } return s; } virtual Status Skip(uint64_t n) { if (lseek(fd_, n, SEEK_CUR) == static_cast<off_t>(-1)) { return PosixError(filename_, errno); } return Status::OK(); } }; // pread() based random-access class PosixRandomAccessFile : public RandomAccessFile { private: std::string filename_; bool temporary_fd_; // If true, fd_ is -1 and we open on every read. int fd_; Limiter *limiter_; public: PosixRandomAccessFile(const std::string &fname, int fd, Limiter *limiter) : filename_(fname), fd_(fd), limiter_(limiter) { temporary_fd_ = !limiter->Acquire(); if (temporary_fd_) { // Open file on every access. 
close(fd_); fd_ = -1; } } virtual ~PosixRandomAccessFile() { if (!temporary_fd_) { close(fd_); limiter_->Release(); } } virtual Status Read(uint64_t offset, size_t n, Slice *result, char *scratch) const { int fd = fd_; if (temporary_fd_) { fd = open(filename_.c_str(), O_RDONLY); if (fd < 0) { return PosixError(filename_, errno); } } Status s; ssize_t r = pread(fd, scratch, n, static_cast<off_t>(offset)); *result = Slice(scratch, (r < 0) ? 0 : r); if (r < 0) { // An error: return a non-ok status s = PosixError(filename_, errno); } if (temporary_fd_) { // Close the temporary file descriptor opened earlier. close(fd); } return s; } }; // mmap() based random-access class PosixMmapReadableFile : public RandomAccessFile { private: std::string filename_; void *mmapped_region_; size_t length_; Limiter *limiter_; public: // base[0,length-1] contains the mmapped contents of the file. PosixMmapReadableFile(const std::string &fname, void *base, size_t length, Limiter *limiter) : filename_(fname), mmapped_region_(base), length_(length), limiter_(limiter) { } virtual ~PosixMmapReadableFile() { munmap(mmapped_region_, length_); limiter_->Release(); } virtual Status Read(uint64_t offset, size_t n, Slice *result, char *scratch) const { Status s; if (offset + n > length_) { *result = Slice(); s = PosixError(filename_, EINVAL); } else { *result = Slice(reinterpret_cast<char *>(mmapped_region_) + offset, n); } return s; } }; class PosixWritableFile : public WritableFile { private: // buf_[0, pos_-1] contains data to be written to fd_. std::string filename_; int fd_; char buf_[kBufSize]; size_t pos_; public: PosixWritableFile(const std::string &fname, int fd) : filename_(fname), fd_(fd), pos_(0) { } ~PosixWritableFile() { if (fd_ >= 0) { // Ignoring any potential errors Close(); } } virtual Status Append(const Slice &data) { size_t n = data.size(); const char *p = data.data(); // Fit as much as possible into buffer. size_t copy = std::min(n, kBufSize - pos_); memcpy(buf_ + pos_, p, copy); p += copy; n -= copy; pos_ += copy; if (n == 0) { return Status::OK(); } // Can't fit in buffer, so need to do at least one write. Status s = FlushBuffered(); if (!s.ok()) { return s; } // Small writes go to buffer, large writes are written directly. if (n < kBufSize) { memcpy(buf_, p, n); pos_ = n; return Status::OK(); } return WriteRaw(p, n); } virtual Status Close() { Status result = FlushBuffered(); const int r = close(fd_); if (r < 0 && result.ok()) { result = PosixError(filename_, errno); } fd_ = -1; return result; } virtual Status Flush() { return FlushBuffered(); } Status SyncDirIfManifest() { const char *f = filename_.c_str(); const char *sep = strrchr(f, '/'); Slice basename; std::string dir; if (sep == NULL) { dir = "."; basename = f; } else { dir = std::string(f, sep - f); basename = sep + 1; } Status s; if (basename.starts_with("MANIFEST")) { int fd = open(dir.c_str(), O_RDONLY); if (fd < 0) { s = PosixError(dir, errno); } else { if (fsync(fd) < 0) { s = PosixError(dir, errno); } close(fd); } } return s; } virtual Status Sync() { // Ensure new files referred to by the manifest are in the filesystem. 
Status s = SyncDirIfManifest(); if (!s.ok()) { return s; } s = FlushBuffered(); if (s.ok()) { if (fdatasync(fd_) != 0) { s = PosixError(filename_, errno); } } return s; } private: Status FlushBuffered() { Status s = WriteRaw(buf_, pos_); pos_ = 0; return s; } Status WriteRaw(const char *p, size_t n) { while (n > 0) { ssize_t r = write(fd_, p, n); if (r < 0) { if (errno == EINTR) { continue; // Retry } return PosixError(filename_, errno); } p += r; n -= r; } return Status::OK(); } }; static int LockOrUnlock(int fd, bool lock) { errno = 0; struct flock f; memset(&f, 0, sizeof(f)); f.l_type = (lock ? F_WRLCK : F_UNLCK); f.l_whence = SEEK_SET; f.l_start = 0; f.l_len = 0; // Lock/unlock entire file return fcntl(fd, F_SETLK, &f); } class PosixFileLock : public FileLock { public: int fd_; std::string name_; }; // Set of locked files. We keep a separate set instead of just // relying on fcntrl(F_SETLK) since fcntl(F_SETLK) does not provide // any protection against multiple uses from the same process. class PosixLockTable { private: port::Mutex mu_; std::set<std::string> locked_files_; public: bool Insert(const std::string &fname) { MutexLock l(&mu_); return locked_files_.insert(fname).second; } void Remove(const std::string &fname) { MutexLock l(&mu_); locked_files_.erase(fname); } }; class PosixEnv : public Env { public: PosixEnv(); virtual ~PosixEnv() { char msg[] = "Destroying Env::Default()\n"; fwrite(msg, 1, sizeof(msg), stderr); abort(); } virtual Status NewSequentialFile(const std::string &fname, SequentialFile **result) { int fd = open(fname.c_str(), O_RDONLY); if (fd < 0) { *result = NULL; return PosixError(fname, errno); } else { *result = new PosixSequentialFile(fname, fd); return Status::OK(); } } virtual Status NewRandomAccessFile(const std::string &fname, RandomAccessFile **result) { *result = NULL; Status s; int fd = open(fname.c_str(), O_RDONLY); if (fd < 0) { s = PosixError(fname, errno); } else if (mmap_limit_.Acquire()) { uint64_t size; s = GetFileSize(fname, &size); if (s.ok()) { void *base = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0); if (base != MAP_FAILED) { *result = new PosixMmapReadableFile(fname, base, size, &mmap_limit_); } else { s = PosixError(fname, errno); } } close(fd); if (!s.ok()) { mmap_limit_.Release(); } } else { *result = new PosixRandomAccessFile(fname, fd, &fd_limit_); } return s; } virtual Status NewWritableFile(const std::string &fname, WritableFile **result) { Status s; int fd = open(fname.c_str(), O_TRUNC | O_WRONLY | O_CREAT, 0644); if (fd < 0) { *result = NULL; s = PosixError(fname, errno); } else { *result = new PosixWritableFile(fname, fd); } return s; } virtual Status NewAppendableFile(const std::string &fname, WritableFile **result) { Status s; int fd = open(fname.c_str(), O_APPEND | O_WRONLY | O_CREAT, 0644); if (fd < 0) { *result = NULL; s = PosixError(fname, errno); } else { *result = new PosixWritableFile(fname, fd); } return s; } virtual bool FileExists(const std::string &fname) { return access(fname.c_str(), F_OK) == 0; } virtual Status GetChildren(const std::string &dir, std::vector<std::string> *result) { result->clear(); DIR *d = opendir(dir.c_str()); if (d == NULL) { return PosixError(dir, errno); } struct dirent *entry; while ((entry = readdir(d)) != NULL) { result->push_back(entry->d_name); } closedir(d); return Status::OK(); } virtual Status DeleteFile(const std::string &fname) { Status result; if (unlink(fname.c_str()) != 0) { result = PosixError(fname, errno); } return result; } virtual Status CreateDir(const std::string &name) 
{ Status result; if (mkdir(name.c_str(), 0755) != 0) { result = PosixError(name, errno); } return result; } virtual Status DeleteDir(const std::string &name) { Status result; if (rmdir(name.c_str()) != 0) { result = PosixError(name, errno); } return result; } virtual Status GetFileSize(const std::string &fname, uint64_t *size) { Status s; struct stat sbuf; if (stat(fname.c_str(), &sbuf) != 0) { *size = 0; s = PosixError(fname, errno); } else { *size = sbuf.st_size; } return s; } virtual Status RenameFile(const std::string &src, const std::string &target) { Status result; if (rename(src.c_str(), target.c_str()) != 0) { result = PosixError(src, errno); } return result; } virtual Status LockFile(const std::string &fname, FileLock **lock) { *lock = NULL; Status result; int fd = open(fname.c_str(), O_RDWR | O_CREAT, 0644); if (fd < 0) { result = PosixError(fname, errno); } else if (!locks_.Insert(fname)) { close(fd); result = Status::IOError("lock " + fname, "already held by process"); } else if (LockOrUnlock(fd, true) == -1) { result = PosixError("lock " + fname, errno); close(fd); locks_.Remove(fname); } else { PosixFileLock *my_lock = new PosixFileLock; my_lock->fd_ = fd; my_lock->name_ = fname; *lock = my_lock; } return result; } virtual Status UnlockFile(FileLock *lock) { PosixFileLock *my_lock = reinterpret_cast<PosixFileLock *>(lock); Status result; if (LockOrUnlock(my_lock->fd_, false) == -1) { result = PosixError("unlock", errno); } locks_.Remove(my_lock->name_); close(my_lock->fd_); delete my_lock; return result; } virtual void Schedule(void (*function)(void *), void *arg); virtual void StartThread(void (*function)(void *arg), void *arg); virtual Status GetTestDirectory(std::string *result) { const char *env = getenv("TEST_TMPDIR"); if (env && env[0] != '\0') { *result = env; } else { char buf[100]; snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d", int(geteuid())); *result = buf; } // Directory may already exist CreateDir(*result); return Status::OK(); } static uint64_t gettid() { pthread_t tid = pthread_self(); uint64_t thread_id = 0; memcpy(&thread_id, &tid, std::min(sizeof(thread_id), sizeof(tid))); return thread_id; } virtual Status NewLogger(const std::string &fname, Logger **result) { FILE *f = fopen(fname.c_str(), "w"); if (f == NULL) { *result = NULL; return PosixError(fname, errno); } else { *result = new PosixLogger(f, &PosixEnv::gettid); return Status::OK(); } } virtual uint64_t NowMicros() { struct timeval tv; gettimeofday(&tv, NULL); return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec; } virtual void SleepForMicroseconds(int micros) { usleep(micros); } private: void PthreadCall(const char *label, int result) { if (result != 0) { fprintf(stderr, "pthread %s: %s\n", label, strerror(result)); abort(); } } // BGThread() is the body of the background thread void BGThread(); static void *BGThreadWrapper(void *arg) { reinterpret_cast<PosixEnv *>(arg)->BGThread(); return NULL; } pthread_mutex_t mu_; pthread_cond_t bgsignal_; pthread_t bgthread_; bool started_bgthread_; // Entry per Schedule() call struct BGItem { void *arg; void (*function)(void *); }; typedef std::deque<BGItem> BGQueue; BGQueue queue_; PosixLockTable locks_; Limiter mmap_limit_; Limiter fd_limit_; }; // Return the maximum number of concurrent mmaps. static int MaxMmaps() { if (mmap_limit >= 0) { return mmap_limit; } // Up to 1000 mmaps for 64-bit binaries; none for smaller pointer sizes. mmap_limit = sizeof(void *) >= 8 ? 
1000 : 0; return mmap_limit; } // Return the maximum number of read-only files to keep open. static intptr_t MaxOpenFiles() { if (open_read_only_file_limit >= 0) { return open_read_only_file_limit; } struct rlimit rlim; if (getrlimit(RLIMIT_NOFILE, &rlim)) { // getrlimit failed, fallback to hard-coded default. open_read_only_file_limit = 50; } else if (rlim.rlim_cur == RLIM_INFINITY) { open_read_only_file_limit = std::numeric_limits<int>::max(); } else { // Allow use of 20% of available file descriptors for read-only files. open_read_only_file_limit = rlim.rlim_cur / 5; } return open_read_only_file_limit; } PosixEnv::PosixEnv() : started_bgthread_(false), mmap_limit_(MaxMmaps()), fd_limit_(MaxOpenFiles()) { PthreadCall("mutex_init", pthread_mutex_init(&mu_, NULL)); PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, NULL)); } void PosixEnv::Schedule(void (*function)(void *), void *arg) { PthreadCall("lock", pthread_mutex_lock(&mu_)); // Start background thread if necessary if (!started_bgthread_) { started_bgthread_ = true; PthreadCall("create thread", pthread_create(&bgthread_, NULL, &PosixEnv::BGThreadWrapper, this)); } // If the queue is currently empty, the background thread may currently be // waiting. if (queue_.empty()) { PthreadCall("signal", pthread_cond_signal(&bgsignal_)); } // Add to priority queue queue_.push_back(BGItem()); queue_.back().function = function; queue_.back().arg = arg; PthreadCall("unlock", pthread_mutex_unlock(&mu_)); } void PosixEnv::BGThread() { while (true) { // Wait until there is an item that is ready to run PthreadCall("lock", pthread_mutex_lock(&mu_)); while (queue_.empty()) { PthreadCall("wait", pthread_cond_wait(&bgsignal_, &mu_)); } void (*function)(void *) = queue_.front().function; void *arg = queue_.front().arg; queue_.pop_front(); PthreadCall("unlock", pthread_mutex_unlock(&mu_)); (*function)(arg); } } namespace { struct StartThreadState { void (*user_function)(void *); void *arg; }; } static void *StartThreadWrapper(void *arg) { StartThreadState *state = reinterpret_cast<StartThreadState *>(arg); state->user_function(state->arg); delete state; return NULL; } void PosixEnv::StartThread(void (*function)(void *arg), void *arg) { pthread_t t; StartThreadState *state = new StartThreadState; state->user_function = function; state->arg = arg; PthreadCall("start thread", pthread_create(&t, NULL, &StartThreadWrapper, state)); } } // namespace static pthread_once_t once = PTHREAD_ONCE_INIT; static Env *default_env; static void InitDefaultEnv() { default_env = new PosixEnv; } void EnvPosixTestHelper::SetReadOnlyFDLimit(int limit) { assert(default_env == NULL); open_read_only_file_limit = limit; } void EnvPosixTestHelper::SetReadOnlyMMapLimit(int limit) { assert(default_env == NULL); mmap_limit = limit; } Env *Env::Default() { pthread_once(&once, InitDefaultEnv); return default_env; } } // namespace leveldb
17,689
20.812577
99
cc
null
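The PosixEnv above is only reached through Env::Default(); a sketch of the timing and thread entry points db_bench relies on follows, with an illustrative worker body and a sleep standing in for a join.

// Sketch: timing and thread entry points of the posix Env above, as used by
// Benchmark::Run(); the worker body is illustrative.
#include "leveldb/env.h"
#include <cstdio>
#include <cstdint>

static void Worker(void *arg)
{
	std::printf("worker got %d\n", *static_cast<int *>(arg));
}

int main()
{
	leveldb::Env *env = leveldb::Env::Default();   // lazily constructs the PosixEnv singleton
	uint64_t start = env->NowMicros();
	static int value = 42;
	env->StartThread(Worker, &value);              // detached pthread, same call as db_bench
	env->SleepForMicroseconds(100000);             // crude stand-in for a join in this sketch
	std::printf("elapsed ~%llu us\n",
		    static_cast<unsigned long long>(env->NowMicros() - start));
	return 0;
}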
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/util/random.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_RANDOM_H_ #define STORAGE_LEVELDB_UTIL_RANDOM_H_ #include <stdint.h> namespace leveldb { // A very simple random number generator. Not especially good at // generating truly random bits, but good enough for our needs in this // package. class Random { private: uint32_t seed_; public: explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) { // Avoid bad seeds. if (seed_ == 0 || seed_ == 2147483647L) { seed_ = 1; } } uint32_t Next() { static const uint32_t M = 2147483647L; // 2^31-1 static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0 // We are computing // seed_ = (seed_ * A) % M, where M = 2^31-1 // // seed_ must not be zero or M, or else all subsequent computed values // will be zero or M respectively. For all other values, seed_ will end // up cycling through every number in [1,M-1] uint64_t product = seed_ * A; // Compute (product % M) using the fact that ((x << 31) % M) == x. seed_ = static_cast<uint32_t>((product >> 31) + (product & M)); // The first reduction may overflow by 1 bit, so we may need to // repeat. mod == M is not possible; using > allows the faster // sign-bit-based test. if (seed_ > M) { seed_ -= M; } return seed_; } // Returns a uniformly distributed value in the range [0..n-1] // REQUIRES: n > 0 uint32_t Uniform(int n) { return Next() % n; } // Randomly returns true ~"1/n" of the time, and false otherwise. // REQUIRES: n > 0 bool OneIn(int n) { return (Next() % n) == 0; } // Skewed: pick "base" uniformly from range [0,max_log] and then // return "base" random bits. The effect is to pick a number in the // range [0,2^max_log-1] with exponential bias towards smaller numbers. uint32_t Skewed(int max_log) { return Uniform(1 << Uniform(max_log + 1)); } }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_RANDOM_H_
2,202
26.886076
81
h
null
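Assuming the header above is reachable as util/random.h (the include path is an assumption), a short driver showing how Next(), Uniform(), OneIn() and Skewed() are typically used:

// Hypothetical driver for leveldb::Random; the include path is an assumption.
#include "util/random.h"
#include <cstdio>

int main() {
  leveldb::Random rnd(301);                      // any seed; 0 and 2^31-1 are remapped to 1
  std::printf("raw:    %u\n", rnd.Next());       // value in [1, 2^31-2]
  std::printf("dice:   %u\n", rnd.Uniform(6));   // uniform in [0, 5]
  std::printf("rare:   %d\n", rnd.OneIn(10));    // true roughly 1 time in 10
  std::printf("skewed: %u\n", rnd.Skewed(20));   // biased towards small values in [0, 2^20-1]
  return 0;
}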
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/util/posix_logger.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // // Logger implementation that can be shared by all environments // where enough posix functionality is available. #ifndef STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_ #define STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_ #include "leveldb/env.h" #include <algorithm> #include <stdio.h> #include <sys/time.h> #include <time.h> namespace leveldb { class PosixLogger : public Logger { private: FILE *file_; uint64_t (*gettid_)(); // Return the thread id for the current thread public: PosixLogger(FILE *f, uint64_t (*gettid)()) : file_(f), gettid_(gettid) { } virtual ~PosixLogger() { fclose(file_); } virtual void Logv(const char *format, va_list ap) { const uint64_t thread_id = (*gettid_)(); // We try twice: the first time with a fixed-size stack allocated buffer, // and the second time with a much larger dynamically allocated buffer. char buffer[500]; for (int iter = 0; iter < 2; iter++) { char *base; int bufsize; if (iter == 0) { bufsize = sizeof(buffer); base = buffer; } else { bufsize = 30000; base = new char[bufsize]; } char *p = base; char *limit = base + bufsize; struct timeval now_tv; gettimeofday(&now_tv, NULL); const time_t seconds = now_tv.tv_sec; struct tm t; localtime_r(&seconds, &t); p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ", t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, static_cast<int>(now_tv.tv_usec), static_cast<long long unsigned int>(thread_id)); // Print the message if (p < limit) { va_list backup_ap; va_copy(backup_ap, ap); p += vsnprintf(p, limit - p, format, backup_ap); va_end(backup_ap); } // Truncate to available space if necessary if (p >= limit) { if (iter == 0) { continue; // Try again with larger buffer } else { p = limit - 1; } } // Add newline if necessary if (p == base || p[-1] != '\n') { *p++ = '\n'; } assert(p <= limit); fwrite(base, 1, p - base, file_); fflush(file_); if (base != buffer) { delete[] base; } break; } } }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
2,503
23.54902
81
h
null
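Logv() above uses a common two-pass formatting idiom: try a small stack buffer first, and only fall back to a heap allocation when the formatted message does not fit. A standalone sketch of just that idiom (the function name is illustrative and, unlike PosixLogger, it takes "..." directly rather than a va_list):

// Sketch of the stack-then-heap formatting idiom used by PosixLogger::Logv().
#include <cstdarg>
#include <cstdio>

void LogLine(FILE* f, const char* format, ...) {
  char stack_buf[500];
  char* base = stack_buf;
  int bufsize = sizeof(stack_buf);
  for (int iter = 0; iter < 2; iter++) {
    va_list ap;
    va_start(ap, format);
    int n = std::vsnprintf(base, bufsize, format, ap);
    va_end(ap);
    if (n >= 0 && n < bufsize) {       // it fit: write the line and stop
      std::fwrite(base, 1, n, f);
      std::fputc('\n', f);
      break;
    }
    if (iter == 0) {                   // did not fit: retry once with a larger heap buffer
      bufsize = (n > 0) ? n + 1 : 30000;
      base = new char[bufsize];
    }
  }
  if (base != stack_buf) delete[] base;
  std::fflush(f);
}

int main() { LogLine(stdout, "compaction finished: %d files, %s", 4, "ok"); }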
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/util/env_posix_test_helper.h
// Copyright 2017 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_ #define STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_ namespace leveldb { class EnvPosixTest; // A helper for the POSIX Env to facilitate testing. class EnvPosixTestHelper { private: friend class EnvPosixTest; // Set the maximum number of read-only files that will be opened. // Must be called before creating an Env. static void SetReadOnlyFDLimit(int limit); // Set the maximum number of read-only files that will be mapped via mmap. // Must be called before creating an Env. static void SetReadOnlyMMapLimit(int limit); }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_
967
28.333333
81
h
null
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/port/port_posix.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // See port_example.h for documentation for the following types/functions. #ifndef STORAGE_LEVELDB_PORT_PORT_POSIX_H_ #define STORAGE_LEVELDB_PORT_PORT_POSIX_H_ #undef PLATFORM_IS_LITTLE_ENDIAN #if defined(__APPLE__) #include <machine/endian.h> #if defined(__DARWIN_LITTLE_ENDIAN) && defined(__DARWIN_BYTE_ORDER) #define PLATFORM_IS_LITTLE_ENDIAN (__DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN) #endif #elif defined(OS_SOLARIS) #include <sys/isa_defs.h> #ifdef _LITTLE_ENDIAN #define PLATFORM_IS_LITTLE_ENDIAN true #else #define PLATFORM_IS_LITTLE_ENDIAN false #endif #elif defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLYBSD) #include <sys/endian.h> #include <sys/types.h> #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN) #elif defined(OS_HPUX) #define PLATFORM_IS_LITTLE_ENDIAN false #elif defined(OS_ANDROID) // Due to a bug in the NDK x86 <sys/endian.h> definition, // _BYTE_ORDER must be used instead of __BYTE_ORDER on Android. // See http://code.google.com/p/android/issues/detail?id=39824 #include <endian.h> #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN) #else #include <endian.h> #endif #include <pthread.h> #if defined(HAVE_CRC32C) #include <crc32c/crc32c.h> #endif // defined(HAVE_CRC32C) #ifdef HAVE_SNAPPY #include <snappy.h> #endif // defined(HAVE_SNAPPY) #include "port/atomic_pointer.h" #include <stdint.h> #include <string> #ifndef PLATFORM_IS_LITTLE_ENDIAN #define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN) #endif #if defined(__APPLE__) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) // Use fsync() on platforms without fdatasync() #define fdatasync fsync #endif #if defined(OS_ANDROID) && __ANDROID_API__ < 9 // fdatasync() was only introduced in API level 9 on Android. Use fsync() // when targetting older platforms. 
#define fdatasync fsync #endif namespace leveldb { namespace port { static const bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN; #undef PLATFORM_IS_LITTLE_ENDIAN class CondVar; class Mutex { public: Mutex(); ~Mutex(); void Lock(); void Unlock(); void AssertHeld() { } private: friend class CondVar; pthread_mutex_t mu_; // No copying Mutex(const Mutex &); void operator=(const Mutex &); }; class CondVar { public: explicit CondVar(Mutex *mu); ~CondVar(); void Wait(); void Signal(); void SignalAll(); private: pthread_cond_t cv_; Mutex *mu_; }; typedef pthread_once_t OnceType; #define LEVELDB_ONCE_INIT PTHREAD_ONCE_INIT extern void InitOnce(OnceType *once, void (*initializer)()); inline bool Snappy_Compress(const char *input, size_t length, ::std::string *output) { #ifdef HAVE_SNAPPY output->resize(snappy::MaxCompressedLength(length)); size_t outlen; snappy::RawCompress(input, length, &(*output)[0], &outlen); output->resize(outlen); return true; #endif // defined(HAVE_SNAPPY) return false; } inline bool Snappy_GetUncompressedLength(const char *input, size_t length, size_t *result) { #ifdef HAVE_SNAPPY return snappy::GetUncompressedLength(input, length, result); #else return false; #endif // defined(HAVE_SNAPPY) } inline bool Snappy_Uncompress(const char *input, size_t length, char *output) { #ifdef HAVE_SNAPPY return snappy::RawUncompress(input, length, output); #else return false; #endif // defined(HAVE_SNAPPY) } inline bool GetHeapProfile(void (*func)(void *, const char *, int), void *arg) { return false; } inline uint32_t AcceleratedCRC32C(uint32_t crc, const char *buf, size_t size) { #if defined(HAVE_CRC32C) return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t *>(buf), size); #else return 0; #endif // defined(HAVE_CRC32C) } } // namespace port } // namespace leveldb #endif // STORAGE_LEVELDB_PORT_PORT_POSIX_H_
4,061
23.768293
98
h
null
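The Snappy_* wrappers above deliberately return false when the library is not compiled in (HAVE_SNAPPY undefined), so callers have to treat compression as an optional optimization. A small sketch of that calling pattern (illustrative only, not leveldb's actual block builder; the include path and the "at least ~12.5% savings" threshold are assumptions):

// Sketch: fall back to storing raw bytes when Snappy_Compress() returns false
// or the compression ratio is not worth it.
#include "port/port_posix.h"
#include <string>
#include <cstdio>

int main() {
  std::string raw(1024, 'a');                  // highly compressible input
  std::string compressed;
  bool use_compressed =
      leveldb::port::Snappy_Compress(raw.data(), raw.size(), &compressed) &&
      compressed.size() < raw.size() - raw.size() / 8;   // demand ~12.5% savings
  std::printf("storing %s form (%zu bytes)\n",
              use_compressed ? "compressed" : "raw",
              use_compressed ? compressed.size() : raw.size());
  return 0;
}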
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/port/port_posix.cc
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 /* Copyright 2020, Intel Corporation */ #include "port/port_posix.h" #include <cstdlib> #include <stdio.h> #include <string.h> namespace leveldb { namespace port { static void PthreadCall(const char *label, int result) { if (result != 0) { fprintf(stderr, "pthread %s: %s\n", label, strerror(result)); abort(); } } Mutex::Mutex() { PthreadCall("init mutex", pthread_mutex_init(&mu_, NULL)); } Mutex::~Mutex() { PthreadCall("destroy mutex", pthread_mutex_destroy(&mu_)); } void Mutex::Lock() { PthreadCall("lock", pthread_mutex_lock(&mu_)); } void Mutex::Unlock() { PthreadCall("unlock", pthread_mutex_unlock(&mu_)); } CondVar::CondVar(Mutex *mu) : mu_(mu) { PthreadCall("init cv", pthread_cond_init(&cv_, NULL)); } CondVar::~CondVar() { PthreadCall("destroy cv", pthread_cond_destroy(&cv_)); } void CondVar::Wait() { PthreadCall("wait", pthread_cond_wait(&cv_, &mu_->mu_)); } void CondVar::Signal() { PthreadCall("signal", pthread_cond_signal(&cv_)); } void CondVar::SignalAll() { PthreadCall("broadcast", pthread_cond_broadcast(&cv_)); } void InitOnce(OnceType *once, void (*initializer)()) { PthreadCall("once", pthread_once(once, initializer)); } } // namespace port } // namespace leveldb
1,484
17.797468
81
cc
null
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/port/thread_annotations.h
// Copyright (c) 2012 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_ #define STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_ // Some environments provide custom macros to aid in static thread-safety // analysis. Provide empty definitions of such macros unless they are already // defined. #ifndef EXCLUSIVE_LOCKS_REQUIRED #define EXCLUSIVE_LOCKS_REQUIRED(...) #endif #ifndef SHARED_LOCKS_REQUIRED #define SHARED_LOCKS_REQUIRED(...) #endif #ifndef LOCKS_EXCLUDED #define LOCKS_EXCLUDED(...) #endif #ifndef LOCK_RETURNED #define LOCK_RETURNED(x) #endif #ifndef LOCKABLE #define LOCKABLE #endif #ifndef SCOPED_LOCKABLE #define SCOPED_LOCKABLE #endif #ifndef EXCLUSIVE_LOCK_FUNCTION #define EXCLUSIVE_LOCK_FUNCTION(...) #endif #ifndef SHARED_LOCK_FUNCTION #define SHARED_LOCK_FUNCTION(...) #endif #ifndef EXCLUSIVE_TRYLOCK_FUNCTION #define EXCLUSIVE_TRYLOCK_FUNCTION(...) #endif #ifndef SHARED_TRYLOCK_FUNCTION #define SHARED_TRYLOCK_FUNCTION(...) #endif #ifndef UNLOCK_FUNCTION #define UNLOCK_FUNCTION(...) #endif #ifndef NO_THREAD_SAFETY_ANALYSIS #define NO_THREAD_SAFETY_ANALYSIS #endif #endif // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
1,429
21.34375
81
h
null
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/port/atomic_pointer.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // AtomicPointer provides storage for a lock-free pointer. // Platform-dependent implementation of AtomicPointer: // - If the platform provides a cheap barrier, we use it with raw pointers // - If <atomic> is present (on newer versions of gcc, it is), we use // a <atomic>-based AtomicPointer. However we prefer the memory // barrier based version, because at least on a gcc 4.4 32-bit build // on linux, we have encountered a buggy <atomic> implementation. // Also, some <atomic> implementations are much slower than a memory-barrier // based implementation (~16ns for <atomic> based acquire-load vs. ~1ns for // a barrier based acquire-load). // This code is based on atomicops-internals-* in Google's perftools: // http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase #ifndef PORT_ATOMIC_POINTER_H_ #define PORT_ATOMIC_POINTER_H_ #include <stdint.h> #ifdef LEVELDB_ATOMIC_PRESENT #include <atomic> #endif #ifdef OS_WIN #include <windows.h> #endif #ifdef __APPLE__ #include <libkern/OSAtomic.h> #endif #if defined(_M_X64) || defined(__x86_64__) #define ARCH_CPU_X86_FAMILY 1 #elif defined(_M_IX86) || defined(__i386__) || defined(__i386) #define ARCH_CPU_X86_FAMILY 1 #elif defined(__ARMEL__) #define ARCH_CPU_ARM_FAMILY 1 #elif defined(__aarch64__) #define ARCH_CPU_ARM64_FAMILY 1 #elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__) #define ARCH_CPU_PPC_FAMILY 1 #elif defined(__mips__) #define ARCH_CPU_MIPS_FAMILY 1 #endif namespace leveldb { namespace port { // Define MemoryBarrier() if available // Windows on x86 #if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY) // windows.h already provides a MemoryBarrier(void) macro // http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx #define LEVELDB_HAVE_MEMORY_BARRIER // Mac OS #elif defined(__APPLE__) inline void MemoryBarrier() { OSMemoryBarrier(); } #define LEVELDB_HAVE_MEMORY_BARRIER // Gcc on x86 #elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__) inline void MemoryBarrier() { // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering. __asm__ __volatile__("" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // Sun Studio #elif defined(ARCH_CPU_X86_FAMILY) && defined(__SUNPRO_CC) inline void MemoryBarrier() { // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering. asm volatile("" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // ARM Linux #elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__) typedef void (*LinuxKernelMemoryBarrierFunc)(void); // The Linux ARM kernel provides a highly optimized device-specific memory // barrier function at a fixed memory address that is mapped in every // user-level process. // // This beats using CPU-specific instructions which are, on single-core // devices, un-necessary and very costly (e.g. ARMv7-A "dmb" takes more // than 180ns on a Cortex-A8 like the one on a Nexus One). Benchmarking // shows that the extra function call cost is completely negligible on // multi-core devices. 
// inline void MemoryBarrier() { (*(LinuxKernelMemoryBarrierFunc)0xffff0fa0)(); } #define LEVELDB_HAVE_MEMORY_BARRIER // ARM64 #elif defined(ARCH_CPU_ARM64_FAMILY) inline void MemoryBarrier() { asm volatile("dmb sy" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // PPC #elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__) inline void MemoryBarrier() { // TODO for some powerpc expert: is there a cheaper suitable variant? // Perhaps by having separate barriers for acquire and release ops. asm volatile("sync" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // MIPS #elif defined(ARCH_CPU_MIPS_FAMILY) && defined(__GNUC__) inline void MemoryBarrier() { __asm__ __volatile__("sync" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER #endif // AtomicPointer built using platform-specific MemoryBarrier() #if defined(LEVELDB_HAVE_MEMORY_BARRIER) class AtomicPointer { private: void *rep_; public: AtomicPointer() { } explicit AtomicPointer(void *p) : rep_(p) { } inline void *NoBarrier_Load() const { return rep_; } inline void NoBarrier_Store(void *v) { rep_ = v; } inline void *Acquire_Load() const { void *result = rep_; MemoryBarrier(); return result; } inline void Release_Store(void *v) { MemoryBarrier(); rep_ = v; } }; // AtomicPointer based on <cstdatomic> #elif defined(LEVELDB_ATOMIC_PRESENT) class AtomicPointer { private: std::atomic<void *> rep_; public: AtomicPointer() { } explicit AtomicPointer(void *v) : rep_(v) { } inline void *Acquire_Load() const { return rep_.load(std::memory_order_acquire); } inline void Release_Store(void *v) { rep_.store(v, std::memory_order_release); } inline void *NoBarrier_Load() const { return rep_.load(std::memory_order_relaxed); } inline void NoBarrier_Store(void *v) { rep_.store(v, std::memory_order_relaxed); } }; // Atomic pointer based on sparc memory barriers #elif defined(__sparcv9) && defined(__GNUC__) class AtomicPointer { private: void *rep_; public: AtomicPointer() { } explicit AtomicPointer(void *v) : rep_(v) { } inline void *Acquire_Load() const { void *val; __asm__ __volatile__("ldx [%[rep_]], %[val] \n\t" "membar #LoadLoad|#LoadStore \n\t" : [val] "=r"(val) : [rep_] "r"(&rep_) : "memory"); return val; } inline void Release_Store(void *v) { __asm__ __volatile__("membar #LoadStore|#StoreStore \n\t" "stx %[v], [%[rep_]] \n\t" : : [rep_] "r"(&rep_), [v] "r"(v) : "memory"); } inline void *NoBarrier_Load() const { return rep_; } inline void NoBarrier_Store(void *v) { rep_ = v; } }; // Atomic pointer based on ia64 acq/rel #elif defined(__ia64) && defined(__GNUC__) class AtomicPointer { private: void *rep_; public: AtomicPointer() { } explicit AtomicPointer(void *v) : rep_(v) { } inline void *Acquire_Load() const { void *val; __asm__ __volatile__("ld8.acq %[val] = [%[rep_]] \n\t" : [val] "=r"(val) : [rep_] "r"(&rep_) : "memory"); return val; } inline void Release_Store(void *v) { __asm__ __volatile__("st8.rel [%[rep_]] = %[v] \n\t" : : [rep_] "r"(&rep_), [v] "r"(v) : "memory"); } inline void *NoBarrier_Load() const { return rep_; } inline void NoBarrier_Store(void *v) { rep_ = v; } }; // We have neither MemoryBarrier(), nor <atomic> #else #error Please implement AtomicPointer for this platform. #endif #undef LEVELDB_HAVE_MEMORY_BARRIER #undef ARCH_CPU_X86_FAMILY #undef ARCH_CPU_ARM_FAMILY #undef ARCH_CPU_ARM64_FAMILY #undef ARCH_CPU_PPC_FAMILY } // namespace port } // namespace leveldb #endif // PORT_ATOMIC_POINTER_H_
7,207
23.26936
84
h
null
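Whichever implementation the header selects, AtomicPointer is meant for the same publish/consume pattern: a writer fills in an object and Release_Store()s its address; a reader that Acquire_Load()s a non-NULL pointer may then safely read everything written before the store. A sketch of that usage (illustrative names; assumes the header is on the include path):

// Sketch of the release/acquire publication pattern AtomicPointer is designed for.
#include "port/atomic_pointer.h"
#include <pthread.h>
#include <cstdio>

struct Config { int level; int buffer_size; };

static leveldb::port::AtomicPointer g_config(nullptr);

static void* Publisher(void*) {
  Config* c = new Config{4, 4096};        // these writes happen-before the release store
  g_config.Release_Store(c);
  return nullptr;
}

int main() {
  pthread_t t;
  pthread_create(&t, nullptr, &Publisher, nullptr);
  Config* c;
  while ((c = static_cast<Config*>(g_config.Acquire_Load())) == nullptr) {
    // spin until published; acceptable for a sketch
  }
  std::printf("level=%d buffer=%d\n", c->level, c->buffer_size);
  pthread_join(t, nullptr);
  delete c;
  return 0;
}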
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/include/leveldb/status.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // A Status encapsulates the result of an operation. It may indicate success, // or it may indicate an error with an associated error message. // // Multiple threads can invoke const methods on a Status without // external synchronization, but if any of the threads may call a // non-const method, all threads accessing the same Status must use // external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_ #define STORAGE_LEVELDB_INCLUDE_STATUS_H_ #include "leveldb/slice.h" #include <string> namespace leveldb { class Status { public: // Create a success status. Status() : state_(NULL) { } ~Status() { delete[] state_; } // Copy the specified status. Status(const Status &s); void operator=(const Status &s); // Return a success status. static Status OK() { return Status(); } // Return error status of an appropriate type. static Status NotFound(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kNotFound, msg, msg2); } static Status Corruption(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kCorruption, msg, msg2); } static Status NotSupported(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kNotSupported, msg, msg2); } static Status InvalidArgument(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kInvalidArgument, msg, msg2); } static Status IOError(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kIOError, msg, msg2); } // Returns true iff the status indicates success. bool ok() const { return (state_ == NULL); } // Returns true iff the status indicates a NotFound error. bool IsNotFound() const { return code() == kNotFound; } // Returns true iff the status indicates a Corruption error. bool IsCorruption() const { return code() == kCorruption; } // Returns true iff the status indicates an IOError. bool IsIOError() const { return code() == kIOError; } // Returns true iff the status indicates a NotSupportedError. bool IsNotSupportedError() const { return code() == kNotSupported; } // Returns true iff the status indicates an InvalidArgument. bool IsInvalidArgument() const { return code() == kInvalidArgument; } // Return a string representation of this status suitable for printing. // Returns the string "OK" for success. std::string ToString() const; private: // OK status has a NULL state_. Otherwise, state_ is a new[] array // of the following form: // state_[0..3] == length of message // state_[4] == code // state_[5..] == message const char *state_; enum Code { kOk = 0, kNotFound = 1, kCorruption = 2, kNotSupported = 3, kInvalidArgument = 4, kIOError = 5 }; Code code() const { return (state_ == NULL) ? kOk : static_cast<Code>(state_[4]); } Status(Code code, const Slice &msg, const Slice &msg2); static const char *CopyState(const char *s); }; inline Status::Status(const Status &s) { state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); } inline void Status::operator=(const Status &s) { // The following condition catches both aliasing (when this == &s), // and the common case where both s and *this are ok. if (state_ != s.state_) { delete[] state_; state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); } } } // namespace leveldb #endif // STORAGE_LEVELDB_INCLUDE_STATUS_H_
3,658
23.231788
81
h
null
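Status is a value-style error type: functions return it by value, success is the cheap default-constructed state (state_ == NULL), and an error carries its code plus message in a single heap block. A small usage sketch (FindKey is a hypothetical function, not part of leveldb):

// Hypothetical caller/callee showing the usual Status conventions.
#include "leveldb/status.h"
#include <cstdio>
#include <string>

static leveldb::Status FindKey(const std::string& key, std::string* value) {
  if (key == "present") { *value = "42"; return leveldb::Status::OK(); }
  return leveldb::Status::NotFound("key not in table", key);
}

int main() {
  std::string v;
  leveldb::Status s = FindKey("missing", &v);
  if (s.IsNotFound()) std::printf("expected miss: %s\n", s.ToString().c_str());
  s = FindKey("present", &v);
  if (s.ok()) std::printf("value = %s\n", v.c_str());
  return 0;
}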
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/include/leveldb/slice.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // Slice is a simple structure containing a pointer into some external // storage and a size. The user of a Slice must ensure that the slice // is not used after the corresponding external storage has been // deallocated. // // Multiple threads can invoke const methods on a Slice without // external synchronization, but if any of the threads may call a // non-const method, all threads accessing the same Slice must use // external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_SLICE_H_ #define STORAGE_LEVELDB_INCLUDE_SLICE_H_ #include <assert.h> #include <stddef.h> #include <string.h> #include <string> namespace leveldb { class Slice { public: // Create an empty slice. Slice() : data_(""), size_(0) { } // Create a slice that refers to d[0,n-1]. Slice(const char *d, size_t n) : data_(d), size_(n) { } // Create a slice that refers to the contents of "s" Slice(const std::string &s) : data_(s.data()), size_(s.size()) { } // Create a slice that refers to s[0,strlen(s)-1] Slice(const char *s) : data_(s), size_(strlen(s)) { } // Return a pointer to the beginning of the referenced data const char *data() const { return data_; } // Return the length (in bytes) of the referenced data size_t size() const { return size_; } // Return true iff the length of the referenced data is zero bool empty() const { return size_ == 0; } // Return the ith byte in the referenced data. // REQUIRES: n < size() char operator[](size_t n) const { assert(n < size()); return data_[n]; } // Change this slice to refer to an empty array void clear() { data_ = ""; size_ = 0; } // Drop the first "n" bytes from this slice. void remove_prefix(size_t n) { assert(n <= size()); data_ += n; size_ -= n; } // Return a string that contains the copy of the referenced data. std::string ToString() const { return std::string(data_, size_); } // Three-way comparison. Returns value: // < 0 iff "*this" < "b", // == 0 iff "*this" == "b", // > 0 iff "*this" > "b" int compare(const Slice &b) const; // Return true iff "x" is a prefix of "*this" bool starts_with(const Slice &x) const { return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0)); } private: const char *data_; size_t size_; // Intentionally copyable }; inline bool operator==(const Slice &x, const Slice &y) { return ((x.size() == y.size()) && (memcmp(x.data(), y.data(), x.size()) == 0)); } inline bool operator!=(const Slice &x, const Slice &y) { return !(x == y); } inline int Slice::compare(const Slice &b) const { const size_t min_len = (size_ < b.size_) ? size_ : b.size_; int r = memcmp(data_, b.data_, min_len); if (r == 0) { if (size_ < b.size_) r = -1; else if (size_ > b.size_) r = +1; } return r; } } // namespace leveldb #endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_
3,163
21.125874
81
h
null
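The main contract in the Slice header is about lifetime: a Slice never owns its bytes, so it must not outlive the string or buffer it points into. A short sketch of idiomatic use (illustrative only):

// Sketch: Slice is a non-owning (pointer, length) view over caller-owned bytes.
#include "leveldb/slice.h"
#include <cstdio>
#include <string>

int main() {
  std::string owner = "user:1234:profile";
  leveldb::Slice key(owner);              // valid only while 'owner' is alive
  if (key.starts_with("user:")) {
    leveldb::Slice rest = key;
    rest.remove_prefix(5);                // now views "1234:profile"; no copy is made
    std::printf("suffix = %s\n", rest.ToString().c_str());
  }
  std::printf("compare to self: %d\n", key.compare(leveldb::Slice(owner)));  // prints 0
  return 0;
}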
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/bench/include/leveldb/env.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // An Env is an interface used by the leveldb implementation to access // operating system functionality like the filesystem etc. Callers // may wish to provide a custom Env object when opening a database to // get fine gain control; e.g., to rate limit file system operations. // // All Env implementations are safe for concurrent access from // multiple threads without any external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_ #define STORAGE_LEVELDB_INCLUDE_ENV_H_ #include "leveldb/status.h" #include <stdarg.h> #include <stdint.h> #include <string> #include <vector> namespace leveldb { class FileLock; class Logger; class RandomAccessFile; class SequentialFile; class Slice; class WritableFile; class Env { public: Env() { } virtual ~Env(); // Return a default environment suitable for the current operating // system. Sophisticated users may wish to provide their own Env // implementation instead of relying on this default environment. // // The result of Default() belongs to leveldb and must never be deleted. static Env *Default(); // Create a brand new sequentially-readable file with the specified name. // On success, stores a pointer to the new file in *result and returns OK. // On failure stores NULL in *result and returns non-OK. If the file does // not exist, returns a non-OK status. Implementations should return a // NotFound status when the file does not exist. // // The returned file will only be accessed by one thread at a time. virtual Status NewSequentialFile(const std::string &fname, SequentialFile **result) = 0; // Create a brand new random access read-only file with the // specified name. On success, stores a pointer to the new file in // *result and returns OK. On failure stores NULL in *result and // returns non-OK. If the file does not exist, returns a non-OK // status. Implementations should return a NotFound status when the file does // not exist. // // The returned file may be concurrently accessed by multiple threads. virtual Status NewRandomAccessFile(const std::string &fname, RandomAccessFile **result) = 0; // Create an object that writes to a new file with the specified // name. Deletes any existing file with the same name and creates a // new file. On success, stores a pointer to the new file in // *result and returns OK. On failure stores NULL in *result and // returns non-OK. // // The returned file will only be accessed by one thread at a time. virtual Status NewWritableFile(const std::string &fname, WritableFile **result) = 0; // Create an object that either appends to an existing file, or // writes to a new file (if the file does not exist to begin with). // On success, stores a pointer to the new file in *result and // returns OK. On failure stores NULL in *result and returns // non-OK. // // The returned file will only be accessed by one thread at a time. // // May return an IsNotSupportedError error if this Env does // not allow appending to an existing file. Users of Env (including // the leveldb implementation) must be prepared to deal with // an Env that does not support appending. virtual Status NewAppendableFile(const std::string &fname, WritableFile **result); // Returns true iff the named file exists. 
virtual bool FileExists(const std::string &fname) = 0; // Store in *result the names of the children of the specified directory. // The names are relative to "dir". // Original contents of *results are dropped. virtual Status GetChildren(const std::string &dir, std::vector<std::string> *result) = 0; // Delete the named file. virtual Status DeleteFile(const std::string &fname) = 0; // Create the specified directory. virtual Status CreateDir(const std::string &dirname) = 0; // Delete the specified directory. virtual Status DeleteDir(const std::string &dirname) = 0; // Store the size of fname in *file_size. virtual Status GetFileSize(const std::string &fname, uint64_t *file_size) = 0; // Rename file src to target. virtual Status RenameFile(const std::string &src, const std::string &target) = 0; // Lock the specified file. Used to prevent concurrent access to // the same db by multiple processes. On failure, stores NULL in // *lock and returns non-OK. // // On success, stores a pointer to the object that represents the // acquired lock in *lock and returns OK. The caller should call // UnlockFile(*lock) to release the lock. If the process exits, // the lock will be automatically released. // // If somebody else already holds the lock, finishes immediately // with a failure. I.e., this call does not wait for existing locks // to go away. // // May create the named file if it does not already exist. virtual Status LockFile(const std::string &fname, FileLock **lock) = 0; // Release the lock acquired by a previous successful call to LockFile. // REQUIRES: lock was returned by a successful LockFile() call // REQUIRES: lock has not already been unlocked. virtual Status UnlockFile(FileLock *lock) = 0; // Arrange to run "(*function)(arg)" once in a background thread. // // "function" may run in an unspecified thread. Multiple functions // added to the same Env may run concurrently in different threads. // I.e., the caller may not assume that background work items are // serialized. virtual void Schedule(void (*function)(void *arg), void *arg) = 0; // Start a new thread, invoking "function(arg)" within the new thread. // When "function(arg)" returns, the thread will be destroyed. virtual void StartThread(void (*function)(void *arg), void *arg) = 0; // *path is set to a temporary directory that can be used for testing. It may // or many not have just been created. The directory may or may not differ // between runs of the same process, but subsequent calls will return the // same directory. virtual Status GetTestDirectory(std::string *path) = 0; // Create and return a log file for storing informational messages. virtual Status NewLogger(const std::string &fname, Logger **result) = 0; // Returns the number of micro-seconds since some fixed point in time. Only // useful for computing deltas of time. virtual uint64_t NowMicros() = 0; // Sleep/delay the thread for the prescribed number of micro-seconds. virtual void SleepForMicroseconds(int micros) = 0; private: // No copying allowed Env(const Env &); void operator=(const Env &); }; // A file abstraction for reading sequentially through a file class SequentialFile { public: SequentialFile() { } virtual ~SequentialFile(); // Read up to "n" bytes from the file. "scratch[0..n-1]" may be // written by this routine. Sets "*result" to the data that was // read (including if fewer than "n" bytes were successfully read). // May set "*result" to point at data in "scratch[0..n-1]", so // "scratch[0..n-1]" must be live when "*result" is used. 
// If an error was encountered, returns a non-OK status. // // REQUIRES: External synchronization virtual Status Read(size_t n, Slice *result, char *scratch) = 0; // Skip "n" bytes from the file. This is guaranteed to be no // slower that reading the same data, but may be faster. // // If end of file is reached, skipping will stop at the end of the // file, and Skip will return OK. // // REQUIRES: External synchronization virtual Status Skip(uint64_t n) = 0; private: // No copying allowed SequentialFile(const SequentialFile &); void operator=(const SequentialFile &); }; // A file abstraction for randomly reading the contents of a file. class RandomAccessFile { public: RandomAccessFile() { } virtual ~RandomAccessFile(); // Read up to "n" bytes from the file starting at "offset". // "scratch[0..n-1]" may be written by this routine. Sets "*result" // to the data that was read (including if fewer than "n" bytes were // successfully read). May set "*result" to point at data in // "scratch[0..n-1]", so "scratch[0..n-1]" must be live when // "*result" is used. If an error was encountered, returns a non-OK // status. // // Safe for concurrent use by multiple threads. virtual Status Read(uint64_t offset, size_t n, Slice *result, char *scratch) const = 0; private: // No copying allowed RandomAccessFile(const RandomAccessFile &); void operator=(const RandomAccessFile &); }; // A file abstraction for sequential writing. The implementation // must provide buffering since callers may append small fragments // at a time to the file. class WritableFile { public: WritableFile() { } virtual ~WritableFile(); virtual Status Append(const Slice &data) = 0; virtual Status Close() = 0; virtual Status Flush() = 0; virtual Status Sync() = 0; private: // No copying allowed WritableFile(const WritableFile &); void operator=(const WritableFile &); }; // An interface for writing log messages. class Logger { public: Logger() { } virtual ~Logger(); // Write an entry to the log file with the specified format. virtual void Logv(const char *format, va_list ap) = 0; private: // No copying allowed Logger(const Logger &); void operator=(const Logger &); }; // Identifies a locked file. class FileLock { public: FileLock() { } virtual ~FileLock(); private: // No copying allowed FileLock(const FileLock &); void operator=(const FileLock &); }; // Log the specified data to *info_log if info_log is non-NULL. extern void Log(Logger *info_log, const char *format, ...) #if defined(__GNUC__) || defined(__clang__) __attribute__((__format__(__printf__, 2, 3))) #endif ; // A utility routine: write "data" to the named file. Status WriteStringToFile(Env *env, const Slice &data, const std::string &fname); // A utility routine: read contents of named file into *data Status ReadFileToString(Env *env, const std::string &fname, std::string *data); // An implementation of Env that forwards all calls to another Env. // May be useful to clients who wish to override just part of the // functionality of another Env. 
class EnvWrapper : public Env { public: // Initialize an EnvWrapper that delegates all calls to *t explicit EnvWrapper(Env *t) : target_(t) { } virtual ~EnvWrapper(); // Return the target to which this Env forwards all calls Env *target() const { return target_; } // The following text is boilerplate that forwards all methods to target() Status NewSequentialFile(const std::string &f, SequentialFile **r) { return target_->NewSequentialFile(f, r); } Status NewRandomAccessFile(const std::string &f, RandomAccessFile **r) { return target_->NewRandomAccessFile(f, r); } Status NewWritableFile(const std::string &f, WritableFile **r) { return target_->NewWritableFile(f, r); } Status NewAppendableFile(const std::string &f, WritableFile **r) { return target_->NewAppendableFile(f, r); } bool FileExists(const std::string &f) { return target_->FileExists(f); } Status GetChildren(const std::string &dir, std::vector<std::string> *r) { return target_->GetChildren(dir, r); } Status DeleteFile(const std::string &f) { return target_->DeleteFile(f); } Status CreateDir(const std::string &d) { return target_->CreateDir(d); } Status DeleteDir(const std::string &d) { return target_->DeleteDir(d); } Status GetFileSize(const std::string &f, uint64_t *s) { return target_->GetFileSize(f, s); } Status RenameFile(const std::string &s, const std::string &t) { return target_->RenameFile(s, t); } Status LockFile(const std::string &f, FileLock **l) { return target_->LockFile(f, l); } Status UnlockFile(FileLock *l) { return target_->UnlockFile(l); } void Schedule(void (*f)(void *), void *a) { return target_->Schedule(f, a); } void StartThread(void (*f)(void *), void *a) { return target_->StartThread(f, a); } virtual Status GetTestDirectory(std::string *path) { return target_->GetTestDirectory(path); } virtual Status NewLogger(const std::string &fname, Logger **result) { return target_->NewLogger(fname, result); } uint64_t NowMicros() { return target_->NowMicros(); } void SleepForMicroseconds(int micros) { target_->SleepForMicroseconds(micros); } private: Env *target_; }; } // namespace leveldb #endif // STORAGE_LEVELDB_INCLUDE_ENV_H_
12,539
30.827411
93
h
null
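As the closing comment of the header says, EnvWrapper exists so a client can override just one or two Env methods and forward everything else. A sketch of a wrapper that counts writable-file creations (CountingEnv is a hypothetical class, not part of leveldb):

// Hypothetical EnvWrapper subclass overriding a single method.
#include "leveldb/env.h"
#include <atomic>
#include <cstdio>

class CountingEnv : public leveldb::EnvWrapper {
 public:
  explicit CountingEnv(leveldb::Env* base) : leveldb::EnvWrapper(base), files_created_(0) {}

  leveldb::Status NewWritableFile(const std::string& fname, leveldb::WritableFile** result) {
    files_created_.fetch_add(1);                      // extra bookkeeping ...
    return target()->NewWritableFile(fname, result);  // ... then forward as usual
  }

  int files_created() const { return files_created_.load(); }

 private:
  std::atomic<int> files_created_;
};

int main() {
  CountingEnv env(leveldb::Env::Default());   // Default() is owned by leveldb; never delete it
  leveldb::WritableFile* f = nullptr;
  leveldb::Status s = env.NewWritableFile("/tmp/counting_env_demo", &f);
  if (s.ok()) { f->Close(); delete f; }
  std::printf("writable files created: %d\n", env.files_created());
  return 0;
}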
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/utils/jenkins/scripts/createNamespace.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2019-2020, Intel Corporation # createNamespace.sh - Remove old namespaces and create new set -e # region used for dax namespaces. DEV_DAX_R=0x0000 # region used for fsdax namespaces. FS_DAX_R=0x0001 CREATE_DAX=false CREATE_PMEM=false MOUNT_POINT="/mnt/pmem0" SIZE=100G function usage() { echo "" echo "Script for creating namespaces, mountpoint, and configuring file permissions." echo "Usage: $(basename $1) [-h|--help] [-d|--dax] [-p|--pmem] [--size]" echo "-h, --help Print help and exit" echo "-d, --dax Create dax device." echo "-p, --pmem Create fsdax device and create mountpoint." echo "--size Set size for namespaces [default: $SIZE]" } function clear_namespaces() { scriptdir=$(readlink -f $(dirname ${BASH_SOURCE[0]})) $scriptdir/removeNamespaces.sh } function create_devdax() { local align=$1 local size=$2 local cmd="sudo ndctl create-namespace --mode devdax -a ${align} -s ${size} -r ${DEV_DAX_R} -f" result=$(${cmd}) if [ $? -ne 0 ]; then exit 1; fi jq -r '.daxregion.devices[].chardev' <<< $result } function create_fsdax() { local size=$1 local cmd="sudo ndctl create-namespace --mode fsdax -s ${size} -r ${FS_DAX_R} -f" result=$(${cmd}) if [ $? -ne 0 ]; then exit 1; fi jq -r '.blockdev' <<< $result } while getopts ":dhp-:" optchar; do case "${optchar}" in -) case "$OPTARG" in help) usage $0 && exit 0 ;; dax) CREATE_DAX=true ;; pmem) CREATE_PMEM=true ;; size=*) SIZE="${OPTARG#*=}" ;; *) echo "Invalid argument '$OPTARG'"; usage $0 && exit 1 ;; esac ;; p) CREATE_PMEM=true ;; d) CREATE_DAX=true ;; h) usage $0 && exit 0 ;; *) echo "Invalid argument '$OPTARG'"; usage $0 && exit 1 ;; esac done # There is no default test cofiguration in this script. Configurations has to be specified. if ! $CREATE_DAX && ! $CREATE_PMEM; then echo "" echo "ERROR: No config type selected. Please select one or more config types." exit 1 fi # Remove existing namespaces. clear_namespaces # Creating namespaces. trap 'echo "ERROR: Failed to create namespaces"; clear_namespaces; exit 1' ERR SIGTERM SIGABRT if $CREATE_DAX; then create_devdax 4k $SIZE fi if $CREATE_PMEM; then pmem_name=$(create_fsdax $SIZE) fi # Creating mountpoint. trap 'echo "ERROR: Failed to create mountpoint"; clear_namespaces; exit 1' ERR SIGTERM SIGABRT if $CREATE_PMEM; then if [ ! -d "$MOUNT_POINT" ]; then sudo mkdir $MOUNT_POINT fi if ! grep -qs "$MOUNT_POINT " /proc/mounts; then sudo mkfs.ext4 -F /dev/$pmem_name sudo mount -o dax /dev/$pmem_name $MOUNT_POINT fi fi # Changing file permissions. sudo chmod 777 $MOUNT_POINT || true sudo chmod 777 /dev/dax* || true sudo chmod a+rw /sys/bus/nd/devices/region*/deep_flush sudo chmod +r /sys/bus/nd/devices/ndbus*/region*/resource sudo chmod +r /sys/bus/nd/devices/ndbus*/region*/dax*/resource # Print created namespaces. ndctl list -X | jq -r '.[] | select(.mode=="devdax") | [.daxregion.devices[].chardev, "align: "+(.daxregion.align/1024|tostring+"k"), "size: "+(.size/1024/1024/1024|tostring+"G") ]' ndctl list | jq -r '.[] | select(.mode=="fsdax") | [.blockdev, "size: "+(.size/1024/1024/1024|tostring+"G") ]'
3,239
26.457627
181
sh
null
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/utils/jenkins/scripts/removeNamespaces.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2019-2020, Intel Corporation # removeNamespaces.sh - clear all existing namespaces. set -e MOUNT_POINT="/mnt/pmem*" sudo umount $MOUNT_POINT || true namespace_names=$(ndctl list -X | jq -r '.[].dev') for n in $namespace_names do sudo ndctl clear-errors $n -v done sudo ndctl disable-namespace all || true sudo ndctl destroy-namespace all || true
424
20.25
54
sh
null
NearPMSW-main/nearpmMDsync/logging/pmemkv-bench/utils/jenkins/scripts/common.sh
#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2019-2020, Intel Corporation # common.sh - contains bash functions used in all jenkins pipelines. set -o pipefail scriptdir=$(readlink -f $(dirname ${BASH_SOURCE[0]})) function system_info { echo "********** system-info **********" cat /etc/os-release | grep -oP "PRETTY_NAME=\K.*" uname -r echo "libndctl: $(pkg-config --modversion libndctl || echo 'libndctl not found')" echo "libfabric: $(pkg-config --modversion libfabric || echo 'libfabric not found')" echo "libpmem: $(pkg-config --modversion libpmem || echo 'libpmem not found')" echo "libpmemobj: $(pkg-config --modversion libpmemobj || echo 'libpmemobj not found')" echo "libpmemobj++: $(pkg-config --modversion libpmemobj++ || echo 'libpmemobj++ not found')" echo "memkind: $(pkg-config --modversion memkind || echo 'memkind not found')" echo "TBB : $(pkg-config --modversion TBB || echo 'TBB not found')" echo "valgrind: $(pkg-config --modversion valgrind || echo 'valgrind not found')" echo "**********memory-info**********" sudo ipmctl show -dimm || true sudo ipmctl show -topology || true echo "**********list-existing-namespaces**********" sudo ndctl list -M -N echo "**********installed-packages**********" zypper se --installed-only 2>/dev/null || true apt list --installed 2>/dev/null || true yum list installed 2>/dev/null || true echo "**********/proc/cmdline**********" cat /proc/cmdline echo "**********/proc/modules**********" cat /proc/modules echo "**********/proc/cpuinfo**********" cat /proc/cpuinfo echo "**********/proc/meminfo**********" cat /proc/meminfo echo "**********/proc/swaps**********" cat /proc/swaps echo "**********/proc/version**********" cat /proc/version echo "**********check-updates**********" sudo zypper list-updates 2>/dev/null || true sudo apt-get update 2>/dev/null || true ; apt upgrade --dry-run 2>/dev/null || true sudo dnf check-update 2>/dev/null || true echo "**********list-environment**********" env } function set_warning_message { local info_addr=$1 sudo bash -c "cat > /etc/motd <<EOL ___ ___ / \ / \ HELLO! \_ \ / __/ THIS NODE IS CONNECTED TO PMEMKV JENKINS _\ \ / /__ THERE ARE TESTS CURRENTLY RUNNING ON THIS MACHINE \___ \____/ __/ PLEASE GO AWAY :) \_ _/ | @ @ \_ | FOR MORE INFORMATION GO: ${info_addr} _/ /\ /o) (o/\ \_ \_____/ / \____/ EOL" } function disable_warning_message { sudo rm /etc/motd || true } # Check host linux distribution and return distro name function check_distro { distro=$(cat /etc/os-release | grep -e ^NAME= | cut -c6-) && echo "${distro//\"}" }
2,808
34.556962
94
sh
null
NearPMSW-main/nearpmMDsync/logging/include/txopt.cc
#include "txopt.h" #include <string.h> // source: http://stackoverflow.com/questions/1919183/how-to-allocate-and-free-aligned-memory-in-c void * aligned_malloc(int size) { void *mem = malloc(size+64+sizeof(void*)); void **ptr = (void**)((uintptr_t)((uint64_t)mem+64+uint64_t(sizeof(void*))) & ~(64-1)); ptr[-1] = mem; return ptr; } // source: http://stackoverflow.com/questions/1640258/need-a-fast-random-generator-for-c static unsigned long x=123456789, y=362436069, z=521288629; unsigned long xorshf96() { //period 2^96-1 unsigned long t; x ^= x << 16; x ^= x >> 5; x ^= x << 1; t = x; x = y; y = z; z = t ^ x ^ y; return z; } //volatile void s_fence(); // Flush the selected addresses //volatile void metadata_cache_flush(uint64_t addr, unsigned size); //volatile void cache_flush(uint64_t addr, unsigned size); //volatile void flush_caches(uint64_t addr, unsigned size); // Flush the one cacheline //volatile inline void metadata_flush(uint64_t addr); //volatile inline void cache_flush(uint64_t addr); // Flush the whole caches //volatile inline void metadata_flush(); //volatile inline void cache_flush(); //volatile void TX_OPT(uint64_t addr, unsigned size); // Deduplication and Compression are transaparent /* class Dedup { public: }; class Compress { public: } */ uint64_t CounterAtomic::currAtomicAddr = COUNTER_ATOMIC_VADDR; //uint64_t CounterAtomic::currCacheFlushAddr = CACHE_FLUSH_VADDR; //uint64_t CounterAtomic::currCounterCacheFlushAddr = COUNTER_CACHE_FLUSH_VADDR; void* CounterAtomic::counter_atomic_malloc(unsigned _size) { return (void*)getNextAtomicAddr(_size); } volatile void metadata_cache_flush(void* addr, unsigned size) { int num_cache_line = size / CACHE_LINE_SIZE; if ((uint64_t)addr % CACHE_LINE_SIZE) num_cache_line++; for (int i = 0; i < num_cache_line; ++i) *((volatile uint64_t*)METADATA_CACHE_FLUSH_VADDR) = (uint64_t)addr + i * CACHE_LINE_SIZE; } volatile void cache_flush(void* addr, unsigned size) { int num_cache_line = size / CACHE_LINE_SIZE; if ((uint64_t)addr % CACHE_LINE_SIZE) num_cache_line++; for (int i = 0; i < num_cache_line; ++i) *((volatile uint64_t*)CACHE_FLUSH_VADDR) = (uint64_t)addr + i * CACHE_LINE_SIZE; } volatile void flush_caches(void* addr, unsigned size) { cache_flush(addr, size); metadata_cache_flush(addr, size); } // OPT with both data and addr ready volatile void OPT(void* opt_obj, bool reg, void* pmemaddr, void* data, unsigned size) { // fprintf(stderr, "size: %u\n", size); opt_packet_t opt_packet; opt_packet.opt_obj = opt_obj; //opt_packet.seg_id = i; //opt_packet.pmemaddr = (void*)((uint64_t)(pmemaddr) + i * CACHE_LINE_SIZE); opt_packet.pmemaddr = pmemaddr; //opt_packet.data_ptr = (void*)((uint64_t)(data) + i * CACHE_LINE_SIZE); //opt_packet.data_val = 0; opt_packet.size = size; opt_packet.type = (!reg ? FLAG_OPT : FLAG_OPT_REG); //opt_packet.type = FLAG_OPT; *((opt_packet_t*)TXOPT_VADDR) = opt_packet; //*((opt_packet_t*)TXOPT_VADDR) = (opt_packet_t){opt_obj, pmemaddr, size, FLAG_OPT_DATA}; } // OPT with both data (int) and addr ready volatile void OPT_VAL(void* opt_obj, bool reg, void* pmemaddr, int data_val) { opt_packet_t opt_packet; opt_packet.opt_obj = opt_obj; opt_packet.pmemaddr = pmemaddr; //opt_packet.data_ptr = 0; //opt_packet.data_val = data_val; opt_packet.size = sizeof(int); opt_packet.type = (!reg ? 
FLAG_OPT_VAL : FLAG_OPT_VAL_REG); //opt_packet.type = FLAG_OPT; *((opt_packet_t*)TXOPT_VADDR) = opt_packet; } // OPT with only data ready volatile void OPT_DATA(void* opt_obj, bool reg, void* data, unsigned size) { opt_packet_t opt_packet; opt_packet.opt_obj = opt_obj; opt_packet.pmemaddr = 0; //opt_packet.data_ptr = (void*)((uint64_t)(data) + i * CACHE_LINE_SIZE); //opt_packet.data_val = 0; opt_packet.size = size; opt_packet.type = (!reg ? FLAG_OPT_DATA : FLAG_OPT_DATA_REG); //opt_packet.type = FLAG_OPT; *((opt_packet_t*)TXOPT_VADDR) = opt_packet; } // OPT with only addr ready volatile void OPT_ADDR(void* opt_obj, bool reg, void* pmemaddr, unsigned size) { opt_packet_t opt_packet; opt_packet.opt_obj = opt_obj; opt_packet.pmemaddr = pmemaddr; //opt_packet.data_ptr = 0; //opt_packet.data_val = 0; opt_packet.size = size; opt_packet.type = (!reg ? FLAG_OPT_ADDR : FLAG_OPT_ADDR_REG); //opt_packet.type = FLAG_OPT; *((opt_packet_t*)TXOPT_VADDR) = opt_packet; } // OPT with only data (int) ready volatile void OPT_DATA_VAL(void* opt_obj, bool reg, int data_val) { opt_packet_t opt_packet; opt_packet.opt_obj = opt_obj; opt_packet.pmemaddr = 0; //opt_packet.data_ptr = 0; //opt_packet.data_val = data_val; opt_packet.size = sizeof(int); opt_packet.type = (!reg ? FLAG_OPT_DATA_VAL : FLAG_OPT_DATA_VAL_REG); //opt_packet.type = FLAG_OPT; *((opt_packet_t*)TXOPT_VADDR) = opt_packet; } volatile void OPT_START(void* opt_obj) { opt_packet_t opt_packet; opt_packet.opt_obj = opt_obj; opt_packet.type = FLAG_OPT_START; } volatile void s_fence() { std::atomic_thread_fence(std::memory_order_acq_rel); } CounterAtomic::CounterAtomic() { val_addr = getNextAtomicAddr(CACHE_LINE_SIZE); } CounterAtomic::CounterAtomic(uint64_t _val) { val_addr = getNextAtomicAddr(CACHE_LINE_SIZE); *((volatile uint64_t*)val_addr) = _val; } CounterAtomic::CounterAtomic(bool _val) { *((volatile uint64_t*)val_addr) = uint64_t(_val); val_addr = getNextAtomicAddr(CACHE_LINE_SIZE); } uint64_t CounterAtomic::getValue() { return *((volatile uint64_t*)val_addr); } uint64_t CounterAtomic::getPtr() { return val_addr; } CounterAtomic& CounterAtomic::operator=(uint64_t _val) { *((volatile uint64_t*)val_addr) = _val; return *this; } CounterAtomic& CounterAtomic::operator+(uint64_t _val) { *((volatile uint64_t*)val_addr) += _val; return *this; } CounterAtomic& CounterAtomic::operator++() { uint64_t val = *((volatile uint64_t*)val_addr); val++; *((volatile uint64_t*)val_addr) = val; return *this; } CounterAtomic& CounterAtomic::operator--() { uint64_t val = *((volatile uint64_t*)val_addr); val--; *((volatile uint64_t*)val_addr) = val; return *this; } CounterAtomic& CounterAtomic::operator-(uint64_t _val) { *((volatile uint64_t*)val_addr) -= _val; return *this; } bool CounterAtomic::operator==(uint64_t _val) { return *((volatile uint64_t*)val_addr) == _val; } bool CounterAtomic::operator!=(uint64_t _val) { return *((volatile uint64_t*)val_addr) != _val; } uint64_t CounterAtomic::getNextAtomicAddr(unsigned _size) { if (currAtomicAddr + _size >= COUNTER_ATOMIC_VADDR + NUM_COUNTER_ATOMIC_PAGE*4*1024) { printf("@@not enough counter atomic space, current addr=%lu, size=%u\n", currAtomicAddr, _size); exit(0); } currAtomicAddr += _size; return (currAtomicAddr - _size); } volatile void CounterAtomic::statOutput() { *((volatile uint64_t*) (STATUS_OUTPUT_VADDR))= 0; } volatile void CounterAtomic::initCounterCache() { *((volatile uint64_t*) (INIT_METADATA_CACHE_VADDR))= 0; }
6,969
24.345455
98
cc
null
NearPMSW-main/nearpmMDsync/logging/include/txopt.h
// The starting address of the selected counter_atomic writes #ifndef TXOPT_H #define TXOPT_H #define COUNTER_ATOMIC_VADDR (4096UL*1024*1024) #define NUM_COUNTER_ATOMIC_PAGE 262144 // The starting address of the flush cache instruction #define CACHE_FLUSH_VADDR (4096UL*1024*1024+4*NUM_COUNTER_ATOMIC_PAGE*1024) // The starting address of the flush metadata cache instruction #define METADATA_CACHE_FLUSH_VADDR (4096UL*1024*1024+(4*NUM_COUNTER_ATOMIC_PAGE+4)*1024) #define STATUS_OUTPUT_VADDR (METADATA_CACHE_FLUSH_VADDR + 1024UL) #define INIT_METADATA_CACHE_VADDR (STATUS_OUTPUT_VADDR + 1024UL) #define TXOPT_VADDR (INIT_METADATA_CACHE_VADDR+1024UL) #define CACHE_LINE_SIZE 64UL #include <vector> #include <deque> #include <cstdlib> #include <cstdint> #include <atomic> #include <stdio.h> #include <cassert> enum opt_flag { FLAG_OPT, FLAG_OPT_VAL, FLAG_OPT_ADDR, FLAG_OPT_DATA, FLAG_OPT_DATA_VAL, /* register no execute */ FLAG_OPT_REG, FLAG_OPT_VAL_REG, FLAG_OPT_ADDR_REG, FLAG_OPT_DATA_REG, FLAG_OPT_DATA_VAL_REG, /* execute registered OPT */ FLAG_OPT_START }; struct opt_t { //int pid; int obj_id; }; // Fields in the OPT packet // Used by both SW and HW struct opt_packet_t { void* opt_obj; void* pmemaddr; //void* data_ptr; //int seg_id; //int data_val; unsigned size; opt_flag type; }; // OPT with both data and addr ready volatile void OPT(void* opt_obj, bool reg, void* pmemaddr, void* data, unsigned size); //#define OPT(opt_obj, pmemaddr, data, size) \ // *((opt_packet_t*)TXOPT_VADDR) = (opt_packet_t){opt_obj, pmemaddr, size, FLAG_OPT_DATA}; // OPT with both data (int) and addr ready volatile void OPT_VAL(void* opt_obj, bool reg, void* pmemaddr, int data_val); // OPT with only data ready volatile void OPT_DATA(void* opt_obj, bool reg, void* data, unsigned size); // OPT with only addr ready volatile void OPT_ADDR(void* opt_obj, bool reg, void* pmemaddr, unsigned size); // OPT with only data (int) ready volatile void OPT_DATA_VAL(void* opt_obj, bool reg, int data_val); // Begin OPT operation volatile void OPT_START(void* opt_obj); // store barrier volatile void s_fence(); // flush both metadata cache and data cache volatile void flush_caches(void* addr, unsigned size); // flush data cache only volatile void cache_flush(void* addr, unsigned size); // flush metadata cache only volatile void metadata_cache_flush(void* addr, unsigned size); // malloc that is cache-line aligned void *aligned_malloc(int size); class CounterAtomic { public: static void* counter_atomic_malloc(unsigned _size); // size is num of bytes static volatile void statOutput(); static volatile void initCounterCache(); uint64_t getValue(); uint64_t getPtr(); CounterAtomic(); CounterAtomic(uint64_t _val); CounterAtomic(bool _val); CounterAtomic& operator=(uint64_t _val); CounterAtomic& operator+(uint64_t _val); CounterAtomic& operator++(); CounterAtomic& operator--(); CounterAtomic& operator-(uint64_t _val); bool operator==(uint64_t _val); bool operator!=(uint64_t _val); private: void init(); static uint64_t getNextAtomicAddr(unsigned _size); static uint64_t getNextCacheFlushAddr(unsigned _size); //static uint64_t getNextPersistBarrierAddr(unsigned _size); static uint64_t getNextCounterCacheFlushAddr(unsigned _size); static uint64_t currAtomicAddr; static uint64_t currCacheFlushAddr; //static uint64_t currPersistentBarrierAddr; static uint64_t currCounterCacheFlushAddr; /* static bool hasAllocateCacheFlush; static bool hasAllocateCounterCacheFlush; static bool hasAllocatePersistBarrier; */ //uint64_t val; uint64_t val_addr = 0; }; #endif
3,665
26.155556
90
h
null
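The txopt interface above drives the NearPM hardware model through stores to reserved virtual addresses (TXOPT_VADDR and friends), so it only does something meaningful inside the accompanying simulator setup that backs those addresses; on a stock machine the stores would simply fault. With that caveat, a sketch of how the declared calls are meant to be combined for one persistent update (the object id, buffer size, and fill value are invented for illustration):

// Illustrative-only sketch of the txopt API; runs only under the NearPM setup
// that maps the reserved addresses, and every concrete value below is made up.
#include "txopt.h"
#include <cstring>

int main() {
  // Persistent buffer carved out of the counter-atomic region.
  char* pmem_buf = static_cast<char*>(CounterAtomic::counter_atomic_malloc(CACHE_LINE_SIZE));

  opt_t obj;
  obj.obj_id = 1;                             // application-chosen id for this OPT object

  char new_value[CACHE_LINE_SIZE];
  std::memset(new_value, 0xAB, sizeof(new_value));

  // Hand the update (address + data) to the OPT engine, then order and flush it.
  OPT(&obj, /*reg=*/false, pmem_buf, new_value, sizeof(new_value));
  s_fence();                                  // store barrier
  flush_caches(pmem_buf, sizeof(new_value));  // flush both data and metadata caches
  s_fence();
  return 0;
}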
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/crc32c.h
#ifndef CRC32C_H #define CRC32C_H typedef uint32_t (*crc_func)(uint32_t crc, const void *buf, size_t len); crc_func crc32c; void crc32c_init(void); #endif /* CRC32C_H */
179
17
72
h
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/slabs.h
/* * Copyright 2018 Lenovo * * Licensed under the BSD-3 license. see LICENSE.Lenovo.txt for full text */ /* * Note: * Codes enclosed in `#ifdef PSLAB' and `#endif' are added by Lenovo for * persistent memory support */ /* slabs memory allocation */ #ifndef SLABS_H #define SLABS_H /** Init the subsystem. 1st argument is the limit on no. of bytes to allocate, 0 if no limit. 2nd argument is the growth factor; each slab will use a chunk size equal to the previous slab's chunk size times this factor. 3rd argument specifies if the slab allocator should allocate all memory up front (if true), or allocate memory in chunks as it is needed (if false) */ void slabs_init(const size_t limit, const double factor, const bool prealloc, const uint32_t *slab_sizes); /** Call only during init. Pre-allocates all available memory */ void slabs_prefill_global(void); #ifdef PSLAB int slabs_dump_sizes(uint32_t *slab_sizes, int max); void slabs_prefill_global_from_pmem(void); void slabs_update_policy(void); int do_slabs_renewslab(const unsigned int id, char *ptr); void do_slab_realloc(item *it, unsigned int id); void do_slabs_free(void *ptr, const size_t size, unsigned int id); #endif /** * Given object size, return id to use when allocating/freeing memory for object * 0 means error: can't store such a large object */ unsigned int slabs_clsid(const size_t size); /** Allocate object of given length. 0 on error */ /*@null@*/ #define SLABS_ALLOC_NO_NEWPAGE 1 void *slabs_alloc(const size_t size, unsigned int id, uint64_t *total_bytes, unsigned int flags); /** Free previously allocated object */ void slabs_free(void *ptr, size_t size, unsigned int id); /** Adjust the stats for memory requested */ void slabs_adjust_mem_requested(unsigned int id, size_t old, size_t ntotal); /** Adjust global memory limit up or down */ bool slabs_adjust_mem_limit(size_t new_mem_limit); /** Return a datum for stats in binary protocol */ bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c); typedef struct { unsigned int chunks_per_page; unsigned int chunk_size; long int free_chunks; long int total_pages; } slab_stats_automove; void fill_slab_stats_automove(slab_stats_automove *am); unsigned int global_page_pool_size(bool *mem_flag); /** Fill buffer with stats */ /*@null@*/ void slabs_stats(ADD_STAT add_stats, void *c); /* Hints as to freespace in slab class */ unsigned int slabs_available_chunks(unsigned int id, bool *mem_flag, uint64_t *total_bytes, unsigned int *chunks_perslab); void slabs_mlock(void); void slabs_munlock(void); int start_slab_maintenance_thread(void); void stop_slab_maintenance_thread(void); enum reassign_result_type { REASSIGN_OK=0, REASSIGN_RUNNING, REASSIGN_BADCLASS, REASSIGN_NOSPARE, REASSIGN_SRC_DST_SAME }; enum reassign_result_type slabs_reassign(int src, int dst); void slabs_rebalancer_pause(void); void slabs_rebalancer_resume(void); #ifdef EXTSTORE void slabs_set_storage(void *arg); #endif #endif
3,024
31.180851
122
h
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/pslab.h
/*
 * Copyright 2018 Lenovo
 *
 * Licensed under the BSD-3 license. see LICENSE.Lenovo.txt for full text
 */

#ifndef PSLAB_H
#define PSLAB_H

#include <libpmem.h>

#define PSLAB_POLICY_DRAM 0
#define PSLAB_POLICY_PMEM 1
#define PSLAB_POLICY_BALANCED 2

#define pmem_member_persist(p, m) \
    pmem_persist(&(p)->m, sizeof ((p)->m))
#define pmem_member_flush(p, m) \
    pmem_flush(&(p)->m, sizeof ((p)->m))
#define pmem_flush_from(p, t, m) \
    pmem_flush(&(p)->m, sizeof (t) - offsetof(t, m));
#define pslab_item_data_persist(it) pmem_persist((it)->data, ITEM_dtotal(it))
#define pslab_item_data_flush(it) pmem_flush((it)->data, ITEM_dtotal(it))

int pslab_create(char *pool_name, uint32_t pool_size, uint32_t slab_size,
    uint32_t *slabclass_sizes, int slabclass_num);
int pslab_pre_recover(char *name, uint32_t *slab_sizes, int slab_max,
    int slab_page_size);
int pslab_do_recover(void);
time_t pslab_process_started(time_t process_started);
void pslab_update_flushtime(uint32_t time);
void pslab_use_slab(void *p, int id, unsigned int size);
void *pslab_get_free_slab(void *slab);
int pslab_contains(char *p);
uint64_t pslab_addr2off(void *addr);

extern bool pslab_force;

#endif
1,186
30.236842
90
h
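A small illustration of the pmem_member_persist/pmem_member_flush convenience macros defined above. The example struct, its field names, and the closing pmem_drain() call are hypothetical; only the macros and libpmem calls named in the header are used.

#include <libpmem.h>
#include "pslab.h"   // the header above (assumed include path)

// Hypothetical persistent record used only to show the macro expansion.
struct pmeta {
    uint32_t flushtime;
    uint64_t version;
};

static void update_meta(struct pmeta *m, uint32_t now) {
    m->flushtime = now;
    pmem_member_persist(m, flushtime);  // pmem_persist(&m->flushtime, sizeof m->flushtime)
    m->version++;
    pmem_member_flush(m, version);      // flush only, no drain
    pmem_drain();                       // complete the deferred flush
}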
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/config.h
/* config.h. Generated from config.h.in by configure. */ /* config.h.in. Generated from configure.ac by autoheader. */ /* Set to nonzero if you want to include DTRACE */ /* #undef ENABLE_DTRACE */ /* Set to nonzero if you want to include SASL */ /* #undef ENABLE_SASL */ /* Set to nonzero if you want to enable a SASL pwdb */ /* #undef ENABLE_SASL_PWDB */ /* machine is bigendian */ /* #undef ENDIAN_BIG */ /* machine is littleendian */ #define ENDIAN_LITTLE 1 /* Set to nonzero if you want to enable extstorextstore */ /* #undef EXTSTORE */ /* Define to 1 if support accept4 */ #define HAVE_ACCEPT4 1 /* Define to 1 if you have the `clock_gettime' function. */ #define HAVE_CLOCK_GETTIME 1 /* Define this if you have an implementation of drop_privileges() */ /* #undef HAVE_DROP_PRIVILEGES */ /* Define this if you have an implementation of drop_worker_privileges() */ /* #undef HAVE_DROP_WORKER_PRIVILEGES */ /* GCC 64bit Atomics available */ /* #undef HAVE_GCC_64ATOMICS */ /* GCC Atomics available */ #define HAVE_GCC_ATOMICS 1 /* Define to 1 if support getopt_long */ #define HAVE_GETOPT_LONG 1 /* Define to 1 if you have the `getpagesizes' function. */ /* #undef HAVE_GETPAGESIZES */ /* Have ntohll */ /* #undef HAVE_HTONLL */ /* Define to 1 if you have the <inttypes.h> header file. */ #define HAVE_INTTYPES_H 1 /* Define to 1 if you have the `memcntl' function. */ /* #undef HAVE_MEMCNTL */ /* Define to 1 if you have the <memory.h> header file. */ #define HAVE_MEMORY_H 1 /* Define to 1 if you have the `mlockall' function. */ #define HAVE_MLOCKALL 1 /* Define to 1 if you have the `pledge' function. */ /* #undef HAVE_PLEDGE */ /* we have sasl_callback_ft */ /* #undef HAVE_SASL_CALLBACK_FT */ /* Set to nonzero if your SASL implementation supports SASL_CB_GETCONF */ /* #undef HAVE_SASL_CB_GETCONF */ /* Define to 1 if you have the <sasl/sasl.h> header file. */ /* #undef HAVE_SASL_SASL_H */ /* Define to 1 if you have the `setppriv' function. */ /* #undef HAVE_SETPPRIV */ /* Define to 1 if you have the `sigignore' function. */ #define HAVE_SIGIGNORE 1 /* Define to 1 if stdbool.h conforms to C99. */ #define HAVE_STDBOOL_H 1 /* Define to 1 if you have the <stdint.h> header file. */ #define HAVE_STDINT_H 1 /* Define to 1 if you have the <stdlib.h> header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the <strings.h> header file. */ #define HAVE_STRINGS_H 1 /* Define to 1 if you have the <string.h> header file. */ #define HAVE_STRING_H 1 /* Define to 1 if you have the <sys/stat.h> header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the <sys/types.h> header file. */ #define HAVE_SYS_TYPES_H 1 /* Define this if you have umem.h */ /* #undef HAVE_UMEM_H */ /* Define to 1 if you have the <unistd.h> header file. */ #define HAVE_UNISTD_H 1 /* Define to 1 if the system has the type `_Bool'. */ #define HAVE__BOOL 1 /* Machine need alignment */ /* #undef NEED_ALIGN */ /* Name of package */ #define PACKAGE "memcached" /* Define to the address where bug reports for this package should be sent. */ #define PACKAGE_BUGREPORT "[email protected]" /* Define to the full name of this package. */ #define PACKAGE_NAME "memcached" /* Define to the full name and version of this package. */ #define PACKAGE_STRING "memcached 1.5.4" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "memcached" /* Define to the home page for this package. */ #define PACKAGE_URL "" /* Define to the version of this package. 
*/ #define PACKAGE_VERSION "1.5.4" /* Set to nonzero if you want to enable pslab */ #define PSLAB 1 /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 /* Version number of package */ #define VERSION "1.5.4" /* find sigignore on Linux */ #define _GNU_SOURCE 1 /* Define to empty if `const' does not conform to ANSI C. */ /* #undef const */ /* define to int if socklen_t not available */ /* #undef socklen_t */ #if HAVE_STDBOOL_H #include <stdbool.h> #else #define bool char #define false 0 #define true 1 #endif #ifdef HAVE_INTTYPES_H #include <inttypes.h> #endif
4,134
24.368098
78
h
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/sasl_defs.h
#ifndef SASL_DEFS_H
#define SASL_DEFS_H 1

// Longest one I could find was ``9798-U-RSA-SHA1-ENC''
#define MAX_SASL_MECH_LEN 32

#if defined(HAVE_SASL_SASL_H) && defined(ENABLE_SASL)
#include <sasl/sasl.h>

void init_sasl(void);

extern char my_sasl_hostname[1025];

#else /* End of SASL support */

typedef void* sasl_conn_t;

#define init_sasl() {}
#define sasl_dispose(x) {}
#define sasl_server_new(a, b, c, d, e, f, g, h) 1
#define sasl_listmech(a, b, c, d, e, f, g, h) 1
#define sasl_server_start(a, b, c, d, e, f) 1
#define sasl_server_step(a, b, c, d, e) 1
#define sasl_getprop(a, b, c) {}

#define SASL_OK 0
#define SASL_CONTINUE -1

#endif /* sasl compat */

#endif /* SASL_DEFS_H */
693
20.6875
55
h
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/logger.h
/* logging functions */ #ifndef LOGGER_H #define LOGGER_H #include "bipbuffer.h" /* TODO: starttime tunable */ #define LOGGER_BUF_SIZE 1024 * 64 #define LOGGER_WATCHER_BUF_SIZE 1024 * 256 #define LOGGER_ENTRY_MAX_SIZE 2048 #define GET_LOGGER() ((logger *) pthread_getspecific(logger_key)); /* Inlined from memcached.h - should go into sub header */ typedef unsigned int rel_time_t; enum log_entry_type { LOGGER_ASCII_CMD = 0, LOGGER_EVICTION, LOGGER_ITEM_GET, LOGGER_ITEM_STORE, LOGGER_CRAWLER_STATUS, LOGGER_SLAB_MOVE, #ifdef EXTSTORE LOGGER_EXTSTORE_WRITE, LOGGER_COMPACT_START, LOGGER_COMPACT_ABORT, LOGGER_COMPACT_READ_START, LOGGER_COMPACT_READ_END, LOGGER_COMPACT_END, LOGGER_COMPACT_FRAGINFO, #endif }; enum log_entry_subtype { LOGGER_TEXT_ENTRY = 0, LOGGER_EVICTION_ENTRY, LOGGER_ITEM_GET_ENTRY, LOGGER_ITEM_STORE_ENTRY, #ifdef EXTSTORE LOGGER_EXT_WRITE_ENTRY, #endif }; enum logger_ret_type { LOGGER_RET_OK = 0, LOGGER_RET_NOSPACE, LOGGER_RET_ERR }; enum logger_parse_entry_ret { LOGGER_PARSE_ENTRY_OK = 0, LOGGER_PARSE_ENTRY_FULLBUF, LOGGER_PARSE_ENTRY_FAILED }; typedef const struct { enum log_entry_subtype subtype; int reqlen; uint16_t eflags; char *format; } entry_details; /* log entry intermediary structures */ struct logentry_eviction { long long int exptime; uint32_t latime; uint16_t it_flags; uint8_t nkey; uint8_t clsid; char key[]; }; #ifdef EXTSTORE struct logentry_ext_write { long long int exptime; uint32_t latime; uint16_t it_flags; uint8_t nkey; uint8_t clsid; uint8_t bucket; char key[]; }; #endif struct logentry_item_get { uint8_t was_found; uint8_t nkey; uint8_t clsid; char key[]; }; struct logentry_item_store { int status; int cmd; rel_time_t ttl; uint8_t nkey; uint8_t clsid; char key[]; }; /* end intermediary structures */ typedef struct _logentry { enum log_entry_subtype event; uint16_t eflags; uint64_t gid; struct timeval tv; /* not monotonic! */ int size; union { void *entry; /* probably an item */ char end; } data[]; } logentry; #define LOG_SYSEVENTS (1<<1) /* threads start/stop/working */ #define LOG_FETCHERS (1<<2) /* get/gets/etc */ #define LOG_MUTATIONS (1<<3) /* set/append/incr/etc */ #define LOG_SYSERRORS (1<<4) /* malloc/etc errors */ #define LOG_CONNEVENTS (1<<5) /* new client, closed, etc */ #define LOG_EVICTIONS (1<<6) /* details of evicted items */ #define LOG_STRICT (1<<7) /* block worker instead of drop */ #define LOG_RAWCMDS (1<<9) /* raw ascii commands */ typedef struct _logger { struct _logger *prev; struct _logger *next; pthread_mutex_t mutex; /* guard for this + *buf */ uint64_t written; /* entries written to the buffer */ uint64_t dropped; /* entries dropped */ uint64_t blocked; /* times blocked instead of dropped */ uint16_t fetcher_ratio; /* log one out of every N fetches */ uint16_t mutation_ratio; /* log one out of every N mutations */ uint16_t eflags; /* flags this logger should log */ bipbuf_t *buf; const entry_details *entry_map; } logger; enum logger_watcher_type { LOGGER_WATCHER_STDERR = 0, LOGGER_WATCHER_CLIENT = 1 }; typedef struct { void *c; /* original connection structure. 
still with source thread attached */ int sfd; /* client fd */ int id; /* id number for watcher list */ uint64_t skipped; /* lines skipped since last successful print */ bool failed_flush; /* recently failed to write out (EAGAIN), wait before retry */ enum logger_watcher_type t; /* stderr, client, syslog, etc */ uint16_t eflags; /* flags we are interested in */ bipbuf_t *buf; /* per-watcher output buffer */ } logger_watcher; struct logger_stats { uint64_t worker_dropped; uint64_t worker_written; uint64_t watcher_skipped; uint64_t watcher_sent; }; extern pthread_key_t logger_key; /* public functions */ void logger_init(void); logger *logger_create(void); #define LOGGER_LOG(l, flag, type, ...) \ do { \ logger *myl = l; \ if (l == NULL) \ myl = GET_LOGGER(); \ if (myl->eflags & flag) \ logger_log(myl, type, __VA_ARGS__); \ } while (0) enum logger_ret_type logger_log(logger *l, const enum log_entry_type event, const void *entry, ...); enum logger_add_watcher_ret { LOGGER_ADD_WATCHER_TOO_MANY = 0, LOGGER_ADD_WATCHER_OK, LOGGER_ADD_WATCHER_FAILED }; enum logger_add_watcher_ret logger_add_watcher(void *c, const int sfd, uint16_t f); #endif
4,680
24.032086
100
h
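A cautious sketch of the logging flow implied by the declarations above. logger_init() is assumed to run once at startup and logger_create() once per worker thread; the trailing arguments to LOGGER_LOG depend on per-entry format strings defined in logger.c (not part of this record), so the (sfd, command) pair shown here is an assumption that should be checked against that file.

#include "memcached.h"   // pulls in logger.h and the LOG_* flag definitions

static void log_raw_command(conn *c, const char *command) {
    // LOGGER_LOG checks the logger's eflags and forwards to logger_log().
    // The argument list after NULL is assumed to match the LOGGER_ASCII_CMD
    // entry format; verify against logger.c before relying on it.
    LOGGER_LOG(c->thread->l, LOG_RAWCMDS, LOGGER_ASCII_CMD, NULL,
               c->sfd, command);
}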
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/getresult.sh
awk -F ' ' '$1 ~ /time/ {sum += $3} END {print sum}' out
awk -F ' ' '$1 ~ /pagecnt/ {sum += $2} END {print sum}' out
awk -F ' ' '$1 ~ /timecp/ {sum += $3} END {print sum}' out
176
43.25
59
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/extstore.h
#ifndef EXTSTORE_H #define EXTSTORE_H /* A safe-to-read dataset for determining compaction. * id is the array index. */ struct extstore_page_data { uint64_t version; uint64_t bytes_used; unsigned int bucket; }; /* Pages can have objects deleted from them at any time. This creates holes * that can't be reused until the page is either evicted or all objects are * deleted. * bytes_fragmented is the total bytes for all of these holes. * It is the size of all used pages minus each page's bytes_used value. */ struct extstore_stats { uint64_t page_allocs; uint64_t page_count; /* total page count */ uint64_t page_evictions; uint64_t page_reclaims; uint64_t page_size; /* size in bytes per page (supplied by caller) */ uint64_t pages_free; /* currently unallocated/unused pages */ uint64_t pages_used; uint64_t objects_evicted; uint64_t objects_read; uint64_t objects_written; uint64_t objects_used; /* total number of objects stored */ uint64_t bytes_evicted; uint64_t bytes_written; uint64_t bytes_read; /* wbuf - read -> bytes read from storage */ uint64_t bytes_used; /* total number of bytes stored */ uint64_t bytes_fragmented; /* see above comment */ struct extstore_page_data *page_data; }; // TODO: Temporary configuration structure. A "real" library should have an // extstore_set(enum, void *ptr) which hides the implementation. // this is plenty for quick development. struct extstore_conf { unsigned int page_size; // ideally 64-256M in size unsigned int page_count; unsigned int page_buckets; // number of different writeable pages unsigned int wbuf_size; // must divide cleanly into page_size unsigned int wbuf_count; // this might get locked to "2 per active page" unsigned int io_threadcount; unsigned int io_depth; // with normal I/O, hits locks less. req'd for AIO }; enum obj_io_mode { OBJ_IO_READ = 0, OBJ_IO_WRITE, }; typedef struct _obj_io obj_io; typedef void (*obj_io_cb)(void *e, obj_io *io, int ret); /* An object for both reads and writes to the storage engine. * Once an IO is submitted, ->next may be changed by the IO thread. It is not * safe to further modify the IO stack until the entire request is completed. */ struct _obj_io { void *data; /* user supplied data pointer */ struct _obj_io *next; char *buf; /* buffer of data to read or write to */ struct iovec *iov; /* alternatively, use this iovec */ unsigned int iovcnt; /* number of IOV's */ unsigned int page_version; /* page version for read mode */ unsigned int len; /* for both modes */ unsigned int offset; /* for read mode */ unsigned short page_id; /* for read mode */ enum obj_io_mode mode; /* callback pointers? */ obj_io_cb cb; }; enum extstore_res { EXTSTORE_INIT_BAD_WBUF_SIZE = 1, EXTSTORE_INIT_NEED_MORE_WBUF, EXTSTORE_INIT_NEED_MORE_BUCKETS, EXTSTORE_INIT_PAGE_WBUF_ALIGNMENT, EXTSTORE_INIT_OOM, EXTSTORE_INIT_OPEN_FAIL, EXTSTORE_INIT_THREAD_FAIL }; const char *extstore_err(enum extstore_res res); void *extstore_init(char *fn, struct extstore_conf *cf, enum extstore_res *res); int extstore_write_request(void *ptr, unsigned int bucket, obj_io *io); void extstore_write(void *ptr, obj_io *io); int extstore_submit(void *ptr, obj_io *io); /* count are the number of objects being removed, bytes are the original * length of those objects. Bytes is optional but you can't track * fragmentation without it. 
*/ int extstore_check(void *ptr, unsigned int page_id, uint64_t page_version); int extstore_delete(void *ptr, unsigned int page_id, uint64_t page_version, unsigned int count, unsigned int bytes); void extstore_get_stats(void *ptr, struct extstore_stats *st); /* add page data array to a stats structure. * caller must allocate its stats.page_data memory first. */ void extstore_get_page_data(void *ptr, struct extstore_stats *st); void extstore_run_maint(void *ptr); void extstore_close_page(void *ptr, unsigned int page_id, uint64_t page_version); #endif
4,091
36.541284
116
h
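A minimal initialization sketch for the external-storage engine declared above. Only calls whose prototypes appear in this header are used; the configuration values and the data-file path are illustrative assumptions, not recommended settings.

#include <stdio.h>
#include "extstore.h"   // the header above (assumed include path)

int main() {
    struct extstore_conf cf;
    cf.page_size      = 64 * 1024 * 1024;   // 64M pages (assumed value)
    cf.page_count     = 16;
    cf.page_buckets   = 4;
    cf.wbuf_size      = cf.page_size / 8;   // must divide cleanly into page_size
    cf.wbuf_count     = cf.page_buckets * 2;
    cf.io_threadcount = 1;
    cf.io_depth       = 1;

    enum extstore_res res;
    void *store = extstore_init((char *)"/tmp/extstore.data", &cf, &res);
    if (store == NULL) {
        fprintf(stderr, "extstore_init failed: %s\n", extstore_err(res));
        return 1;
    }

    struct extstore_stats st;
    st.page_data = NULL;            // per-page data only needed for extstore_get_page_data
    extstore_get_stats(store, &st);
    printf("pages free: %llu\n", (unsigned long long)st.pages_free);
    return 0;
}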
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/assoc.h
/* associative array */
void assoc_init(const int hashpower_init);
item *assoc_find(const char *key, const size_t nkey, const uint32_t hv);
int assoc_insert(item *item, const uint32_t hv);
void assoc_delete(const char *key, const size_t nkey, const uint32_t hv);
void do_assoc_move_next_bucket(void);
int start_assoc_maintenance_thread(void);
void stop_assoc_maintenance_thread(void);
extern unsigned int hashpower;
extern unsigned int item_lock_hashpower;
457
40.636364
73
h
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/run.sh
#!/usr/bin/env bash
sudo rm -rf /mnt/mem/*
sed -i "s/Werror/Wno-error/g" Makefile
make -j USE_PMDK=yes STD=-std=gnu99
sudo ./memcached -u root -m 0 -t 1 -o pslab_policy=pmem,pslab_file=/mnt/mem/pool,pslab_force > out
grep "cp" out > time
log=$(awk '{sum+= $2;} END{print sum;}' time)
echo $1'cp' $log
301
32.555556
98
sh
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/slab_automove_extstore.h
#ifndef SLAB_AUTOMOVE_EXTSTORE_H
#define SLAB_AUTOMOVE_EXTSTORE_H

void *slab_automove_extstore_init(struct settings *settings);
void slab_automove_extstore_free(void *arg);
void slab_automove_extstore_run(void *arg, int *src, int *dst);

#endif
246
26.444444
63
h
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/memcached.h
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* * Copyright 2018 Lenovo * * Licensed under the BSD-3 license. see LICENSE.Lenovo.txt for full text */ /* * Note: * Codes enclosed in `#ifdef PSLAB' and `#endif' are added by Lenovo for * persistent memory support */ /** \file * The main memcached header holding commonly used data * structures and function prototypes. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <sys/types.h> #include <sys/socket.h> #include <sys/time.h> #include <netinet/in.h> #include <event.h> #include <netdb.h> #include <pthread.h> #include <unistd.h> #include <assert.h> #include "itoa_ljust.h" #include "protocol_binary.h" #include "cache.h" #include "logger.h" #ifdef EXTSTORE #include "extstore.h" #include "crc32c.h" #endif #ifdef PSLAB #include "pslab.h" #endif #include "sasl_defs.h" /** Maximum length of a key. */ #define KEY_MAX_LENGTH 250 /** Size of an incr buf. */ #define INCR_MAX_STORAGE_LEN 24 #define DATA_BUFFER_SIZE 2048 #define UDP_READ_BUFFER_SIZE 65536 #define UDP_MAX_PAYLOAD_SIZE 1400 #define UDP_HEADER_SIZE 8 #define MAX_SENDBUF_SIZE (256 * 1024 * 1024) /* Up to 3 numbers (2 32bit, 1 64bit), spaces, newlines, null 0 */ #define SUFFIX_SIZE 50 /** Initial size of list of items being returned by "get". */ #define ITEM_LIST_INITIAL 200 /** Initial size of list of CAS suffixes appended to "gets" lines. */ #define SUFFIX_LIST_INITIAL 100 /** Initial size of the sendmsg() scatter/gather array. */ #define IOV_LIST_INITIAL 400 /** Initial number of sendmsg() argument structures to allocate. */ #define MSG_LIST_INITIAL 10 /** High water marks for buffer shrinking */ #define READ_BUFFER_HIGHWAT 8192 #define ITEM_LIST_HIGHWAT 400 #define IOV_LIST_HIGHWAT 600 #define MSG_LIST_HIGHWAT 100 /* Binary protocol stuff */ #define MIN_BIN_PKT_LENGTH 16 #define BIN_PKT_HDR_WORDS (MIN_BIN_PKT_LENGTH/sizeof(uint32_t)) /* Initial power multiplier for the hash table */ #define HASHPOWER_DEFAULT 16 #define HASHPOWER_MAX 32 /* * We only reposition items in the LRU queue if they haven't been repositioned * in this many seconds. That saves us from churning on frequently-accessed * items. */ #define ITEM_UPDATE_INTERVAL 60 /* unistd.h is here */ #if HAVE_UNISTD_H # include <unistd.h> #endif /* Slab sizing definitions. */ #ifdef PSLAB #define POWER_SMALLEST 2 #else #define POWER_SMALLEST 1 #endif #define POWER_LARGEST 256 /* actual cap is 255 */ #define SLAB_GLOBAL_PAGE_POOL 0 /* magic slab class for storing pages for reassignment */ #ifdef PSLAB #define SLAB_GLOBAL_PAGE_POOL_PMEM 1 /* magic slab class for storing pmem pages for reassignment */ #endif #define CHUNK_ALIGN_BYTES 8 /* slab class max is a 6-bit number, -1. */ #define MAX_NUMBER_OF_SLAB_CLASSES (63 + 1) /** How long an object can reasonably be assumed to be locked before harvesting it on a low memory condition. Default: disabled. */ #define TAIL_REPAIR_TIME_DEFAULT 0 /* warning: don't use these macros with a function, as it evals its arg twice */ #define ITEM_get_cas(i) (((i)->it_flags & ITEM_CAS) ? \ (i)->data->cas : (uint64_t)0) #define ITEM_set_cas(i,v) { \ if ((i)->it_flags & ITEM_CAS) { \ (i)->data->cas = v; \ } \ } #define ITEM_key(item) (((char*)&((item)->data)) \ + (((item)->it_flags & ITEM_CAS) ? sizeof(uint64_t) : 0)) #define ITEM_suffix(item) ((char*) &((item)->data) + (item)->nkey + 1 \ + (((item)->it_flags & ITEM_CAS) ? sizeof(uint64_t) : 0)) #define ITEM_data(item) ((char*) &((item)->data) + (item)->nkey + 1 \ + (item)->nsuffix \ + (((item)->it_flags & ITEM_CAS) ? 
sizeof(uint64_t) : 0)) #define ITEM_ntotal(item) (sizeof(struct _stritem) + (item)->nkey + 1 \ + (item)->nsuffix + (item)->nbytes \ + (((item)->it_flags & ITEM_CAS) ? sizeof(uint64_t) : 0)) #ifdef PSLAB #define ITEM_dtotal(item) (item)->nkey + 1 + (item)->nsuffix + (item)->nbytes \ + (((item)->it_flags & ITEM_CAS) ? sizeof(uint64_t) : 0) #endif #define ITEM_clsid(item) ((item)->slabs_clsid & ~(3<<6)) #define ITEM_lruid(item) ((item)->slabs_clsid & (3<<6)) #define STAT_KEY_LEN 128 #define STAT_VAL_LEN 128 /** Append a simple stat with a stat name, value format and value */ #define APPEND_STAT(name, fmt, val) \ append_stat(name, add_stats, c, fmt, val); /** Append an indexed stat with a stat name (with format), value format and value */ #define APPEND_NUM_FMT_STAT(name_fmt, num, name, fmt, val) \ klen = snprintf(key_str, STAT_KEY_LEN, name_fmt, num, name); \ vlen = snprintf(val_str, STAT_VAL_LEN, fmt, val); \ add_stats(key_str, klen, val_str, vlen, c); /** Common APPEND_NUM_FMT_STAT format. */ #define APPEND_NUM_STAT(num, name, fmt, val) \ APPEND_NUM_FMT_STAT("%d:%s", num, name, fmt, val) /** * Callback for any function producing stats. * * @param key the stat's key * @param klen length of the key * @param val the stat's value in an ascii form (e.g. text form of a number) * @param vlen length of the value * @parm cookie magic callback cookie */ typedef void (*ADD_STAT)(const char *key, const uint16_t klen, const char *val, const uint32_t vlen, const void *cookie); /* * NOTE: If you modify this table you _MUST_ update the function state_text */ /** * Possible states of a connection. */ enum conn_states { conn_listening, /**< the socket which listens for connections */ conn_new_cmd, /**< Prepare connection for next command */ conn_waiting, /**< waiting for a readable socket */ conn_read, /**< reading in a command line */ conn_parse_cmd, /**< try to parse a command from the input buffer */ conn_write, /**< writing out a simple response */ conn_nread, /**< reading in a fixed number of bytes */ conn_swallow, /**< swallowing unnecessary bytes w/o storing */ conn_closing, /**< closing this connection */ conn_mwrite, /**< writing out many items sequentially */ conn_closed, /**< connection is closed */ conn_watch, /**< held by the logger thread as a watcher */ conn_max_state /**< Max state value (used for assertion) */ }; enum bin_substates { bin_no_state, bin_reading_set_header, bin_reading_cas_header, bin_read_set_value, bin_reading_get_key, bin_reading_stat, bin_reading_del_header, bin_reading_incr_header, bin_read_flush_exptime, bin_reading_sasl_auth, bin_reading_sasl_auth_data, bin_reading_touch_key, }; enum protocol { ascii_prot = 3, /* arbitrary value. */ binary_prot, negotiating_prot /* Discovering the protocol */ }; enum network_transport { local_transport, /* Unix sockets*/ tcp_transport, udp_transport }; enum pause_thread_types { PAUSE_WORKER_THREADS = 0, PAUSE_ALL_THREADS, RESUME_ALL_THREADS, RESUME_WORKER_THREADS }; #define IS_TCP(x) (x == tcp_transport) #define IS_UDP(x) (x == udp_transport) #define NREAD_ADD 1 #define NREAD_SET 2 #define NREAD_REPLACE 3 #define NREAD_APPEND 4 #define NREAD_PREPEND 5 #define NREAD_CAS 6 enum store_item_type { NOT_STORED=0, STORED, EXISTS, NOT_FOUND, TOO_LARGE, NO_MEMORY }; enum delta_result_type { OK, NON_NUMERIC, EOM, DELTA_ITEM_NOT_FOUND, DELTA_ITEM_CAS_MISMATCH }; /** Time relative to server start. Smaller than time_t on 64-bit systems. */ // TODO: Move to sub-header. 
needed in logger.h //typedef unsigned int rel_time_t; /** Use X macros to avoid iterating over the stats fields during reset and * aggregation. No longer have to add new stats in 3+ places. */ #define SLAB_STATS_FIELDS \ X(set_cmds) \ X(get_hits) \ X(touch_hits) \ X(delete_hits) \ X(cas_hits) \ X(cas_badval) \ X(incr_hits) \ X(decr_hits) /** Stats stored per slab (and per thread). */ struct slab_stats { #define X(name) uint64_t name; SLAB_STATS_FIELDS #undef X }; #define THREAD_STATS_FIELDS \ X(get_cmds) \ X(get_misses) \ X(get_expired) \ X(get_flushed) \ X(touch_cmds) \ X(touch_misses) \ X(delete_misses) \ X(incr_misses) \ X(decr_misses) \ X(cas_misses) \ X(bytes_read) \ X(bytes_written) \ X(flush_cmds) \ X(conn_yields) /* # of yields for connections (-R option)*/ \ X(auth_cmds) \ X(auth_errors) \ X(idle_kicks) /* idle connections killed */ #ifdef EXTSTORE #define EXTSTORE_THREAD_STATS_FIELDS \ X(get_extstore) \ X(recache_from_extstore) \ X(miss_from_extstore) \ X(badcrc_from_extstore) #endif /** * Stats stored per-thread. */ struct thread_stats { pthread_mutex_t mutex; #define X(name) uint64_t name; THREAD_STATS_FIELDS #ifdef EXTSTORE EXTSTORE_THREAD_STATS_FIELDS #endif #undef X struct slab_stats slab_stats[MAX_NUMBER_OF_SLAB_CLASSES]; uint64_t lru_hits[POWER_LARGEST]; }; /** * Global stats. Only resettable stats should go into this structure. */ struct stats { uint64_t total_items; uint64_t total_conns; uint64_t rejected_conns; uint64_t malloc_fails; uint64_t listen_disabled_num; uint64_t slabs_moved; /* times slabs were moved around */ uint64_t slab_reassign_rescues; /* items rescued during slab move */ uint64_t slab_reassign_evictions_nomem; /* valid items lost during slab move */ uint64_t slab_reassign_inline_reclaim; /* valid items lost during slab move */ uint64_t slab_reassign_chunk_rescues; /* chunked-item chunks recovered */ uint64_t slab_reassign_busy_items; /* valid temporarily unmovable */ uint64_t slab_reassign_busy_deletes; /* refcounted items killed */ uint64_t lru_crawler_starts; /* Number of item crawlers kicked off */ uint64_t lru_maintainer_juggles; /* number of LRU bg pokes */ uint64_t time_in_listen_disabled_us; /* elapsed time in microseconds while server unable to process new connections */ uint64_t log_worker_dropped; /* logs dropped by worker threads */ uint64_t log_worker_written; /* logs written by worker threads */ uint64_t log_watcher_skipped; /* logs watchers missed */ uint64_t log_watcher_sent; /* logs sent to watcher buffers */ #ifdef EXTSTORE uint64_t extstore_compact_lost; /* items lost because they were locked */ uint64_t extstore_compact_rescues; /* items re-written during compaction */ uint64_t extstore_compact_skipped; /* unhit items skipped during compaction */ #endif struct timeval maxconns_entered; /* last time maxconns entered */ }; /** * Global "state" stats. Reflects state that shouldn't be wiped ever. * Ordered for some cache line locality for commonly updated counters. 
*/ struct stats_state { uint64_t curr_items; uint64_t curr_bytes; uint64_t curr_conns; uint64_t hash_bytes; /* size used for hash tables */ unsigned int conn_structs; unsigned int reserved_fds; unsigned int hash_power_level; /* Better hope it's not over 9000 */ bool hash_is_expanding; /* If the hash table is being expanded */ bool accepting_conns; /* whether we are currently accepting */ bool slab_reassign_running; /* slab reassign in progress */ bool lru_crawler_running; /* crawl in progress */ }; #define MAX_VERBOSITY_LEVEL 2 /* When adding a setting, be sure to update process_stat_settings */ /** * Globally accessible settings as derived from the commandline. */ struct settings { size_t maxbytes; int maxconns; int port; int udpport; char *inter; int verbose; rel_time_t oldest_live; /* ignore existing items older than this */ uint64_t oldest_cas; /* ignore existing items with CAS values lower than this */ int evict_to_free; char *socketpath; /* path to unix socket if using local socket */ int access; /* access mask (a la chmod) for unix domain socket */ double factor; /* chunk size growth factor */ int chunk_size; int num_threads; /* number of worker (without dispatcher) libevent threads to run */ int num_threads_per_udp; /* number of worker threads serving each udp socket */ char prefix_delimiter; /* character that marks a key prefix (for stats) */ int detail_enabled; /* nonzero if we're collecting detailed stats */ int reqs_per_event; /* Maximum number of io to process on each io-event. */ bool use_cas; enum protocol binding_protocol; int backlog; int item_size_max; /* Maximum item size */ int slab_chunk_size_max; /* Upper end for chunks within slab pages. */ int slab_page_size; /* Slab's page units. */ bool sasl; /* SASL on/off */ bool maxconns_fast; /* Whether or not to early close connections */ bool lru_crawler; /* Whether or not to enable the autocrawler thread */ bool lru_maintainer_thread; /* LRU maintainer background thread */ bool lru_segmented; /* Use split or flat LRU's */ bool slab_reassign; /* Whether or not slab reassignment is allowed */ int slab_automove; /* Whether or not to automatically move slabs */ double slab_automove_ratio; /* youngest must be within pct of oldest */ unsigned int slab_automove_window; /* window mover for algorithm */ int hashpower_init; /* Starting hash power level */ bool shutdown_command; /* allow shutdown command */ int tail_repair_time; /* LRU tail refcount leak repair time */ bool flush_enabled; /* flush_all enabled */ bool dump_enabled; /* whether cachedump/metadump commands work */ char *hash_algorithm; /* Hash algorithm in use */ int lru_crawler_sleep; /* Microsecond sleep between items */ uint32_t lru_crawler_tocrawl; /* Number of items to crawl per run */ int hot_lru_pct; /* percentage of slab space for HOT_LRU */ int warm_lru_pct; /* percentage of slab space for WARM_LRU */ double hot_max_factor; /* HOT tail age relative to COLD tail */ double warm_max_factor; /* WARM tail age relative to COLD tail */ int crawls_persleep; /* Number of LRU crawls to run before sleeping */ bool inline_ascii_response; /* pre-format the VALUE line for ASCII responses */ bool temp_lru; /* TTL < temporary_ttl uses TEMP_LRU */ uint32_t temporary_ttl; /* temporary LRU threshold */ int idle_timeout; /* Number of seconds to let connections idle */ unsigned int logger_watcher_buf_size; /* size of logger's per-watcher buffer */ unsigned int logger_buf_size; /* size of per-thread logger buffer */ bool drop_privileges; /* Whether or not to drop unnecessary process 
privileges */ bool relaxed_privileges; /* Relax process restrictions when running testapp */ #ifdef EXTSTORE unsigned int ext_item_size; /* minimum size of items to store externally */ unsigned int ext_item_age; /* max age of tail item before storing ext. */ unsigned int ext_low_ttl; /* remaining TTL below this uses own pages */ unsigned int ext_recache_rate; /* counter++ % recache_rate == 0 > recache */ unsigned int ext_wbuf_size; /* read only note for the engine */ unsigned int ext_compact_under; /* when fewer than this many pages, compact */ unsigned int ext_drop_under; /* when fewer than this many pages, drop COLD items */ double ext_max_frag; /* ideal maximum page fragmentation */ double slab_automove_freeratio; /* % of memory to hold free as buffer */ bool ext_drop_unread; /* skip unread items during compaction */ /* per-slab-class free chunk limit */ unsigned int ext_free_memchunks[MAX_NUMBER_OF_SLAB_CLASSES]; #endif #ifdef PSLAB size_t pslab_size; /* pmem slab pool size */ unsigned int pslab_policy; /* pmem slab allocation policy */ bool pslab_recover; /* do recovery from pmem slab pool */ #endif }; extern struct stats stats; extern struct stats_state stats_state; extern time_t process_started; extern struct settings settings; #define ITEM_LINKED 1 #define ITEM_CAS 2 /* temp */ #define ITEM_SLABBED 4 /* Item was fetched at least once in its lifetime */ #define ITEM_FETCHED 8 /* Appended on fetch, removed on LRU shuffling */ #define ITEM_ACTIVE 16 /* If an item's storage are chained chunks. */ #define ITEM_CHUNKED 32 #define ITEM_CHUNK 64 #ifdef PSLAB /* If an item is stored in pmem */ #define ITEM_PSLAB 64 #endif #ifdef EXTSTORE /* ITEM_data bulk is external to item */ #define ITEM_HDR 128 #endif /** * Structure for storing items within memcached. */ typedef struct _stritem { /* Protected by LRU locks */ struct _stritem *next; struct _stritem *prev; /* Rest are protected by an item lock */ struct _stritem *h_next; /* hash chain next */ rel_time_t time; /* least recent access */ rel_time_t exptime; /* expire time */ int nbytes; /* size of data */ unsigned short refcount; uint8_t nsuffix; /* length of flags-and-length string */ uint8_t it_flags; /* ITEM_* above */ uint8_t slabs_clsid;/* which slab class we're in */ uint8_t nkey; /* key length, w/terminating null and padding */ /* this odd type prevents type-punning issues when we do * the little shuffle to save space when not using CAS. */ union { uint64_t cas; char end; } data[]; /* if it_flags & ITEM_CAS we have 8 bytes CAS */ /* then null-terminated key */ /* then " flags length\r\n" (no terminating null) */ /* then data with terminating \r\n (no terminating null; it's binary!) */ } item; // TODO: If we eventually want user loaded modules, we can't use an enum :( enum crawler_run_type { CRAWLER_AUTOEXPIRE=0, CRAWLER_EXPIRED, CRAWLER_METADUMP }; typedef struct { struct _stritem *next; struct _stritem *prev; struct _stritem *h_next; /* hash chain next */ rel_time_t time; /* least recent access */ rel_time_t exptime; /* expire time */ int nbytes; /* size of data */ unsigned short refcount; uint8_t nsuffix; /* length of flags-and-length string */ uint8_t it_flags; /* ITEM_* above */ uint8_t slabs_clsid;/* which slab class we're in */ uint8_t nkey; /* key length, w/terminating null and padding */ uint32_t remaining; /* Max keys to crawl per slab per invocation */ uint64_t reclaimed; /* items reclaimed during this crawl. */ uint64_t unfetched; /* items reclaimed unfetched during this crawl. 
*/ uint64_t checked; /* items examined during this crawl. */ } crawler; /* Header when an item is actually a chunk of another item. */ typedef struct _strchunk { struct _strchunk *next; /* points within its own chain. */ struct _strchunk *prev; /* can potentially point to the head. */ struct _stritem *head; /* always points to the owner chunk */ int size; /* available chunk space in bytes */ int used; /* chunk space used */ int nbytes; /* used. */ unsigned short refcount; /* used? */ uint8_t orig_clsid; /* For obj hdr chunks slabs_clsid is fake. */ uint8_t it_flags; /* ITEM_* above. */ uint8_t slabs_clsid; /* Same as above. */ #ifdef PSLAB _Bool pslab : 1; uint64_t next_poff; /* offset of next chunk in pmem */ #endif char data[]; } item_chunk; #ifdef EXTSTORE typedef struct { unsigned int page_version; /* from IO header */ unsigned int offset; /* from IO header */ unsigned short page_id; /* from IO header */ } item_hdr; #endif typedef struct { pthread_t thread_id; /* unique ID of this thread */ struct event_base *base; /* libevent handle this thread uses */ struct event notify_event; /* listen event for notify pipe */ int notify_receive_fd; /* receiving end of notify pipe */ int notify_send_fd; /* sending end of notify pipe */ struct thread_stats stats; /* Stats generated by this thread */ struct conn_queue *new_conn_queue; /* queue of new connections to handle */ cache_t *suffix_cache; /* suffix cache */ #ifdef EXTSTORE cache_t *io_cache; /* IO objects */ void *storage; /* data object for storage system */ #endif logger *l; /* logger buffer */ void *lru_bump_buf; /* async LRU bump buffer */ } LIBEVENT_THREAD; typedef struct conn conn; #ifdef EXTSTORE typedef struct _io_wrap { obj_io io; struct _io_wrap *next; conn *c; item *hdr_it; /* original header item. */ unsigned int iovec_start; /* start of the iovecs for this IO */ unsigned int iovec_count; /* total number of iovecs */ unsigned int iovec_data; /* specific index of data iovec */ bool miss; /* signal a miss to unlink hdr_it */ bool badcrc; /* signal a crc failure */ bool active; // FIXME: canary for test. remove } io_wrap; #endif /** * The structure representing a connection into memcached. */ struct conn { int sfd; sasl_conn_t *sasl_conn; bool authenticated; enum conn_states state; enum bin_substates substate; rel_time_t last_cmd_time; struct event event; short ev_flags; short which; /** which events were just triggered */ char *rbuf; /** buffer to read commands into */ char *rcurr; /** but if we parsed some already, this is where we stopped */ int rsize; /** total allocated size of rbuf */ int rbytes; /** how much data, starting from rcur, do we have unparsed */ char *wbuf; char *wcurr; int wsize; int wbytes; /** which state to go into after finishing current write */ enum conn_states write_and_go; void *write_and_free; /** free this memory after finishing writing */ char *ritem; /** when we read in an item's value, it goes here */ int rlbytes; /* data for the nread state */ /** * item is used to hold an item structure created after reading the command * line of set/add/replace commands, but before we finished reading the actual * data. The data is read into ITEM_data(item) to avoid extra copying. 
*/ void *item; /* for commands set/add/replace */ /* data for the swallow state */ int sbytes; /* how many bytes to swallow */ /* data for the mwrite state */ struct iovec *iov; int iovsize; /* number of elements allocated in iov[] */ int iovused; /* number of elements used in iov[] */ struct msghdr *msglist; int msgsize; /* number of elements allocated in msglist[] */ int msgused; /* number of elements used in msglist[] */ int msgcurr; /* element in msglist[] being transmitted now */ int msgbytes; /* number of bytes in current msg */ item **ilist; /* list of items to write out */ int isize; item **icurr; int ileft; char **suffixlist; int suffixsize; char **suffixcurr; int suffixleft; #ifdef EXTSTORE int io_wrapleft; unsigned int recache_counter; io_wrap *io_wraplist; /* linked list of io_wraps */ bool io_queued; /* FIXME: debugging flag */ #endif enum protocol protocol; /* which protocol this connection speaks */ enum network_transport transport; /* what transport is used by this connection */ /* data for UDP clients */ int request_id; /* Incoming UDP request ID, if this is a UDP "connection" */ struct sockaddr_in6 request_addr; /* udp: Who sent the most recent request */ socklen_t request_addr_size; unsigned char *hdrbuf; /* udp packet headers */ int hdrsize; /* number of headers' worth of space is allocated */ bool noreply; /* True if the reply should not be sent. */ /* current stats command */ struct { char *buffer; size_t size; size_t offset; } stats; /* Binary protocol stuff */ /* This is where the binary header goes */ protocol_binary_request_header binary_header; uint64_t cas; /* the cas to return */ short cmd; /* current command being processed */ int opaque; int keylen; conn *next; /* Used for generating a list of conn structures */ LIBEVENT_THREAD *thread; /* Pointer to the thread object serving this connection */ uint32_t objid; }; /* array of conn structures, indexed by file descriptor */ extern conn **conns; /* current time of day (updated periodically) */ extern volatile rel_time_t current_time; /* TODO: Move to slabs.h? 
*/ extern volatile int slab_rebalance_signal; struct slab_rebalance { void *slab_start; void *slab_end; void *slab_pos; int s_clsid; int d_clsid; uint32_t busy_items; uint32_t rescues; uint32_t evictions_nomem; uint32_t inline_reclaim; uint32_t chunk_rescues; uint32_t busy_deletes; uint32_t busy_loops; uint8_t done; }; extern struct slab_rebalance slab_rebal; #ifdef EXTSTORE extern void *ext_storage; #endif /* * Functions */ void do_accept_new_conns(const bool do_accept); enum delta_result_type do_add_delta(conn *c, const char *key, const size_t nkey, const bool incr, const int64_t delta, char *buf, uint64_t *cas, const uint32_t hv); enum store_item_type do_store_item(item *item, int comm, conn* c, const uint32_t hv); conn *conn_new(const int sfd, const enum conn_states init_state, const int event_flags, const int read_buffer_size, enum network_transport transport, struct event_base *base); void conn_worker_readd(conn *c); extern int daemonize(int nochdir, int noclose); #define mutex_lock(x) pthread_mutex_lock(x) #define mutex_unlock(x) pthread_mutex_unlock(x) #include "stats.h" #include "slabs.h" #include "assoc.h" #include "items.h" #include "crawler.h" #include "trace.h" #include "hash.h" #include "util.h" /* * Functions such as the libevent-related calls that need to do cross-thread * communication in multithreaded mode (rather than actually doing the work * in the current thread) are called via "dispatch_" frontends, which are * also #define-d to directly call the underlying code in singlethreaded mode. */ void memcached_thread_init(int nthreads, void *arg); void redispatch_conn(conn *c); void dispatch_conn_new(int sfd, enum conn_states init_state, int event_flags, int read_buffer_size, enum network_transport transport); void sidethread_conn_close(conn *c); /* Lock wrappers for cache functions that are called from main loop. */ enum delta_result_type add_delta(conn *c, const char *key, const size_t nkey, const int incr, const int64_t delta, char *buf, uint64_t *cas); void accept_new_conns(const bool do_accept); conn *conn_from_freelist(void); bool conn_add_to_freelist(conn *c); void conn_close_idle(conn *c); item *item_alloc(char *key, size_t nkey, int flags, rel_time_t exptime, int nbytes); #define DO_UPDATE true #define DONT_UPDATE false item *item_get(const char *key, const size_t nkey, conn *c, const bool do_update); item *item_touch(const char *key, const size_t nkey, uint32_t exptime, conn *c); int item_link(item *it); void item_remove(item *it); int item_replace(item *it, item *new_it, const uint32_t hv); void item_unlink(item *it); void item_lock(uint32_t hv); void *item_trylock(uint32_t hv); void item_trylock_unlock(void *arg); void item_unlock(uint32_t hv); void pause_threads(enum pause_thread_types type); #define refcount_incr(it) ++(it->refcount) #define refcount_decr(it) --(it->refcount) void STATS_LOCK(void); void STATS_UNLOCK(void); void threadlocal_stats_reset(void); void threadlocal_stats_aggregate(struct thread_stats *stats); void slab_stats_aggregate(struct thread_stats *stats, struct slab_stats *out); /* Stat processing functions */ void append_stat(const char *name, ADD_STAT add_stats, conn *c, const char *fmt, ...); enum store_item_type store_item(item *item, int comm, conn *c); #if HAVE_DROP_PRIVILEGES extern void drop_privileges(void); #else #define drop_privileges() #endif #if HAVE_DROP_WORKER_PRIVILEGES extern void drop_worker_privileges(void); #else #define drop_worker_privileges() #endif /* If supported, give compiler hints for branch prediction. 
*/ #if !defined(__GNUC__) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96) #define __builtin_expect(x, expected_value) (x) #endif #define likely(x) __builtin_expect((x),1) #define unlikely(x) __builtin_expect((x),0)
28,992
34.749692
175
h
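A short sketch using the item layout macros defined in memcached.h above to read a stored item's key, payload, and CAS value. It assumes the item has already been populated by the cache (nkey, nsuffix, nbytes set) and leaves refcounting and locking to the caller.

#include <stdio.h>
#include "memcached.h"

// Print an item's key and sizes using only the accessor macros declared above.
static void dump_item(item *it) {
    const char *key  = ITEM_key(it);
    const char *data = ITEM_data(it);          // payload ends with "\r\n"
    printf("key=%.*s nbytes=%d total=%zu cas=%llu\n",
           (int)it->nkey, key, it->nbytes,
           (size_t)ITEM_ntotal(it),
           (unsigned long long)ITEM_get_cas(it));
    (void)data;
}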
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/crawler.h
#ifndef CRAWLER_H
#define CRAWLER_H

typedef struct {
    uint64_t histo[61];
    uint64_t ttl_hourplus;
    uint64_t noexp;
    uint64_t reclaimed;
    uint64_t seen;
    rel_time_t start_time;
    rel_time_t end_time;
    bool run_complete;
} crawlerstats_t;

struct crawler_expired_data {
    pthread_mutex_t lock;
    crawlerstats_t crawlerstats[POWER_LARGEST];
    /* redundant with crawlerstats_t so we can get overall start/stop/done */
    rel_time_t start_time;
    rel_time_t end_time;
    bool crawl_complete;
    bool is_external; /* whether this was an alloc local or remote to the module. */
};

enum crawler_result_type {
    CRAWLER_OK=0,
    CRAWLER_RUNNING,
    CRAWLER_BADCLASS,
    CRAWLER_NOTSTARTED,
    CRAWLER_ERROR
};

int start_item_crawler_thread(void);
int stop_item_crawler_thread(void);
int init_lru_crawler(void *arg);
enum crawler_result_type lru_crawler_crawl(char *slabs, enum crawler_run_type, void *c, const int sfd);
int lru_crawler_start(uint8_t *ids, uint32_t remaining,
    const enum crawler_run_type type, void *data, void *c, const int sfd);
void lru_crawler_pause(void);
void lru_crawler_resume(void);

#endif
1,191
29.564103
103
h
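A brief sketch of kicking off an expiry crawl with the API above. The "all" slab-class string and the NULL client / -1 sfd pair are assumptions (those arguments only matter for the metadump run type); the run-type enum comes from memcached.h.

#include <stdio.h>
#include "memcached.h"   // provides rel_time_t and enum crawler_run_type for crawler.h

static void expire_crawl_all(void) {
    // Passing "all" here is an assumption about lru_crawler_crawl's parsing
    // of the slabs string; check items.c / crawler.c before relying on it.
    enum crawler_result_type r =
        lru_crawler_crawl((char *)"all", CRAWLER_EXPIRED, NULL, -1);
    if (r != CRAWLER_OK)
        fprintf(stderr, "crawler not started (%d)\n", (int)r);
}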
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/slab_automove.h
#ifndef SLAB_AUTOMOVE_H
#define SLAB_AUTOMOVE_H

/* default automove functions */
void *slab_automove_init(struct settings *settings);
void slab_automove_free(void *arg);
void slab_automove_run(void *arg, int *src, int *dst);

typedef void *(*slab_automove_init_func)(struct settings *settings);
typedef void (*slab_automove_free_func)(void *arg);
typedef void (*slab_automove_run_func)(void *arg, int *src, int *dst);

typedef struct {
    slab_automove_init_func init;
    slab_automove_free_func free;
    slab_automove_run_func run;
} slab_automove_reg_t;

#endif
568
27.45
70
h
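A sketch showing how the default automove callbacks above could be bundled into the slab_automove_reg_t dispatch struct. Wiring the suggested (src, dst) pair into slabs_reassign() happens outside this header and is only hinted at in a comment.

#include "memcached.h"       // for struct settings
#include "slab_automove.h"

// Bundle the default algorithm behind the generic interface.
static const slab_automove_reg_t slab_automove_default = {
    slab_automove_init,
    slab_automove_free,
    slab_automove_run,
};

static void automove_tick(struct settings *s) {
    void *ctx = slab_automove_default.init(s);
    int src = -1, dst = -1;
    slab_automove_default.run(ctx, &src, &dst);  // fills src/dst when a move is suggested
    if (src > 0 && dst > 0) {
        /* hand (src, dst) to slabs_reassign() here */
    }
    slab_automove_default.free(ctx);
}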
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/storage.h
#ifndef STORAGE_H
#define STORAGE_H

int lru_maintainer_store(void *storage, const int clsid);
int start_storage_compact_thread(void *arg);
void storage_compact_pause(void);
void storage_compact_resume(void);

#endif
217
20.8
57
h
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/stats.h
/* stats */
void stats_prefix_init(void);
void stats_prefix_clear(void);
void stats_prefix_record_get(const char *key, const size_t nkey, const bool is_hit);
void stats_prefix_record_delete(const char *key, const size_t nkey);
void stats_prefix_record_set(const char *key, const size_t nkey);
/*@null@*/
char *stats_prefix_dump(int *length);
342
37.111111
84
h
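A tiny sketch of the prefix-stats API above. It assumes stats_prefix_init() must run before any record or dump call and that the dump buffer ownership and free policy are defined in stats.c rather than this header.

#include <stdio.h>
#include <string.h>
#include "memcached.h"   // stats.h relies on types from the main header

static void prefix_stats_demo(void) {
    stats_prefix_init();
    stats_prefix_record_set("user:1", strlen("user:1"));
    stats_prefix_record_get("user:1", strlen("user:1"), true);

    int len = 0;
    char *dump = stats_prefix_dump(&len);   // may return NULL (/*@null@*/)
    if (dump != NULL)
        printf("%.*s", len, dump);
}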
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/itoa_ljust.h
#ifndef ITOA_LJUST_H
#define ITOA_LJUST_H

//=== itoa_ljust.h - Fast integer to ascii conversion
//
// Fast and simple integer to ASCII conversion:
//
//   - 32 and 64-bit integers
//   - signed and unsigned
//   - user supplied buffer must be large enough for all decimal digits
//     in value plus minus sign if negative
//   - left-justified
//   - NUL terminated
//   - return value is pointer to NUL terminator
//
// Copyright (c) 2016 Arturo Martin-de-Nicolas
// [email protected]
// https://github.com/amdn/itoa_ljust/
//===----------------------------------------------------------------------===//

#include <stdint.h>

char* itoa_u32(uint32_t u, char* buffer);
char* itoa_32( int32_t i, char* buffer);
char* itoa_u64(uint64_t u, char* buffer);
char* itoa_64( int64_t i, char* buffer);

#endif // ITOA_LJUST_H
822
27.37931
80
h
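A direct usage sketch for the conversion routines above. The 24-byte buffer is sized from the header's own contract (all digits, optional sign, NUL terminator); nothing else is assumed.

#include <stdio.h>
#include "itoa_ljust.h"

int main() {
    char buf[24];   // enough for any 64-bit value plus sign and NUL
    char *end = itoa_64(-1234567890123456789LL, buf);
    printf("text=%s len=%d\n", buf, (int)(end - buf));  // end points at the NUL
    return 0;
}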
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/murmur3_hash.h
//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

#ifndef MURMURHASH3_H
#define MURMURHASH3_H

//-----------------------------------------------------------------------------
// Platform-specific functions and macros

#include <stdint.h>
#include <stddef.h>

//-----------------------------------------------------------------------------

uint32_t MurmurHash3_x86_32(const void *key, size_t length);

//-----------------------------------------------------------------------------

#endif // MURMURHASH3_H
681
33.1
79
h
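A one-call sketch of the hash function above as it might be used to bucket a cache key. Reducing the 32-bit value modulo a power-of-two table size is an assumption about intended use, not something stated in the header.

#include <stdio.h>
#include <string.h>
#include "murmur3_hash.h"

int main() {
    const char *key = "user:1";
    uint32_t hv = MurmurHash3_x86_32(key, strlen(key));
    printf("hash=0x%08x bucket=%u\n", hv, hv & ((1u << 16) - 1));  // e.g. 2^16 buckets
    return 0;
}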
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/util.h
/* fast-enough functions for uriencoding strings. */
void uriencode_init(void);
bool uriencode(const char *src, char *dst, const size_t srclen, const size_t dstlen);

/*
 * Wrappers around strtoull/strtoll that are safer and easier to
 * use. For tests and assumptions, see internal_tests.c.
 *
 * str     a NULL-terminated base decimal 10 unsigned integer
 * out     out parameter, if conversion succeeded
 *
 * returns true if conversion succeeded.
 */
bool safe_strtoull(const char *str, uint64_t *out);
bool safe_strtoll(const char *str, int64_t *out);
bool safe_strtoul(const char *str, uint32_t *out);
bool safe_strtol(const char *str, int32_t *out);
bool safe_strtod(const char *str, double *out);

#ifndef HAVE_HTONLL
extern uint64_t htonll(uint64_t);
extern uint64_t ntohll(uint64_t);
#endif

#ifdef __GCC
# define __gcc_attribute__ __attribute__
#else
# define __gcc_attribute__(x)
#endif

/**
 * Vararg variant of perror that makes for more useful error messages
 * when reporting with parameters.
 *
 * @param fmt a printf format
 */
void vperror(const char *fmt, ...)
    __gcc_attribute__ ((format (printf, 1, 2)));
1,127
27.923077
85
h
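A short sketch of the safe_strto* wrappers declared above; they are documented to return true only when the whole string converts cleanly, which is what the example checks. The helper name is hypothetical.

#include <stdint.h>
#include <stdio.h>
#include "memcached.h"   // util.h is reached through the main header

static int parse_delta(const char *arg, uint64_t *out) {
    if (!safe_strtoull(arg, out)) {          // false on garbage or partial parse
        fprintf(stderr, "bad numeric argument: %s\n", arg);
        return -1;
    }
    return 0;
}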
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/protocol_binary.h
/* * Copyright (c) <2008>, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY SUN MICROSYSTEMS, INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL SUN MICROSYSTEMS, INC. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Summary: Constants used by to implement the binary protocol. * * Copy: See Copyright for the status of this software. * * Author: Trond Norbye <[email protected]> */ #ifndef PROTOCOL_BINARY_H #define PROTOCOL_BINARY_H /** * This file contains definitions of the constants and packet formats * defined in the binary specification. Please note that you _MUST_ remember * to convert each multibyte field to / from network byte order to / from * host order. */ #ifdef __cplusplus extern "C" { #endif /** * Definition of the legal "magic" values used in a packet. * See section 3.1 Magic byte */ typedef enum { PROTOCOL_BINARY_REQ = 0x80, PROTOCOL_BINARY_RES = 0x81 } protocol_binary_magic; /** * Definition of the valid response status numbers. * See section 3.2 Response Status */ typedef enum { PROTOCOL_BINARY_RESPONSE_SUCCESS = 0x00, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT = 0x01, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS = 0x02, PROTOCOL_BINARY_RESPONSE_E2BIG = 0x03, PROTOCOL_BINARY_RESPONSE_EINVAL = 0x04, PROTOCOL_BINARY_RESPONSE_NOT_STORED = 0x05, PROTOCOL_BINARY_RESPONSE_DELTA_BADVAL = 0x06, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR = 0x20, PROTOCOL_BINARY_RESPONSE_AUTH_CONTINUE = 0x21, PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND = 0x81, PROTOCOL_BINARY_RESPONSE_ENOMEM = 0x82 } protocol_binary_response_status; /** * Definition of the different command opcodes. 
* See section 3.3 Command Opcodes */ typedef enum { PROTOCOL_BINARY_CMD_GET = 0x00, PROTOCOL_BINARY_CMD_SET = 0x01, PROTOCOL_BINARY_CMD_ADD = 0x02, PROTOCOL_BINARY_CMD_REPLACE = 0x03, PROTOCOL_BINARY_CMD_DELETE = 0x04, PROTOCOL_BINARY_CMD_INCREMENT = 0x05, PROTOCOL_BINARY_CMD_DECREMENT = 0x06, PROTOCOL_BINARY_CMD_QUIT = 0x07, PROTOCOL_BINARY_CMD_FLUSH = 0x08, PROTOCOL_BINARY_CMD_GETQ = 0x09, PROTOCOL_BINARY_CMD_NOOP = 0x0a, PROTOCOL_BINARY_CMD_VERSION = 0x0b, PROTOCOL_BINARY_CMD_GETK = 0x0c, PROTOCOL_BINARY_CMD_GETKQ = 0x0d, PROTOCOL_BINARY_CMD_APPEND = 0x0e, PROTOCOL_BINARY_CMD_PREPEND = 0x0f, PROTOCOL_BINARY_CMD_STAT = 0x10, PROTOCOL_BINARY_CMD_SETQ = 0x11, PROTOCOL_BINARY_CMD_ADDQ = 0x12, PROTOCOL_BINARY_CMD_REPLACEQ = 0x13, PROTOCOL_BINARY_CMD_DELETEQ = 0x14, PROTOCOL_BINARY_CMD_INCREMENTQ = 0x15, PROTOCOL_BINARY_CMD_DECREMENTQ = 0x16, PROTOCOL_BINARY_CMD_QUITQ = 0x17, PROTOCOL_BINARY_CMD_FLUSHQ = 0x18, PROTOCOL_BINARY_CMD_APPENDQ = 0x19, PROTOCOL_BINARY_CMD_PREPENDQ = 0x1a, PROTOCOL_BINARY_CMD_TOUCH = 0x1c, PROTOCOL_BINARY_CMD_GAT = 0x1d, PROTOCOL_BINARY_CMD_GATQ = 0x1e, PROTOCOL_BINARY_CMD_GATK = 0x23, PROTOCOL_BINARY_CMD_GATKQ = 0x24, PROTOCOL_BINARY_CMD_SASL_LIST_MECHS = 0x20, PROTOCOL_BINARY_CMD_SASL_AUTH = 0x21, PROTOCOL_BINARY_CMD_SASL_STEP = 0x22, /* These commands are used for range operations and exist within * this header for use in other projects. Range operations are * not expected to be implemented in the memcached server itself. */ PROTOCOL_BINARY_CMD_RGET = 0x30, PROTOCOL_BINARY_CMD_RSET = 0x31, PROTOCOL_BINARY_CMD_RSETQ = 0x32, PROTOCOL_BINARY_CMD_RAPPEND = 0x33, PROTOCOL_BINARY_CMD_RAPPENDQ = 0x34, PROTOCOL_BINARY_CMD_RPREPEND = 0x35, PROTOCOL_BINARY_CMD_RPREPENDQ = 0x36, PROTOCOL_BINARY_CMD_RDELETE = 0x37, PROTOCOL_BINARY_CMD_RDELETEQ = 0x38, PROTOCOL_BINARY_CMD_RINCR = 0x39, PROTOCOL_BINARY_CMD_RINCRQ = 0x3a, PROTOCOL_BINARY_CMD_RDECR = 0x3b, PROTOCOL_BINARY_CMD_RDECRQ = 0x3c /* End Range operations */ } protocol_binary_command; /** * Definition of the data types in the packet * See section 3.4 Data Types */ typedef enum { PROTOCOL_BINARY_RAW_BYTES = 0x00 } protocol_binary_datatypes; /** * Definition of the header structure for a request packet. * See section 2 */ typedef union { struct { uint8_t magic; uint8_t opcode; uint16_t keylen; uint8_t extlen; uint8_t datatype; uint16_t reserved; uint32_t bodylen; uint32_t opaque; uint64_t cas; } request; uint8_t bytes[24]; } protocol_binary_request_header; /** * Definition of the header structure for a response packet. * See section 2 */ typedef union { struct { uint8_t magic; uint8_t opcode; uint16_t keylen; uint8_t extlen; uint8_t datatype; uint16_t status; uint32_t bodylen; uint32_t opaque; uint64_t cas; } response; uint8_t bytes[24]; } protocol_binary_response_header; /** * Definition of a request-packet containing no extras */ typedef union { struct { protocol_binary_request_header header; } message; uint8_t bytes[sizeof(protocol_binary_request_header)]; } protocol_binary_request_no_extras; /** * Definition of a response-packet containing no extras */ typedef union { struct { protocol_binary_response_header header; } message; uint8_t bytes[sizeof(protocol_binary_response_header)]; } protocol_binary_response_no_extras; /** * Definition of the packet used by the get, getq, getk and getkq command. 
* See section 4 */ typedef protocol_binary_request_no_extras protocol_binary_request_get; typedef protocol_binary_request_no_extras protocol_binary_request_getq; typedef protocol_binary_request_no_extras protocol_binary_request_getk; typedef protocol_binary_request_no_extras protocol_binary_request_getkq; /** * Definition of the packet returned from a successful get, getq, getk and * getkq. * See section 4 */ typedef union { struct { protocol_binary_response_header header; struct { uint32_t flags; } body; } message; uint8_t bytes[sizeof(protocol_binary_response_header) + 4]; } protocol_binary_response_get; typedef protocol_binary_response_get protocol_binary_response_getq; typedef protocol_binary_response_get protocol_binary_response_getk; typedef protocol_binary_response_get protocol_binary_response_getkq; /** * Definition of the packet used by the delete command * See section 4 */ typedef protocol_binary_request_no_extras protocol_binary_request_delete; /** * Definition of the packet returned by the delete command * See section 4 */ typedef protocol_binary_response_no_extras protocol_binary_response_delete; /** * Definition of the packet used by the flush command * See section 4 * Please note that the expiration field is optional, so remember to see * check the header.bodysize to see if it is present. */ typedef union { struct { protocol_binary_request_header header; struct { uint32_t expiration; } body; } message; uint8_t bytes[sizeof(protocol_binary_request_header) + 4]; } protocol_binary_request_flush; /** * Definition of the packet returned by the flush command * See section 4 */ typedef protocol_binary_response_no_extras protocol_binary_response_flush; /** * Definition of the packet used by set, add and replace * See section 4 */ typedef union { struct { protocol_binary_request_header header; struct { uint32_t flags; uint32_t expiration; } body; } message; uint8_t bytes[sizeof(protocol_binary_request_header) + 8]; } protocol_binary_request_set; typedef protocol_binary_request_set protocol_binary_request_add; typedef protocol_binary_request_set protocol_binary_request_replace; /** * Definition of the packet returned by set, add and replace * See section 4 */ typedef protocol_binary_response_no_extras protocol_binary_response_set; typedef protocol_binary_response_no_extras protocol_binary_response_add; typedef protocol_binary_response_no_extras protocol_binary_response_replace; /** * Definition of the noop packet * See section 4 */ typedef protocol_binary_request_no_extras protocol_binary_request_noop; /** * Definition of the packet returned by the noop command * See section 4 */ typedef protocol_binary_response_no_extras protocol_binary_response_noop; /** * Definition of the structure used by the increment and decrement * command. * See section 4 */ typedef union { struct { protocol_binary_request_header header; struct { uint64_t delta; uint64_t initial; uint32_t expiration; } body; } message; uint8_t bytes[sizeof(protocol_binary_request_header) + 20]; } protocol_binary_request_incr; typedef protocol_binary_request_incr protocol_binary_request_decr; /** * Definition of the response from an incr or decr command * command. 
* See section 4 */ typedef union { struct { protocol_binary_response_header header; struct { uint64_t value; } body; } message; uint8_t bytes[sizeof(protocol_binary_response_header) + 8]; } protocol_binary_response_incr; typedef protocol_binary_response_incr protocol_binary_response_decr; /** * Definition of the quit * See section 4 */ typedef protocol_binary_request_no_extras protocol_binary_request_quit; /** * Definition of the packet returned by the quit command * See section 4 */ typedef protocol_binary_response_no_extras protocol_binary_response_quit; /** * Definition of the packet used by append and prepend command * See section 4 */ typedef protocol_binary_request_no_extras protocol_binary_request_append; typedef protocol_binary_request_no_extras protocol_binary_request_prepend; /** * Definition of the packet returned from a successful append or prepend * See section 4 */ typedef protocol_binary_response_no_extras protocol_binary_response_append; typedef protocol_binary_response_no_extras protocol_binary_response_prepend; /** * Definition of the packet used by the version command * See section 4 */ typedef protocol_binary_request_no_extras protocol_binary_request_version; /** * Definition of the packet returned from a successful version command * See section 4 */ typedef protocol_binary_response_no_extras protocol_binary_response_version; /** * Definition of the packet used by the stats command. * See section 4 */ typedef protocol_binary_request_no_extras protocol_binary_request_stats; /** * Definition of the packet returned from a successful stats command * See section 4 */ typedef protocol_binary_response_no_extras protocol_binary_response_stats; /** * Definition of the packet used by the touch command. */ typedef union { struct { protocol_binary_request_header header; struct { uint32_t expiration; } body; } message; uint8_t bytes[sizeof(protocol_binary_request_header) + 4]; } protocol_binary_request_touch; /** * Definition of the packet returned from the touch command */ typedef protocol_binary_response_no_extras protocol_binary_response_touch; /** * Definition of the packet used by the GAT(Q) command. */ typedef union { struct { protocol_binary_request_header header; struct { uint32_t expiration; } body; } message; uint8_t bytes[sizeof(protocol_binary_request_header) + 4]; } protocol_binary_request_gat; typedef protocol_binary_request_gat protocol_binary_request_gatq; typedef protocol_binary_request_gat protocol_binary_request_gatk; typedef protocol_binary_request_gat protocol_binary_request_gatkq; /** * Definition of the packet returned from the GAT(Q) */ typedef protocol_binary_response_get protocol_binary_response_gat; typedef protocol_binary_response_get protocol_binary_response_gatq; typedef protocol_binary_response_get protocol_binary_response_gatk; typedef protocol_binary_response_get protocol_binary_response_gatkq; /** * Definition of a request for a range operation. * See http://code.google.com/p/memcached/wiki/RangeOps * * These types are used for range operations and exist within * this header for use in other projects. Range operations are * not expected to be implemented in the memcached server itself. 
*/ typedef union { struct { protocol_binary_response_header header; struct { uint16_t size; uint8_t reserved; uint8_t flags; uint32_t max_results; } body; } message; uint8_t bytes[sizeof(protocol_binary_request_header) + 4]; } protocol_binary_request_rangeop; typedef protocol_binary_request_rangeop protocol_binary_request_rget; typedef protocol_binary_request_rangeop protocol_binary_request_rset; typedef protocol_binary_request_rangeop protocol_binary_request_rsetq; typedef protocol_binary_request_rangeop protocol_binary_request_rappend; typedef protocol_binary_request_rangeop protocol_binary_request_rappendq; typedef protocol_binary_request_rangeop protocol_binary_request_rprepend; typedef protocol_binary_request_rangeop protocol_binary_request_rprependq; typedef protocol_binary_request_rangeop protocol_binary_request_rdelete; typedef protocol_binary_request_rangeop protocol_binary_request_rdeleteq; typedef protocol_binary_request_rangeop protocol_binary_request_rincr; typedef protocol_binary_request_rangeop protocol_binary_request_rincrq; typedef protocol_binary_request_rangeop protocol_binary_request_rdecr; typedef protocol_binary_request_rangeop protocol_binary_request_rdecrq; #ifdef __cplusplus } #endif #endif /* PROTOCOL_BINARY_H */
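The unions above are byte-for-byte overlays of the wire format, so building a request is just a matter of filling in the header and converting multi-byte fields to network byte order. The following standalone sketch (not part of the header) builds a GET request for the key "foo"; it assumes this protocol_binary.h is on the include path and uses the literal 0x80 for the request magic that is defined earlier in the file.

/*
 * Illustrative sketch only: fills the 24-byte header for a binary-protocol
 * GET of the key "foo". On the wire the key bytes follow the header directly.
 * Assumes protocol_binary.h is on the include path; 0x80 is the request magic.
 */
#include <arpa/inet.h>   /* htons, htonl */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "protocol_binary.h"

int main(void) {
    const char *key = "foo";
    uint16_t keylen = (uint16_t)strlen(key);

    protocol_binary_request_get req;
    memset(&req, 0, sizeof(req));                                 /* opaque and cas stay 0 */
    req.message.header.request.magic    = 0x80;                   /* request magic */
    req.message.header.request.opcode   = PROTOCOL_BINARY_CMD_GET;
    req.message.header.request.keylen   = htons(keylen);          /* network byte order */
    req.message.header.request.extlen   = 0;                      /* GET carries no extras */
    req.message.header.request.datatype = PROTOCOL_BINARY_RAW_BYTES;
    req.message.header.request.bodylen  = htonl(keylen);          /* extras + key + value */

    printf("header is %zu bytes, body is %u bytes\n",
           sizeof(req.bytes), (unsigned)keylen);
    return 0;
}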
16525
34.087049
80
h
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/items.h
/*
 * Copyright 2018 Lenovo
 *
 * Licensed under the BSD-3 license. see LICENSE.Lenovo.txt for full text
 */

/*
 * Note:
 *   Codes enclosed in `#ifdef PSLAB' and `#endif' are added by Lenovo for
 *   persistent memory support
 */

#define HOT_LRU 0
#define WARM_LRU 64
#define COLD_LRU 128
#define TEMP_LRU 192
#define CLEAR_LRU(id) (id & ~(3<<6))
#define GET_LRU(id) (id & (3<<6))

/* See items.c */
uint64_t get_cas_id(void);

/*@null@*/
item *do_item_alloc(char *key, const size_t nkey, const unsigned int flags, const rel_time_t exptime, const int nbytes);
item_chunk *do_item_alloc_chunk(item_chunk *ch, const size_t bytes_remain);
item *do_item_alloc_pull(const size_t ntotal, const unsigned int id);
void item_free(item *it);
bool item_size_ok(const size_t nkey, const int flags, const int nbytes);

int do_item_link(item *it, const uint32_t hv);     /** may fail if transgresses limits */
#ifdef PSLAB
void do_item_relink(item *it, const uint32_t hv);
#endif
void do_item_unlink(item *it, const uint32_t hv);
void do_item_unlink_nolock(item *it, const uint32_t hv);
void do_item_remove(item *it);
void do_item_update(item *it);   /** update LRU time to current and reposition */
void do_item_update_nolock(item *it);
int do_item_replace(item *it, item *new_it, const uint32_t hv);

int item_is_flushed(item *it);

void do_item_linktail_q(item *it);
void do_item_unlinktail_q(item *it);
item *do_item_crawl_q(item *it);

void *item_lru_bump_buf_create(void);

#define LRU_PULL_EVICT 1
#define LRU_PULL_CRAWL_BLOCKS 2
#define LRU_PULL_RETURN_ITEM 4 /* fill info struct if available */

struct lru_pull_tail_return {
    item *it;
    uint32_t hv;
};

int lru_pull_tail(const int orig_id, const int cur_lru, const uint64_t total_bytes,
        const uint8_t flags, const rel_time_t max_age, struct lru_pull_tail_return *ret_it);

/*@null@*/
char *item_cachedump(const unsigned int slabs_clsid, const unsigned int limit, unsigned int *bytes);
void item_stats(ADD_STAT add_stats, void *c);
void do_item_stats_add_crawl(const int i, const uint64_t reclaimed, const uint64_t unfetched, const uint64_t checked);
void item_stats_totals(ADD_STAT add_stats, void *c);
/*@null@*/
void item_stats_sizes(ADD_STAT add_stats, void *c);
void item_stats_sizes_init(void);
void item_stats_sizes_enable(ADD_STAT add_stats, void *c);
void item_stats_sizes_disable(ADD_STAT add_stats, void *c);
void item_stats_sizes_add(item *it);
void item_stats_sizes_remove(item *it);
bool item_stats_sizes_status(void);

/* stats getter for slab automover */
typedef struct {
    int64_t evicted;
    int64_t outofmemory;
    uint32_t age;
} item_stats_automove;
void fill_item_stats_automove(item_stats_automove *am);

item *do_item_get(const char *key, const size_t nkey, const uint32_t hv, conn *c, const bool do_update);
item *do_item_touch(const char *key, const size_t nkey, uint32_t exptime, const uint32_t hv, conn *c);
void item_stats_reset(void);
extern pthread_mutex_t lru_locks[POWER_LARGEST];

int start_lru_maintainer_thread(void *arg);
int stop_lru_maintainer_thread(void);
int init_lru_maintainer(void);
void lru_maintainer_pause(void);
void lru_maintainer_resume(void);

void *lru_bump_buf_create(void);

#ifdef EXTSTORE
#define STORAGE_delete(e, it) \
    do { \
        if (it->it_flags & ITEM_HDR) { \
            item_hdr *hdr = (item_hdr *)ITEM_data(it); \
            extstore_delete(e, hdr->page_id, hdr->page_version, \
                    1, ITEM_ntotal(it)); \
        } \
    } while (0)
#else
#define STORAGE_delete(...)
#endif
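The four *_LRU constants and the CLEAR_LRU/GET_LRU macros above pack the LRU class into bits 6 and 7 of a slab class id, so a single integer identifies both the slab class and which of the four LRUs an item sits on. The short standalone sketch below (not part of the header; the macros are copied locally so it compiles on its own) illustrates the packing and unpacking.

/*
 * Standalone sketch: demonstrates the LRU-id bit packing used by items.h.
 * The macros are copied here so the example builds without memcached.h.
 */
#include <assert.h>
#include <stdio.h>

#define HOT_LRU 0
#define WARM_LRU 64
#define COLD_LRU 128
#define TEMP_LRU 192
#define CLEAR_LRU(id) (id & ~(3<<6))
#define GET_LRU(id) (id & (3<<6))

int main(void) {
    unsigned int slab_clsid = 5;                 /* hypothetical slab class */
    unsigned int lru_id = slab_clsid | COLD_LRU; /* item sitting on the COLD LRU */

    assert(CLEAR_LRU(lru_id) == slab_clsid);     /* recover the slab class */
    assert(GET_LRU(lru_id) == COLD_LRU);         /* recover the LRU class */
    printf("id=%u -> slab class %u, LRU bits %u\n",
           lru_id, CLEAR_LRU(lru_id), GET_LRU(lru_id));
    return 0;
}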
3550
31.577982
120
h
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/trace.h
#ifndef TRACE_H
#define TRACE_H

#ifdef ENABLE_DTRACE
#include "memcached_dtrace.h"
#else
#define MEMCACHED_ASSOC_DELETE(arg0, arg1, arg2)
#define MEMCACHED_ASSOC_DELETE_ENABLED() (0)
#define MEMCACHED_ASSOC_FIND(arg0, arg1, arg2)
#define MEMCACHED_ASSOC_FIND_ENABLED() (0)
#define MEMCACHED_ASSOC_INSERT(arg0, arg1, arg2)
#define MEMCACHED_ASSOC_INSERT_ENABLED() (0)
#define MEMCACHED_COMMAND_ADD(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_ADD_ENABLED() (0)
#define MEMCACHED_COMMAND_APPEND(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_APPEND_ENABLED() (0)
#define MEMCACHED_COMMAND_CAS(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_CAS_ENABLED() (0)
#define MEMCACHED_COMMAND_DECR(arg0, arg1, arg2, arg3)
#define MEMCACHED_COMMAND_DECR_ENABLED() (0)
#define MEMCACHED_COMMAND_DELETE(arg0, arg1, arg2)
#define MEMCACHED_COMMAND_DELETE_ENABLED() (0)
#define MEMCACHED_COMMAND_GET(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_GET_ENABLED() (0)
#define MEMCACHED_COMMAND_TOUCH(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_TOUCH_ENABLED() (0)
#define MEMCACHED_COMMAND_INCR(arg0, arg1, arg2, arg3)
#define MEMCACHED_COMMAND_INCR_ENABLED() (0)
#define MEMCACHED_COMMAND_PREPEND(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_PREPEND_ENABLED() (0)
#define MEMCACHED_COMMAND_REPLACE(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_REPLACE_ENABLED() (0)
#define MEMCACHED_COMMAND_SET(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_SET_ENABLED() (0)
#define MEMCACHED_CONN_ALLOCATE(arg0)
#define MEMCACHED_CONN_ALLOCATE_ENABLED() (0)
#define MEMCACHED_CONN_CREATE(arg0)
#define MEMCACHED_CONN_CREATE_ENABLED() (0)
#define MEMCACHED_CONN_DESTROY(arg0)
#define MEMCACHED_CONN_DESTROY_ENABLED() (0)
#define MEMCACHED_CONN_DISPATCH(arg0, arg1)
#define MEMCACHED_CONN_DISPATCH_ENABLED() (0)
#define MEMCACHED_CONN_RELEASE(arg0)
#define MEMCACHED_CONN_RELEASE_ENABLED() (0)
#define MEMCACHED_ITEM_LINK(arg0, arg1, arg2)
#define MEMCACHED_ITEM_LINK_ENABLED() (0)
#define MEMCACHED_ITEM_REMOVE(arg0, arg1, arg2)
#define MEMCACHED_ITEM_REMOVE_ENABLED() (0)
#define MEMCACHED_ITEM_REPLACE(arg0, arg1, arg2, arg3, arg4, arg5)
#define MEMCACHED_ITEM_REPLACE_ENABLED() (0)
#define MEMCACHED_ITEM_UNLINK(arg0, arg1, arg2)
#define MEMCACHED_ITEM_UNLINK_ENABLED() (0)
#define MEMCACHED_ITEM_UPDATE(arg0, arg1, arg2)
#define MEMCACHED_ITEM_UPDATE_ENABLED() (0)
#define MEMCACHED_PROCESS_COMMAND_END(arg0, arg1, arg2)
#define MEMCACHED_PROCESS_COMMAND_END_ENABLED() (0)
#define MEMCACHED_PROCESS_COMMAND_START(arg0, arg1, arg2)
#define MEMCACHED_PROCESS_COMMAND_START_ENABLED() (0)
#define MEMCACHED_SLABS_ALLOCATE(arg0, arg1, arg2, arg3)
#define MEMCACHED_SLABS_ALLOCATE_ENABLED() (0)
#define MEMCACHED_SLABS_ALLOCATE_FAILED(arg0, arg1)
#define MEMCACHED_SLABS_ALLOCATE_FAILED_ENABLED() (0)
#define MEMCACHED_SLABS_FREE(arg0, arg1, arg2)
#define MEMCACHED_SLABS_FREE_ENABLED() (0)
#define MEMCACHED_SLABS_SLABCLASS_ALLOCATE(arg0)
#define MEMCACHED_SLABS_SLABCLASS_ALLOCATE_ENABLED() (0)
#define MEMCACHED_SLABS_SLABCLASS_ALLOCATE_FAILED(arg0)
#define MEMCACHED_SLABS_SLABCLASS_ALLOCATE_FAILED_ENABLED() (0)
#endif

#endif
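When ENABLE_DTRACE is not defined, every probe above expands to nothing and every *_ENABLED() test evaluates to 0, so instrumented call sites compile away entirely. A minimal usage sketch, assuming this trace.h is on the include path and dtrace is disabled:

/*
 * Sketch of the standard probe pattern: guard with *_ENABLED() so probe
 * arguments are only prepared when a tracer can actually observe them.
 * With ENABLE_DTRACE undefined, the whole body is dead code.
 */
#include <string.h>
#include "trace.h"

static void link_item(const char *key, int total_bytes) {
    (void)key; (void)total_bytes;   /* silence unused warnings when probes are compiled out */
    if (MEMCACHED_ITEM_LINK_ENABLED()) {
        MEMCACHED_ITEM_LINK(key, strlen(key), total_bytes);
    }
}

int main(void) {
    link_item("example-key", 128);
    return 0;
}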
3179
43.166667
66
h
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/jenkins_hash.h
#ifndef JENKINS_HASH_H
#define JENKINS_HASH_H

#ifdef __cplusplus
extern "C" {
#endif

uint32_t jenkins_hash(const void *key, size_t length);

#ifdef __cplusplus
}
#endif

#endif    /* JENKINS_HASH_H */
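A minimal usage sketch, assuming the program links against the jenkins_hash() implementation that ships alongside this header: hash a key, then mask the value down to a power-of-two bucket index the way a hash table would.

/*
 * Illustrative only. The header relies on its includer for uint32_t and
 * size_t, so pull in <stdint.h> and <string.h> first. hashpower is a
 * hypothetical table size chosen for the example.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "jenkins_hash.h"

int main(void) {
    const char *key = "some-cache-key";
    uint32_t hv = jenkins_hash(key, strlen(key));

    unsigned int hashpower = 16;                  /* 2^16 buckets, illustrative */
    uint32_t bucket = hv & ((1u << hashpower) - 1);

    printf("hash=0x%08x bucket=%u\n", hv, bucket);
    return 0;
}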
213
12.375
54
h
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/cache.h
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
#ifndef CACHE_H
#define CACHE_H
#include <pthread.h>

#ifdef HAVE_UMEM_H
#include <umem.h>

#define cache_t umem_cache_t
#define cache_alloc(a) umem_cache_alloc(a, UMEM_DEFAULT)
#define do_cache_alloc(a) umem_cache_alloc(a, UMEM_DEFAULT)
#define cache_free(a, b) umem_cache_free(a, b)
#define do_cache_free(a, b) umem_cache_free(a, b)
#define cache_create(a,b,c,d,e) umem_cache_create((char*)a, b, c, d, e, NULL, NULL, NULL, 0)
#define cache_destroy(a) umem_cache_destroy(a);

#else

#ifndef NDEBUG
/* may be used for debug purposes */
extern int cache_error;
#endif

/**
 * Constructor used to initialize allocated objects
 *
 * @param obj pointer to the object to be initialized.
 * @param notused1 This parameter is currently not used.
 * @param notused2 This parameter is currently not used.
 * @return you should return 0, but currently this is not checked
 */
typedef int cache_constructor_t(void* obj, void* notused1, int notused2);

/**
 * Destructor used to clean up allocated objects before they are
 * returned to the operating system.
 *
 * @param obj pointer to the object to clean up.
 * @param notused This parameter is currently not used.
 */
typedef void cache_destructor_t(void* obj, void* notused);

/**
 * Definition of the structure to keep track of the internal details of
 * the cache allocator. Touching any of these variables results in
 * undefined behavior.
 */
typedef struct {
    /** Mutex to protect access to the structure */
    pthread_mutex_t mutex;
    /** Name of the cache objects in this cache (provided by the caller) */
    char *name;
    /** List of pointers to available buffers in this cache */
    void **ptr;
    /** The size of each element in this cache */
    size_t bufsize;
    /** The capacity of the list of elements */
    int freetotal;
    /** The current number of free elements */
    int freecurr;
    /** The constructor to be called each time we allocate more memory */
    cache_constructor_t* constructor;
    /** The destructor to be called each time before we release memory */
    cache_destructor_t* destructor;
} cache_t;

/**
 * Create an object cache.
 *
 * The object cache will let you allocate objects of the same size. It is fully
 * MT safe, so you may allocate objects from multiple threads without having to
 * do any synchronization in the application code.
 *
 * @param name the name of the object cache. This name may be used for debug purposes
 *             and may help you track down what kind of object you have problems with
 *             (buffer overruns, leakage etc)
 * @param bufsize the size of each object in the cache
 * @param align the alignment requirements of the objects in the cache.
 * @param constructor the function to be called to initialize memory when we need
 *                    to allocate more memory from the os.
 * @param destructor the function to be called before we release the memory back
 *                   to the os.
 * @return a handle to an object cache if successful, NULL otherwise.
 */
cache_t* cache_create(const char* name, size_t bufsize, size_t align,
                      cache_constructor_t* constructor,
                      cache_destructor_t* destructor);

/**
 * Destroy an object cache.
 *
 * Destroy and invalidate an object cache. You should return all buffers allocated
 * with cache_alloc by using cache_free before calling this function. Not doing
 * so results in undefined behavior (the buffers may or may not be invalidated)
 *
 * @param handle the handle to the object cache to destroy.
 */
void cache_destroy(cache_t* handle);

/**
 * Allocate an object from the cache.
 *
 * @param handle the handle to the object cache to allocate from
 * @return a pointer to an initialized object from the cache, or NULL if
 *         the allocation cannot be satisfied.
 */
void* cache_alloc(cache_t* handle);
void* do_cache_alloc(cache_t* handle);

/**
 * Return an object back to the cache.
 *
 * The caller should return the object in an initialized state so that
 * the object may be returned in an expected state from cache_alloc.
 *
 * @param handle handle to the object cache to return the object to
 * @param ptr pointer to the object to return.
 */
void cache_free(cache_t* handle, void* ptr);
void do_cache_free(cache_t* handle, void* ptr);

#endif
#endif
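A minimal usage sketch of the non-libumem API above, assuming the program links against the matching cache.c in this tree: create a cache of fixed-size buffers, allocate and return one object, then destroy the cache. The constructor and destructor are optional and left NULL here.

/*
 * Illustrative only: a fixed-size object cache for 64-byte buffers.
 * "demo_bufs" is an arbitrary debug name; 64 and the alignment are
 * example values.
 */
#include <stdio.h>
#include "cache.h"

int main(void) {
    cache_t *buf_cache = cache_create("demo_bufs", 64, sizeof(void *),
                                      NULL, NULL);
    if (buf_cache == NULL)
        return 1;

    void *obj = cache_alloc(buf_cache);   /* may return NULL under memory pressure */
    if (obj != NULL) {
        /* ... use the 64-byte buffer ... */
        cache_free(buf_cache, obj);       /* return it before destroying the cache */
    }

    cache_destroy(buf_cache);
    return 0;
}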
4498
36.181818
92
h
null
NearPMSW-main/nearpmMDsync/checkpointing/memcached-pmem-checkpointing/bipbuffer.h
#ifndef BIPBUFFER_H
#define BIPBUFFER_H

typedef struct {
    unsigned long int size;

    /* region A */
    unsigned int a_start, a_end;

    /* region B */
    unsigned int b_end;

    /* is B inuse? */
    int b_inuse;

    unsigned char data[];
} bipbuf_t;

/**
 * Create a new bip buffer.
 *
 * malloc()s space
 *
 * @param[in] size The size of the buffer */
bipbuf_t *bipbuf_new(const unsigned int size);

/**
 * Initialise a bip buffer. Use memory provided by user.
 *
 * No malloc()s are performed.
 *
 * @param[in] size The size of the array */
void bipbuf_init(bipbuf_t* me, const unsigned int size);

/**
 * Free the bip buffer */
void bipbuf_free(bipbuf_t *me);

/* TODO: DOCUMENTATION */
unsigned char *bipbuf_request(bipbuf_t* me, const int size);
int bipbuf_push(bipbuf_t* me, const int size);

/**
 * @param[in] data The data to be offered to the buffer
 * @param[in] size The size of the data to be offered
 * @return number of bytes offered */
int bipbuf_offer(bipbuf_t *me, const unsigned char *data, const int size);

/**
 * Look at data. Don't move cursor
 *
 * @param[in] len The length of the data to be peeked
 * @return data on success, NULL if we can't peek at this much data */
unsigned char *bipbuf_peek(const bipbuf_t* me, const unsigned int len);

/**
 * Look at data. Don't move cursor
 *
 * @param[in] len The length of the data returned
 * @return data on success, NULL if nothing available */
unsigned char *bipbuf_peek_all(const bipbuf_t* me, unsigned int *len);

/**
 * Get pointer to data to read. Move the cursor on.
 *
 * @param[in] len The length of the data to be polled
 * @return pointer to data, NULL if we can't poll this much data */
unsigned char *bipbuf_poll(bipbuf_t* me, const unsigned int size);

/**
 * @return the size of the bipbuffer */
int bipbuf_size(const bipbuf_t* me);

/**
 * @return 1 if buffer is empty; 0 otherwise */
int bipbuf_is_empty(const bipbuf_t* me);

/**
 * @return how much space we have assigned */
int bipbuf_used(const bipbuf_t* cb);

/**
 * @return bytes of unused space */
int bipbuf_unused(const bipbuf_t* me);

#endif /* BIPBUFFER_H */
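A minimal usage sketch, assuming the program links against the bipbuffer implementation that accompanies this header: offer a small message into a heap-allocated bip buffer, then poll it back out. The buffer size and message are example values.

/*
 * Illustrative only: write one message into a bip buffer and read it back.
 * bipbuf_offer() copies the bytes in; bipbuf_poll() returns a contiguous
 * pointer into the buffer and advances the read cursor.
 */
#include <stdio.h>
#include <string.h>
#include "bipbuffer.h"

int main(void) {
    bipbuf_t *bb = bipbuf_new(256);           /* 256 bytes of usable space */
    if (bb == NULL)
        return 1;

    const unsigned char msg[] = "hello bipbuffer";
    int written = bipbuf_offer(bb, msg, (int)sizeof(msg));
    printf("offered %d bytes, %d bytes now used\n", written, bipbuf_used(bb));

    unsigned char *out = bipbuf_poll(bb, sizeof(msg));
    if (out != NULL)
        printf("read back: %s\n", out);       /* msg included its NUL terminator */

    bipbuf_free(bb);
    return 0;
}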
2118
23.079545
74
h