repo
stringlengths
1
152
file
stringlengths
14
221
code
stringlengths
501
25k
file_length
int64
501
25k
avg_line_length
float64
20
99.5
max_line_length
int64
21
134
extension_type
stringclasses
2 values
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/pmemops.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ #ifndef LIBPMEMOBJ_PMEMOPS_H #define LIBPMEMOBJ_PMEMOPS_H 1 #include <stddef.h> #include <stdint.h> #include "util.h" #ifdef __cplusplus extern "C" { #endif typedef int (*persist_fn)(void *base, const void *, size_t, unsigned); typedef int (*flush_fn)(void *base, const void *, size_t, unsigned); typedef void (*drain_fn)(void *base); typedef void *(*memcpy_fn)(void *base, void *dest, const void *src, size_t len, unsigned flags); typedef void *(*memmove_fn)(void *base, void *dest, const void *src, size_t len, unsigned flags); typedef void *(*memset_fn)(void *base, void *dest, int c, size_t len, unsigned flags); typedef int (*remote_read_fn)(void *ctx, uintptr_t base, void *dest, void *addr, size_t length); struct pmem_ops { /* for 'master' replica: with or without data replication */ persist_fn persist; /* persist function */ flush_fn flush; /* flush function */ drain_fn drain; /* drain function */ memcpy_fn memcpy; /* persistent memcpy function */ memmove_fn memmove; /* persistent memmove function */ memset_fn memset; /* persistent memset function */ void *base; //char a; //temp var end struct remote_ops { remote_read_fn read; void *ctx; uintptr_t base; } remote; void *device; uint16_t objid; }; static force_inline int pmemops_xpersist(const struct pmem_ops *p_ops, const void *d, size_t s, unsigned flags) { return p_ops->persist(p_ops->base, d, s, flags); } static force_inline void pmemops_persist(const struct pmem_ops *p_ops, const void *d, size_t s) { (void) pmemops_xpersist(p_ops, d, s, 0); } static force_inline int pmemops_xflush(const struct pmem_ops *p_ops, const void *d, size_t s, unsigned flags) { return p_ops->flush(p_ops->base, d, s, flags); } static force_inline void pmemops_flush(const struct pmem_ops *p_ops, const void *d, size_t s) { (void) pmemops_xflush(p_ops, d, s, 0); } static force_inline void pmemops_drain(const struct pmem_ops *p_ops) { 
p_ops->drain(p_ops->base); } static force_inline void * pmemops_memcpy(const struct pmem_ops *p_ops, void *dest, const void *src, size_t len, unsigned flags) { return p_ops->memcpy(p_ops->base, dest, src, len, flags); } static force_inline void * pmemops_memmove(const struct pmem_ops *p_ops, void *dest, const void *src, size_t len, unsigned flags) { return p_ops->memmove(p_ops->base, dest, src, len, flags); } static force_inline void * pmemops_memset(const struct pmem_ops *p_ops, void *dest, int c, size_t len, unsigned flags) { return p_ops->memset(p_ops->base, dest, c, len, flags); } #ifdef __cplusplus } #endif #endif
2,672
22.866071
80
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/sync.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * sync.h -- internal to obj synchronization API */ #ifndef LIBPMEMOBJ_SYNC_H #define LIBPMEMOBJ_SYNC_H 1 #include <errno.h> #include <stdint.h> #include "libpmemobj.h" #include "out.h" #include "os_thread.h" #ifdef __cplusplus extern "C" { #endif /* * internal definitions of PMEM-locks */ typedef union padded_pmemmutex { char padding[_POBJ_CL_SIZE]; struct { uint64_t runid; union { os_mutex_t mutex; struct { void *bsd_mutex_p; union padded_pmemmutex *next; } bsd_u; } mutex_u; } pmemmutex; } PMEMmutex_internal; #define PMEMmutex_lock pmemmutex.mutex_u.mutex #define PMEMmutex_bsd_mutex_p pmemmutex.mutex_u.bsd_u.bsd_mutex_p #define PMEMmutex_next pmemmutex.mutex_u.bsd_u.next typedef union padded_pmemrwlock { char padding[_POBJ_CL_SIZE]; struct { uint64_t runid; union { os_rwlock_t rwlock; struct { void *bsd_rwlock_p; union padded_pmemrwlock *next; } bsd_u; } rwlock_u; } pmemrwlock; } PMEMrwlock_internal; #define PMEMrwlock_lock pmemrwlock.rwlock_u.rwlock #define PMEMrwlock_bsd_rwlock_p pmemrwlock.rwlock_u.bsd_u.bsd_rwlock_p #define PMEMrwlock_next pmemrwlock.rwlock_u.bsd_u.next typedef union padded_pmemcond { char padding[_POBJ_CL_SIZE]; struct { uint64_t runid; union { os_cond_t cond; struct { void *bsd_cond_p; union padded_pmemcond *next; } bsd_u; } cond_u; } pmemcond; } PMEMcond_internal; #define PMEMcond_cond pmemcond.cond_u.cond #define PMEMcond_bsd_cond_p pmemcond.cond_u.bsd_u.bsd_cond_p #define PMEMcond_next pmemcond.cond_u.bsd_u.next /* * pmemobj_mutex_lock_nofail -- pmemobj_mutex_lock variant that never * fails from caller perspective. If pmemobj_mutex_lock failed, this function * aborts the program. 
*/ static inline void pmemobj_mutex_lock_nofail(PMEMobjpool *pop, PMEMmutex *mutexp) { int ret = pmemobj_mutex_lock(pop, mutexp); if (ret) { errno = ret; FATAL("!pmemobj_mutex_lock"); } } /* * pmemobj_mutex_unlock_nofail -- pmemobj_mutex_unlock variant that never * fails from caller perspective. If pmemobj_mutex_unlock failed, this function * aborts the program. */ static inline void pmemobj_mutex_unlock_nofail(PMEMobjpool *pop, PMEMmutex *mutexp) { int ret = pmemobj_mutex_unlock(pop, mutexp); if (ret) { errno = ret; FATAL("!pmemobj_mutex_unlock"); } } int pmemobj_mutex_assert_locked(PMEMobjpool *pop, PMEMmutex *mutexp); #ifdef __cplusplus } #endif #endif
2,504
21.168142
79
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/sync.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * sync.c -- persistent memory resident synchronization primitives */ #include <inttypes.h> #include "obj.h" #include "out.h" #include "util.h" #include "sync.h" #include "sys_util.h" #include "util.h" #include "valgrind_internal.h" #ifdef __FreeBSD__ #define RECORD_LOCK(init, type, p) \ if (init) {\ PMEM##type##_internal *head = pop->type##_head;\ while (!util_bool_compare_and_swap64(&pop->type##_head, head,\ p)) {\ head = pop->type##_head;\ }\ p->PMEM##type##_next = head;\ } #else #define RECORD_LOCK(init, type, p) #endif /* * _get_value -- (internal) atomically initialize and return a value. * Returns -1 on error, 0 if the caller is not the value * initializer, 1 if the caller is the value initializer. */ static int _get_value(uint64_t pop_runid, volatile uint64_t *runid, void *value, void *arg, int (*init_value)(void *value, void *arg)) { uint64_t tmp_runid; int initializer = 0; while ((tmp_runid = *runid) != pop_runid) { if (tmp_runid == pop_runid - 1) continue; if (!util_bool_compare_and_swap64(runid, tmp_runid, pop_runid - 1)) continue; initializer = 1; if (init_value(value, arg)) { ERR("error initializing lock"); util_fetch_and_and64(runid, 0); return -1; } if (util_bool_compare_and_swap64(runid, pop_runid - 1, pop_runid) == 0) { ERR("error setting lock runid"); return -1; } } return initializer; } /* * get_mutex -- (internal) atomically initialize, record and return a mutex */ static inline os_mutex_t * get_mutex(PMEMobjpool *pop, PMEMmutex_internal *imp) { if (likely(imp->pmemmutex.runid == pop->run_id)) return &imp->PMEMmutex_lock; volatile uint64_t *runid = &imp->pmemmutex.runid; LOG(5, "PMEMmutex %p pop->run_id %" PRIu64 " pmemmutex.runid %" PRIu64, imp, pop->run_id, *runid); ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0); COMPILE_ERROR_ON(sizeof(PMEMmutex) != sizeof(PMEMmutex_internal)); COMPILE_ERROR_ON(util_alignof(PMEMmutex) != util_alignof(os_mutex_t)); 
VALGRIND_REMOVE_PMEM_MAPPING(imp, _POBJ_CL_SIZE); int initializer = _get_value(pop->run_id, runid, &imp->PMEMmutex_lock, NULL, (void *)os_mutex_init); if (initializer == -1) { return NULL; } RECORD_LOCK(initializer, mutex, imp); return &imp->PMEMmutex_lock; } /* * get_rwlock -- (internal) atomically initialize, record and return a rwlock */ static inline os_rwlock_t * get_rwlock(PMEMobjpool *pop, PMEMrwlock_internal *irp) { if (likely(irp->pmemrwlock.runid == pop->run_id)) return &irp->PMEMrwlock_lock; volatile uint64_t *runid = &irp->pmemrwlock.runid; LOG(5, "PMEMrwlock %p pop->run_id %"\ PRIu64 " pmemrwlock.runid %" PRIu64, irp, pop->run_id, *runid); ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0); COMPILE_ERROR_ON(sizeof(PMEMrwlock) != sizeof(PMEMrwlock_internal)); COMPILE_ERROR_ON(util_alignof(PMEMrwlock) != util_alignof(os_rwlock_t)); VALGRIND_REMOVE_PMEM_MAPPING(irp, _POBJ_CL_SIZE); int initializer = _get_value(pop->run_id, runid, &irp->PMEMrwlock_lock, NULL, (void *)os_rwlock_init); if (initializer == -1) { return NULL; } RECORD_LOCK(initializer, rwlock, irp); return &irp->PMEMrwlock_lock; } /* * get_cond -- (internal) atomically initialize, record and return a * condition variable */ static inline os_cond_t * get_cond(PMEMobjpool *pop, PMEMcond_internal *icp) { if (likely(icp->pmemcond.runid == pop->run_id)) return &icp->PMEMcond_cond; volatile uint64_t *runid = &icp->pmemcond.runid; LOG(5, "PMEMcond %p pop->run_id %" PRIu64 " pmemcond.runid %" PRIu64, icp, pop->run_id, *runid); ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0); COMPILE_ERROR_ON(sizeof(PMEMcond) != sizeof(PMEMcond_internal)); COMPILE_ERROR_ON(util_alignof(PMEMcond) != util_alignof(os_cond_t)); VALGRIND_REMOVE_PMEM_MAPPING(icp, _POBJ_CL_SIZE); int initializer = _get_value(pop->run_id, runid, &icp->PMEMcond_cond, NULL, (void *)os_cond_init); if (initializer == -1) { return NULL; } RECORD_LOCK(initializer, cond, icp); return &icp->PMEMcond_cond; } /* * pmemobj_mutex_zero -- 
zero-initialize a pmem resident mutex * * This function is not MT safe. */ void pmemobj_mutex_zero(PMEMobjpool *pop, PMEMmutex *mutexp) { LOG(3, "pop %p mutex %p", pop, mutexp); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; mutexip->pmemmutex.runid = 0; pmemops_persist(&pop->p_ops, &mutexip->pmemmutex.runid, sizeof(mutexip->pmemmutex.runid)); } /* * pmemobj_mutex_lock -- lock a pmem resident mutex * * Atomically initializes and locks a PMEMmutex, otherwise behaves as its * POSIX counterpart. */ int pmemobj_mutex_lock(PMEMobjpool *pop, PMEMmutex *mutexp) { LOG(3, "pop %p mutex %p", pop, mutexp); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; os_mutex_t *mutex = get_mutex(pop, mutexip); if (mutex == NULL) return EINVAL; ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0); return os_mutex_lock(mutex); } /* * pmemobj_mutex_assert_locked -- checks whether mutex is locked. * * Returns 0 when mutex is locked. */ int pmemobj_mutex_assert_locked(PMEMobjpool *pop, PMEMmutex *mutexp) { LOG(3, "pop %p mutex %p", pop, mutexp); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; os_mutex_t *mutex = get_mutex(pop, mutexip); if (mutex == NULL) return EINVAL; ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0); int ret = os_mutex_trylock(mutex); if (ret == EBUSY) return 0; if (ret == 0) { util_mutex_unlock(mutex); /* * There's no good error code for this case. EINVAL is used for * something else here. */ return ENODEV; } return ret; } /* * pmemobj_mutex_timedlock -- lock a pmem resident mutex * * Atomically initializes and locks a PMEMmutex, otherwise behaves as its * POSIX counterpart. 
*/ int pmemobj_mutex_timedlock(PMEMobjpool *pop, PMEMmutex *__restrict mutexp, const struct timespec *__restrict abs_timeout) { LOG(3, "pop %p mutex %p", pop, mutexp); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; os_mutex_t *mutex = get_mutex(pop, mutexip); if (mutex == NULL) return EINVAL; ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0); return os_mutex_timedlock(mutex, abs_timeout); } /* * pmemobj_mutex_trylock -- trylock a pmem resident mutex * * Atomically initializes and trylocks a PMEMmutex, otherwise behaves as its * POSIX counterpart. */ int pmemobj_mutex_trylock(PMEMobjpool *pop, PMEMmutex *mutexp) { LOG(3, "pop %p mutex %p", pop, mutexp); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; os_mutex_t *mutex = get_mutex(pop, mutexip); if (mutex == NULL) return EINVAL; ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0); return os_mutex_trylock(mutex); } /* * pmemobj_mutex_unlock -- unlock a pmem resident mutex */ int pmemobj_mutex_unlock(PMEMobjpool *pop, PMEMmutex *mutexp) { LOG(3, "pop %p mutex %p", pop, mutexp); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); /* XXX potential performance improvement - move GET to debug version */ PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; os_mutex_t *mutex = get_mutex(pop, mutexip); if (mutex == NULL) return EINVAL; ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0); return os_mutex_unlock(mutex); } /* * pmemobj_rwlock_zero -- zero-initialize a pmem resident rwlock * * This function is not MT safe. 
*/ void pmemobj_rwlock_zero(PMEMobjpool *pop, PMEMrwlock *rwlockp) { LOG(3, "pop %p rwlock %p", pop, rwlockp); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; rwlockip->pmemrwlock.runid = 0; pmemops_persist(&pop->p_ops, &rwlockip->pmemrwlock.runid, sizeof(rwlockip->pmemrwlock.runid)); } /* * pmemobj_rwlock_rdlock -- rdlock a pmem resident mutex * * Atomically initializes and rdlocks a PMEMrwlock, otherwise behaves as its * POSIX counterpart. */ int pmemobj_rwlock_rdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp) { LOG(3, "pop %p rwlock %p", pop, rwlockp); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; os_rwlock_t *rwlock = get_rwlock(pop, rwlockip); if (rwlock == NULL) return EINVAL; ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0); return os_rwlock_rdlock(rwlock); } /* * pmemobj_rwlock_wrlock -- wrlock a pmem resident mutex * * Atomically initializes and wrlocks a PMEMrwlock, otherwise behaves as its * POSIX counterpart. */ int pmemobj_rwlock_wrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp) { LOG(3, "pop %p rwlock %p", pop, rwlockp); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; os_rwlock_t *rwlock = get_rwlock(pop, rwlockip); if (rwlock == NULL) return EINVAL; ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0); return os_rwlock_wrlock(rwlock); } /* * pmemobj_rwlock_timedrdlock -- timedrdlock a pmem resident mutex * * Atomically initializes and timedrdlocks a PMEMrwlock, otherwise behaves as * its POSIX counterpart. 
*/ int pmemobj_rwlock_timedrdlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp, const struct timespec *__restrict abs_timeout) { LOG(3, "pop %p rwlock %p timeout sec %ld nsec %ld", pop, rwlockp, abs_timeout->tv_sec, abs_timeout->tv_nsec); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; os_rwlock_t *rwlock = get_rwlock(pop, rwlockip); if (rwlock == NULL) return EINVAL; ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0); return os_rwlock_timedrdlock(rwlock, abs_timeout); } /* * pmemobj_rwlock_timedwrlock -- timedwrlock a pmem resident mutex * * Atomically initializes and timedwrlocks a PMEMrwlock, otherwise behaves as * its POSIX counterpart. */ int pmemobj_rwlock_timedwrlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp, const struct timespec *__restrict abs_timeout) { LOG(3, "pop %p rwlock %p timeout sec %ld nsec %ld", pop, rwlockp, abs_timeout->tv_sec, abs_timeout->tv_nsec); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; os_rwlock_t *rwlock = get_rwlock(pop, rwlockip); if (rwlock == NULL) return EINVAL; ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0); return os_rwlock_timedwrlock(rwlock, abs_timeout); } /* * pmemobj_rwlock_tryrdlock -- tryrdlock a pmem resident mutex * * Atomically initializes and tryrdlocks a PMEMrwlock, otherwise behaves as its * POSIX counterpart. 
*/ int pmemobj_rwlock_tryrdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp) { LOG(3, "pop %p rwlock %p", pop, rwlockp); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; os_rwlock_t *rwlock = get_rwlock(pop, rwlockip); if (rwlock == NULL) return EINVAL; ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0); return os_rwlock_tryrdlock(rwlock); } /* * pmemobj_rwlock_trywrlock -- trywrlock a pmem resident mutex * * Atomically initializes and trywrlocks a PMEMrwlock, otherwise behaves as its * POSIX counterpart. */ int pmemobj_rwlock_trywrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp) { LOG(3, "pop %p rwlock %p", pop, rwlockp); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; os_rwlock_t *rwlock = get_rwlock(pop, rwlockip); if (rwlock == NULL) return EINVAL; ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0); return os_rwlock_trywrlock(rwlock); } /* * pmemobj_rwlock_unlock -- unlock a pmem resident rwlock */ int pmemobj_rwlock_unlock(PMEMobjpool *pop, PMEMrwlock *rwlockp) { LOG(3, "pop %p rwlock %p", pop, rwlockp); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); /* XXX potential performance improvement - move GET to debug version */ PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; os_rwlock_t *rwlock = get_rwlock(pop, rwlockip); if (rwlock == NULL) return EINVAL; ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0); return os_rwlock_unlock(rwlock); } /* * pmemobj_cond_zero -- zero-initialize a pmem resident condition variable * * This function is not MT safe. 
*/ void pmemobj_cond_zero(PMEMobjpool *pop, PMEMcond *condp) { LOG(3, "pop %p cond %p", pop, condp); ASSERTeq(pop, pmemobj_pool_by_ptr(condp)); PMEMcond_internal *condip = (PMEMcond_internal *)condp; condip->pmemcond.runid = 0; pmemops_persist(&pop->p_ops, &condip->pmemcond.runid, sizeof(condip->pmemcond.runid)); } /* * pmemobj_cond_broadcast -- broadcast a pmem resident condition variable * * Atomically initializes and broadcast a PMEMcond, otherwise behaves as its * POSIX counterpart. */ int pmemobj_cond_broadcast(PMEMobjpool *pop, PMEMcond *condp) { LOG(3, "pop %p cond %p", pop, condp); ASSERTeq(pop, pmemobj_pool_by_ptr(condp)); PMEMcond_internal *condip = (PMEMcond_internal *)condp; os_cond_t *cond = get_cond(pop, condip); if (cond == NULL) return EINVAL; ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0); return os_cond_broadcast(cond); } /* * pmemobj_cond_signal -- signal a pmem resident condition variable * * Atomically initializes and signal a PMEMcond, otherwise behaves as its * POSIX counterpart. */ int pmemobj_cond_signal(PMEMobjpool *pop, PMEMcond *condp) { LOG(3, "pop %p cond %p", pop, condp); ASSERTeq(pop, pmemobj_pool_by_ptr(condp)); PMEMcond_internal *condip = (PMEMcond_internal *)condp; os_cond_t *cond = get_cond(pop, condip); if (cond == NULL) return EINVAL; ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0); return os_cond_signal(cond); } /* * pmemobj_cond_timedwait -- timedwait on a pmem resident condition variable * * Atomically initializes and timedwait on a PMEMcond, otherwise behaves as its * POSIX counterpart. 
*/ int pmemobj_cond_timedwait(PMEMobjpool *pop, PMEMcond *__restrict condp, PMEMmutex *__restrict mutexp, const struct timespec *__restrict abs_timeout) { LOG(3, "pop %p cond %p mutex %p abstime sec %ld nsec %ld", pop, condp, mutexp, abs_timeout->tv_sec, abs_timeout->tv_nsec); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); ASSERTeq(pop, pmemobj_pool_by_ptr(condp)); PMEMcond_internal *condip = (PMEMcond_internal *)condp; PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; os_cond_t *cond = get_cond(pop, condip); os_mutex_t *mutex = get_mutex(pop, mutexip); if ((cond == NULL) || (mutex == NULL)) return EINVAL; ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0); ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0); return os_cond_timedwait(cond, mutex, abs_timeout); } /* * pmemobj_cond_wait -- wait on a pmem resident condition variable * * Atomically initializes and wait on a PMEMcond, otherwise behaves as its * POSIX counterpart. */ int pmemobj_cond_wait(PMEMobjpool *pop, PMEMcond *condp, PMEMmutex *__restrict mutexp) { LOG(3, "pop %p cond %p mutex %p", pop, condp, mutexp); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); ASSERTeq(pop, pmemobj_pool_by_ptr(condp)); PMEMcond_internal *condip = (PMEMcond_internal *)condp; PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; os_cond_t *cond = get_cond(pop, condip); os_mutex_t *mutex = get_mutex(pop, mutexip); if ((cond == NULL) || (mutex == NULL)) return EINVAL; ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0); ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0); return os_cond_wait(cond, mutex); } /* * pmemobj_volatile -- atomically initialize, record and return a * generic value */ void * pmemobj_volatile(PMEMobjpool *pop, struct pmemvlt *vlt, void *ptr, size_t size, int (*constr)(void *ptr, void *arg), void *arg) { LOG(3, "pop %p vlt %p ptr %p constr %p arg %p", pop, vlt, ptr, constr, arg); if (likely(vlt->runid == pop->run_id)) return ptr; VALGRIND_REMOVE_PMEM_MAPPING(ptr, size); 
VALGRIND_ADD_TO_TX(vlt, sizeof(*vlt)); if (_get_value(pop->run_id, &vlt->runid, ptr, arg, constr) < 0) { VALGRIND_REMOVE_FROM_TX(vlt, sizeof(*vlt)); return NULL; } VALGRIND_REMOVE_FROM_TX(vlt, sizeof(*vlt)); VALGRIND_SET_CLEAN(vlt, sizeof(*vlt)); return ptr; }
16,501
24.664075
80
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/lane.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * lane.h -- internal definitions for lanes */ #ifndef LIBPMEMOBJ_LANE_H #define LIBPMEMOBJ_LANE_H 1 #include <stdint.h> #include "ulog.h" #include "libpmemobj.h" #ifdef __cplusplus extern "C" { #endif /* * Distance between lanes used by threads required to prevent threads from * false sharing part of lanes array. Used if properly spread lanes are * available. Otherwise less spread out lanes would be used. */ #define LANE_JUMP (64 / sizeof(uint64_t)) /* * Number of times the algorithm will try to reacquire the primary lane for the * thread. If this threshold is exceeded, a new primary lane is selected for the * thread. */ #define LANE_PRIMARY_ATTEMPTS 128 #define RLANE_DEFAULT 0 #define LANE_TOTAL_SIZE 3072 /* 3 * 1024 (sum of 3 old lane sections) */ /* * We have 3 kilobytes to distribute. * The smallest capacity is needed for the internal redo log for which we can * accurately calculate the maximum number of occupied space: 48 bytes, * 3 times sizeof(struct ulog_entry_val). One for bitmap OR, second for bitmap * AND, third for modification of the destination pointer. For future needs, * this has been bumped up to 12 ulog entries. * * The remaining part has to be split between transactional redo and undo logs, * and since by far the most space consuming operations are transactional * snapshots, most of the space, 2 kilobytes, is assigned to the undo log. * After that, the remainder, 640 bytes, or 40 ulog entries, is left for the * transactional redo logs. * Thanks to this distribution, all small and medium transactions should be * entirely performed without allocating any additional metadata. * * These values must be cacheline size aligned to be used for ulogs. Therefore * they are parametrized for the size of the struct ulog changes between * platforms. 
*/ #define LANE_UNDO_SIZE (LANE_TOTAL_SIZE \ - LANE_REDO_EXTERNAL_SIZE \ - LANE_REDO_INTERNAL_SIZE \ - 3 * sizeof(struct ulog)) /* 2048 for 64B ulog */ #define LANE_REDO_EXTERNAL_SIZE ALIGN_UP(704 - sizeof(struct ulog), \ CACHELINE_SIZE) /* 640 for 64B ulog */ #define LANE_REDO_INTERNAL_SIZE ALIGN_UP(256 - sizeof(struct ulog), \ CACHELINE_SIZE) /* 192 for 64B ulog */ struct lane_layout { /* * Redo log for self-contained and 'one-shot' allocator operations. * Cannot be extended. */ struct ULOG(LANE_REDO_INTERNAL_SIZE) internal; /* * Redo log for large operations/transactions. * Can be extended by the use of internal ulog. */ struct ULOG(LANE_REDO_EXTERNAL_SIZE) external; /* * Undo log for snapshots done in a transaction. * Can be extended/shrunk by the use of internal ulog. */ struct ULOG(LANE_UNDO_SIZE) undo; }; struct lane { struct lane_layout *layout; /* pointer to persistent layout */ struct operation_context *internal; /* context for internal ulog */ struct operation_context *external; /* context for external ulog */ struct operation_context *undo; /* context for undo ulog */ }; struct lane_descriptor { /* * Number of lanes available at runtime must be <= total number of lanes * available in the pool. Number of lanes can be limited by shortage of * other resources e.g. available RNIC's submission queue sizes. 
*/ unsigned runtime_nlanes; unsigned next_lane_idx; uint64_t *lane_locks; struct lane *lane; }; typedef int (*section_layout_op)(PMEMobjpool *pop, void *data, unsigned length); typedef void *(*section_constr)(PMEMobjpool *pop, void *data); typedef void (*section_destr)(PMEMobjpool *pop, void *rt); typedef int (*section_global_op)(PMEMobjpool *pop); struct section_operations { section_constr construct_rt; section_destr destroy_rt; section_layout_op check; section_layout_op recover; section_global_op boot; section_global_op cleanup; }; struct lane_info { uint64_t pop_uuid_lo; uint64_t lane_idx; unsigned long nest_count; /* * The index of the primary lane for the thread. A thread will always * try to acquire the primary lane first, and only if that fails it will * look for a different available lane. */ uint64_t primary; int primary_attempts; struct lane_info *prev, *next; }; void lane_info_boot(void); void lane_info_destroy(void); void lane_init_data(PMEMobjpool *pop); int lane_boot(PMEMobjpool *pop); void lane_cleanup(PMEMobjpool *pop); int lane_recover_and_section_boot(PMEMobjpool *pop); int lane_section_cleanup(PMEMobjpool *pop); int lane_check(PMEMobjpool *pop); unsigned lane_hold(PMEMobjpool *pop, struct lane **lane); void lane_release(PMEMobjpool *pop); #ifdef __cplusplus } #endif #endif
4,652
30.02
80
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/ulog.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */

/*
 * ulog.h -- unified log public interface
 *
 * A "ulog" is a persistent log structure used by libpmemobj for both redo
 * and undo logging.  This header declares the on-media entry layouts, the
 * header macro that generates the log structure, and the operations that
 * create, apply, process and recover logs.  The *_ndp variants visible here
 * are additions of this (NearPM) fork that take a second, DRAM-side log.
 */

#ifndef LIBPMEMOBJ_ULOG_H
#define LIBPMEMOBJ_ULOG_H 1

#include <stddef.h>
#include <stdint.h>
#include <time.h>

#include "vec.h"
#include "pmemops.h"
#include<x86intrin.h>

////cmd write optimization
/*
struct ulog_cmd_packet{
	uint32_t ulog_offset : 32;
	uint32_t base_offset : 32;
	uint32_t src : 32;
	uint32_t size : 32;
};
*/

/* common first field of every log entry */
struct ulog_entry_base {
	uint64_t offset; /* offset with operation type flag */
};

/*
 * ulog_entry_val -- log entry
 */
struct ulog_entry_val {
	struct ulog_entry_base base;
	uint64_t value; /* value to be applied */
};

/*
 * ulog_entry_buf - ulog buffer entry
 */
struct ulog_entry_buf {
	struct ulog_entry_base base; /* offset with operation type flag */
	uint64_t checksum; /* checksum of the entire log entry */
	uint64_t size; /* size of the buffer to be modified */
	uint8_t data[]; /* content to fill in */
};

/* number of spare uint64_t slots so the header fills one cache line */
#define ULOG_UNUSED ((CACHELINE_SIZE - 40) / 8)
/*
 * This structure *must* be located at a cacheline boundary. To achieve this,
 * the next field is always allocated with extra padding, and then the offset
 * is additionally aligned.
 */
#define ULOG(capacity_bytes) {\
	/* 64 bytes of metadata */\
	uint64_t checksum; /* checksum of ulog header and its entries */\
	uint64_t next; /* offset of ulog extension */\
	uint64_t capacity; /* capacity of this ulog in bytes */\
	uint64_t gen_num; /* generation counter */\
	uint64_t flags; /* ulog flags */\
	uint64_t unused[ULOG_UNUSED]; /* must be 0 */\
	uint8_t data[capacity_bytes]; /* N bytes of data */\
}\

/* total on-media size of a ulog with the given data capacity */
#define SIZEOF_ULOG(base_capacity)\
(sizeof(struct ulog) + base_capacity)

/*
 * Ulog buffer allocated by the user must be marked by this flag.
 * It is important to not free it at the end:
 * what user has allocated - user should free himself.
 */
#define ULOG_USER_OWNED (1U << 0)

/* use this for allocations of aligned ulog extensions */
#define SIZEOF_ALIGNED_ULOG(base_capacity)\
ALIGN_UP(SIZEOF_ULOG(base_capacity + (2 * CACHELINE_SIZE)), CACHELINE_SIZE)

/* the canonical, zero-capacity ulog type used for pointer arithmetic */
struct ulog ULOG(0);

VEC(ulog_next, uint64_t);

/* operation type is encoded in the top bits of the entry offset */
typedef uint64_t ulog_operation_type;

#define ULOG_OPERATION_SET		(0b000ULL << 61ULL)
#define ULOG_OPERATION_AND		(0b001ULL << 61ULL)
#define ULOG_OPERATION_OR		(0b010ULL << 61ULL)
#define ULOG_OPERATION_BUF_SET	(0b101ULL << 61ULL)
#define ULOG_OPERATION_BUF_CPY	(0b110ULL << 61ULL)

#define ULOG_BIT_OPERATIONS (ULOG_OPERATION_AND | ULOG_OPERATION_OR)

/* immediately frees all associated ulog structures */
#define ULOG_FREE_AFTER_FIRST (1U << 0)
/* increments gen_num of the first, preallocated, ulog */
#define ULOG_INC_FIRST_GEN_NUM (1U << 1)
/* informs if there was any buffer allocated by user in the tx  */
#define ULOG_ANY_USER_BUFFER (1U << 2)

typedef int (*ulog_check_offset_fn)(void *ctx, uint64_t offset);
typedef int (*ulog_extend_fn)(void *, uint64_t *, uint64_t);
typedef int (*ulog_entry_cb)(struct ulog_entry_base *e, void *arg,
	const struct pmem_ops *p_ops);
/* NDP variant: callback receives a second (shadow) entry as well */
typedef int (*ulog_entry_cb_ndp)(struct ulog_entry_base *e,
	struct ulog_entry_base *f, void *arg,
	const struct pmem_ops *p_ops);
typedef void (*ulog_free_fn)(void *base, uint64_t *next);
typedef int (*ulog_rm_user_buffer_fn)(void *, void *addr);

/* navigation / construction */
struct ulog *ulog_next(struct ulog *ulog, const struct pmem_ops *p_ops);

void ulog_construct(uint64_t offset, size_t capacity, uint64_t gen_num,
	int flush, uint64_t flags, const struct pmem_ops *p_ops);

size_t ulog_capacity(struct ulog *ulog, size_t ulog_base_bytes,
	const struct pmem_ops *p_ops);
void ulog_rebuild_next_vec(struct ulog *ulog, struct ulog_next *next,
	const struct pmem_ops *p_ops);

/* iteration over entries; the ndp form walks a DRAM and an NVM log in step */
int ulog_foreach_entry(struct ulog *ulog,
	ulog_entry_cb cb, void *arg, const struct pmem_ops *ops,
	struct ulog *ulognvm);
int ulog_foreach_entry_ndp(struct ulog *ulogdram, struct ulog *ulognvm,
	ulog_entry_cb_ndp cb, void *arg, const struct pmem_ops *ops);

/* capacity management and persistence */
int ulog_reserve(struct ulog *ulog,
	size_t ulog_base_nbytes, size_t gen_num,
	int auto_reserve, size_t *new_capacity_bytes,
	ulog_extend_fn extend, struct ulog_next *next,
	const struct pmem_ops *p_ops);
void ulog_store(struct ulog *dest,
	struct ulog *src, size_t nbytes, size_t ulog_base_nbytes,
	size_t ulog_total_capacity,
	struct ulog_next *next, const struct pmem_ops *p_ops);
int ulog_free_next(struct ulog *u, const struct pmem_ops *p_ops,
	ulog_free_fn ulog_free, ulog_rm_user_buffer_fn user_buff_remove,
	uint64_t flags);
void ulog_clobber(struct ulog *dest, struct ulog_next *next,
	const struct pmem_ops *p_ops);
int ulog_clobber_data(struct ulog *dest,
	size_t nbytes, size_t ulog_base_nbytes,
	struct ulog_next *next, ulog_free_fn ulog_free,
	ulog_rm_user_buffer_fn user_buff_remove,
	const struct pmem_ops *p_ops, unsigned flags);
void ulog_clobber_entry(const struct ulog_entry_base *e,
	const struct pmem_ops *p_ops);

/* applying logged operations */
void ulog_process(struct ulog *ulog, ulog_check_offset_fn check,
	const struct pmem_ops *p_ops);
void ulog_process_ndp(struct ulog *ulognvm, struct ulog *ulogdeam,
	ulog_check_offset_fn check, const struct pmem_ops *p_ops);

size_t ulog_base_nbytes(struct ulog *ulog);
int ulog_recovery_needed(struct ulog *ulog, int verify_checksum);
struct ulog *ulog_by_offset(size_t offset, const struct pmem_ops *p_ops);

uint64_t ulog_entry_offset(const struct ulog_entry_base *entry);
ulog_operation_type ulog_entry_type(
	const struct ulog_entry_base *entry);

/* entry creation */
struct ulog_entry_val *ulog_entry_val_create(struct ulog *ulog,
	size_t offset, uint64_t *dest, uint64_t value,
	ulog_operation_type type,
	const struct pmem_ops *p_ops);
#ifdef USE_NDP_CLOBBER
struct ulog_entry_buf *
ulog_entry_buf_create(struct ulog *ulog, size_t offset,
	uint64_t gen_num, uint64_t *dest, const void *src, uint64_t size,
	ulog_operation_type type, const struct pmem_ops *p_ops,
	int clear_next_header);
#else
struct ulog_entry_buf *
ulog_entry_buf_create(struct ulog *ulog, size_t offset,
	uint64_t gen_num, uint64_t *dest, const void *src, uint64_t size,
	ulog_operation_type type, const struct pmem_ops *p_ops);
#endif

void ulog_entry_apply(const struct ulog_entry_base *e, int persist,
	const struct pmem_ops *p_ops);
void ulog_entry_apply_ndp(const struct ulog_entry_base *e,
	const struct ulog_entry_base *f, int persist,
	const struct pmem_ops *p_ops);

size_t ulog_entry_size(const struct ulog_entry_base *entry);

/* recovery / validation */
void ulog_recover(struct ulog *ulog, ulog_check_offset_fn check,
	const struct pmem_ops *p_ops);
int ulog_check(struct ulog *ulog, ulog_check_offset_fn check,
	const struct pmem_ops *p_ops);

#endif
6,600
32.170854
104
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/libpmemobj/lane.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * lane.c -- lane implementation */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include <inttypes.h> #include <errno.h> #include <limits.h> #include <sched.h> #include "libpmemobj.h" #include "critnib.h" #include "lane.h" #include "out.h" #include "util.h" #include "obj.h" #include "os_thread.h" #include "valgrind_internal.h" #include "memops.h" #include "palloc.h" #include "tx.h" static os_tls_key_t Lane_info_key; static __thread struct critnib *Lane_info_ht; static __thread struct lane_info *Lane_info_records; static __thread struct lane_info *Lane_info_cache; /* * lane_info_create -- (internal) constructor for thread shared data */ static inline void lane_info_create(void) { Lane_info_ht = critnib_new(); if (Lane_info_ht == NULL) FATAL("critnib_new"); } /* * lane_info_delete -- (internal) deletes lane info hash table */ static inline void lane_info_delete(void) { if (unlikely(Lane_info_ht == NULL)) return; critnib_delete(Lane_info_ht); struct lane_info *record; struct lane_info *head = Lane_info_records; while (head != NULL) { record = head; head = head->next; Free(record); } Lane_info_ht = NULL; Lane_info_records = NULL; Lane_info_cache = NULL; } /* * lane_info_ht_boot -- (internal) boot lane info and add it to thread shared * data */ static inline void lane_info_ht_boot(void) { lane_info_create(); int result = os_tls_set(Lane_info_key, Lane_info_ht); if (result != 0) { errno = result; FATAL("!os_tls_set"); } } /* * lane_info_ht_destroy -- (internal) destructor for thread shared data */ static inline void lane_info_ht_destroy(void *ht) { lane_info_delete(); } /* * lane_info_boot -- initialize lane info hash table and lane info key */ void lane_info_boot(void) { int result = os_tls_key_create(&Lane_info_key, lane_info_ht_destroy); if (result != 0) { errno = result; FATAL("!os_tls_key_create"); } } /* * lane_info_destroy -- destroy lane info hash table */ void 
lane_info_destroy(void) { lane_info_delete(); (void) os_tls_key_delete(Lane_info_key); } /* * lane_info_cleanup -- remove lane info record regarding pool being deleted */ static inline void lane_info_cleanup(PMEMobjpool *pop) { if (unlikely(Lane_info_ht == NULL)) return; struct lane_info *info = critnib_remove(Lane_info_ht, pop->uuid_lo); if (likely(info != NULL)) { if (info->prev) info->prev->next = info->next; if (info->next) info->next->prev = info->prev; if (Lane_info_cache == info) Lane_info_cache = NULL; if (Lane_info_records == info) Lane_info_records = info->next; Free(info); } } /* * lane_get_layout -- (internal) calculates the real pointer of the lane layout */ static struct lane_layout * lane_get_layout(PMEMobjpool *pop, uint64_t lane_idx) { return (void *)((char *)pop + pop->lanes_offset + sizeof(struct lane_layout) * lane_idx); } /* * lane_ulog_constructor -- (internal) constructor of a ulog extension */ static int lane_ulog_constructor(void *base, void *ptr, size_t usable_size, void *arg) { PMEMobjpool *pop = base; const struct pmem_ops *p_ops = &pop->p_ops; size_t capacity = ALIGN_DOWN(usable_size - sizeof(struct ulog), CACHELINE_SIZE); uint64_t gen_num = *(uint64_t *)arg; ulog_construct(OBJ_PTR_TO_OFF(base, ptr), capacity, gen_num, 1, 0, p_ops); return 0; } /* * lane_undo_extend -- allocates a new undo log */ static int lane_undo_extend(void *base, uint64_t *redo, uint64_t gen_num) { PMEMobjpool *pop = base; struct tx_parameters *params = pop->tx_params; size_t s = SIZEOF_ALIGNED_ULOG(params->cache_size); return pmalloc_construct(base, redo, s, lane_ulog_constructor, &gen_num, 0, OBJ_INTERNAL_OBJECT_MASK, 0); } /* * lane_redo_extend -- allocates a new redo log */ static int lane_redo_extend(void *base, uint64_t *redo, uint64_t gen_num) { size_t s = SIZEOF_ALIGNED_ULOG(LANE_REDO_EXTERNAL_SIZE); return pmalloc_construct(base, redo, s, lane_ulog_constructor, &gen_num, 0, OBJ_INTERNAL_OBJECT_MASK, 0); } /* * lane_init -- (internal) initializes a single 
lane runtime variables */ static int lane_init(PMEMobjpool *pop, struct lane *lane, struct lane_layout *layout) { ASSERTne(lane, NULL); lane->layout = layout; lane->internal = operation_new((struct ulog *)&layout->internal, LANE_REDO_INTERNAL_SIZE, NULL, NULL, &pop->p_ops, LOG_TYPE_REDO); if (lane->internal == NULL) goto error_internal_new; lane->external = operation_new((struct ulog *)&layout->external, LANE_REDO_EXTERNAL_SIZE, lane_redo_extend, (ulog_free_fn)pfree, &pop->p_ops, LOG_TYPE_REDO); if (lane->external == NULL) goto error_external_new; lane->undo = operation_new((struct ulog *)&layout->undo, LANE_UNDO_SIZE, lane_undo_extend, (ulog_free_fn)pfree, &pop->p_ops, LOG_TYPE_UNDO); if (lane->undo == NULL) goto error_undo_new; return 0; error_undo_new: operation_delete(lane->external); error_external_new: operation_delete(lane->internal); error_internal_new: return -1; } /* * lane_destroy -- cleanups a single lane runtime variables */ static void lane_destroy(PMEMobjpool *pop, struct lane *lane) { operation_delete(lane->undo); operation_delete(lane->internal); operation_delete(lane->external); } /* * lane_boot -- initializes all lanes */ int lane_boot(PMEMobjpool *pop) { int err = 0; pop->lanes_desc.lane = Malloc(sizeof(struct lane) * pop->nlanes); if (pop->lanes_desc.lane == NULL) { err = ENOMEM; ERR("!Malloc of volatile lanes"); goto error_lanes_malloc; } pop->lanes_desc.next_lane_idx = 0; pop->lanes_desc.lane_locks = Zalloc(sizeof(*pop->lanes_desc.lane_locks) * pop->nlanes); if (pop->lanes_desc.lane_locks == NULL) { ERR("!Malloc for lane locks"); goto error_locks_malloc; } /* add lanes to pmemcheck ignored list */ VALGRIND_ADD_TO_GLOBAL_TX_IGNORE((char *)pop + pop->lanes_offset, (sizeof(struct lane_layout) * pop->nlanes)); uint64_t i; for (i = 0; i < pop->nlanes; ++i) { struct lane_layout *layout = lane_get_layout(pop, i); if ((err = lane_init(pop, &pop->lanes_desc.lane[i], layout))) { ERR("!lane_init"); goto error_lane_init; } } return 0; error_lane_init: 
for (; i >= 1; --i) lane_destroy(pop, &pop->lanes_desc.lane[i - 1]); Free(pop->lanes_desc.lane_locks); pop->lanes_desc.lane_locks = NULL; error_locks_malloc: Free(pop->lanes_desc.lane); pop->lanes_desc.lane = NULL; error_lanes_malloc: return err; } /* * lane_init_data -- initializes ulogs for all the lanes */ void lane_init_data(PMEMobjpool *pop) { struct lane_layout *layout; for (uint64_t i = 0; i < pop->nlanes; ++i) { layout = lane_get_layout(pop, i); ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->internal), LANE_REDO_INTERNAL_SIZE, 0, 0, 0, &pop->p_ops); ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->external), LANE_REDO_EXTERNAL_SIZE, 0, 0, 0, &pop->p_ops); ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->undo), LANE_UNDO_SIZE, 0, 0, 0, &pop->p_ops); } layout = lane_get_layout(pop, 0); pmemops_xpersist(&pop->p_ops, layout, pop->nlanes * sizeof(struct lane_layout), PMEMOBJ_F_RELAXED); } /* * lane_cleanup -- destroys all lanes */ void lane_cleanup(PMEMobjpool *pop) { for (uint64_t i = 0; i < pop->nlanes; ++i) lane_destroy(pop, &pop->lanes_desc.lane[i]); Free(pop->lanes_desc.lane); pop->lanes_desc.lane = NULL; Free(pop->lanes_desc.lane_locks); pop->lanes_desc.lane_locks = NULL; lane_info_cleanup(pop); } /* * lane_recover_and_section_boot -- performs initialization and recovery of all * lanes */ int lane_recover_and_section_boot(PMEMobjpool *pop) { COMPILE_ERROR_ON(SIZEOF_ULOG(LANE_UNDO_SIZE) + SIZEOF_ULOG(LANE_REDO_EXTERNAL_SIZE) + SIZEOF_ULOG(LANE_REDO_INTERNAL_SIZE) != LANE_TOTAL_SIZE); int err = 0; uint64_t i; /* lane index */ struct lane_layout *layout; /* * First we need to recover the internal/external redo logs so that the * allocator state is consistent before we boot it. 
*/ for (i = 0; i < pop->nlanes; ++i) { layout = lane_get_layout(pop, i); ulog_recover((struct ulog *)&layout->internal, OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops); ulog_recover((struct ulog *)&layout->external, OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops); } if ((err = pmalloc_boot(pop)) != 0) return err; /* * Undo logs must be processed after the heap is initialized since * a undo recovery might require deallocation of the next ulogs. */ for (i = 0; i < pop->nlanes; ++i) { struct operation_context *ctx = pop->lanes_desc.lane[i].undo; operation_resume(ctx); operation_process(ctx); operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM | ULOG_FREE_AFTER_FIRST); } return 0; } /* * lane_section_cleanup -- performs runtime cleanup of all lanes */ int lane_section_cleanup(PMEMobjpool *pop) { return pmalloc_cleanup(pop); } /* * lane_check -- performs check of all lanes */ int lane_check(PMEMobjpool *pop) { int err = 0; uint64_t j; /* lane index */ struct lane_layout *layout; for (j = 0; j < pop->nlanes; ++j) { layout = lane_get_layout(pop, j); if (ulog_check((struct ulog *)&layout->internal, OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops) != 0) { LOG(2, "lane %" PRIu64 " internal redo failed: %d", j, err); return err; } } return 0; } /* * get_lane -- (internal) get free lane index */ static inline void get_lane(uint64_t *locks, struct lane_info *info, uint64_t nlocks) { info->lane_idx = info->primary; while (1) { do { info->lane_idx %= nlocks; if (likely(util_bool_compare_and_swap64( &locks[info->lane_idx], 0, 1))) { if (info->lane_idx == info->primary) { info->primary_attempts = LANE_PRIMARY_ATTEMPTS; } else if (info->primary_attempts == 0) { info->primary = info->lane_idx; info->primary_attempts = LANE_PRIMARY_ATTEMPTS; } return; } if (info->lane_idx == info->primary && info->primary_attempts > 0) { info->primary_attempts--; } ++info->lane_idx; } while (info->lane_idx < nlocks); sched_yield(); } } /* * get_lane_info_record -- (internal) get lane record attached to memory pool * or first free */ 
static inline struct lane_info * get_lane_info_record(PMEMobjpool *pop) { if (likely(Lane_info_cache != NULL && Lane_info_cache->pop_uuid_lo == pop->uuid_lo)) { return Lane_info_cache; } if (unlikely(Lane_info_ht == NULL)) { lane_info_ht_boot(); } struct lane_info *info = critnib_get(Lane_info_ht, pop->uuid_lo); if (unlikely(info == NULL)) { info = Malloc(sizeof(struct lane_info)); if (unlikely(info == NULL)) { FATAL("Malloc"); } info->pop_uuid_lo = pop->uuid_lo; info->lane_idx = UINT64_MAX; info->nest_count = 0; info->next = Lane_info_records; info->prev = NULL; info->primary = 0; info->primary_attempts = LANE_PRIMARY_ATTEMPTS; if (Lane_info_records) { Lane_info_records->prev = info; } Lane_info_records = info; if (unlikely(critnib_insert( Lane_info_ht, pop->uuid_lo, info) != 0)) { FATAL("critnib_insert"); } } Lane_info_cache = info; return info; } /* * lane_hold -- grabs a per-thread lane in a round-robin fashion */ unsigned lane_hold(PMEMobjpool *pop, struct lane **lanep) { /* * Before runtime lane initialization all remote operations are * executed using RLANE_DEFAULT. 
*/ if (unlikely(!pop->lanes_desc.runtime_nlanes)) { ASSERT(pop->has_remote_replicas); if (lanep != NULL) FATAL("cannot obtain section before lane's init"); return RLANE_DEFAULT; } struct lane_info *lane = get_lane_info_record(pop); while (unlikely(lane->lane_idx == UINT64_MAX)) { /* initial wrap to next CL */ lane->primary = lane->lane_idx = util_fetch_and_add32( &pop->lanes_desc.next_lane_idx, LANE_JUMP); } /* handles wraparound */ uint64_t *llocks = pop->lanes_desc.lane_locks; /* grab next free lane from lanes available at runtime */ if (!lane->nest_count++) { get_lane(llocks, lane, pop->lanes_desc.runtime_nlanes); } struct lane *l = &pop->lanes_desc.lane[lane->lane_idx]; /* reinitialize lane's content only if in outermost hold */ if (lanep && lane->nest_count == 1) { VALGRIND_ANNOTATE_NEW_MEMORY(l, sizeof(*l)); VALGRIND_ANNOTATE_NEW_MEMORY(l->layout, sizeof(*l->layout)); operation_init(l->external); operation_init(l->internal); operation_init(l->undo); } if (lanep) *lanep = l; return (unsigned)lane->lane_idx; } /* * lane_release -- drops the per-thread lane */ void lane_release(PMEMobjpool *pop) { if (unlikely(!pop->lanes_desc.runtime_nlanes)) { ASSERT(pop->has_remote_replicas); return; } struct lane_info *lane = get_lane_info_record(pop); ASSERTne(lane, NULL); ASSERTne(lane->lane_idx, UINT64_MAX); if (unlikely(lane->nest_count == 0)) { FATAL("lane_release"); } else if (--(lane->nest_count) == 0) { if (unlikely(!util_bool_compare_and_swap64( &pop->lanes_desc.lane_locks[lane->lane_idx], 1, 0))) { FATAL("util_bool_compare_and_swap64"); } } }
12,994
21.678883
79
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/rpmem_obc/rpmem_obc_test_common.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */

/*
 * rpmem_obc_test_common.h -- common declarations for rpmem_obc test
 *
 * Shared constants, the fake-server helper API and the TEST_CASE
 * declarations used by all rpmem_obc test sources.
 */

#include "unittest.h"
#include "out.h"

#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_obc.h"

/* request parameters used by the client-side test cases */
#define POOL_SIZE	1024
#define NLANES		32
#define NLANES_RESP	16
#define PROVIDER	RPMEM_PROV_LIBFABRIC_SOCKETS
#define POOL_DESC	"pool_desc"
#define RKEY		0xabababababababab
#define RADDR		0x0101010101010101
#define PORT		1234
#define BUFF_SIZE	8192

#define POOL_ATTR_INIT {\
	.signature		= "<RPMEM>",\
	.major			= 1,\
	.compat_features	= 2,\
	.incompat_features	= 3,\
	.ro_compat_features	= 4,\
	.poolset_uuid		= "POOLSET_UUID0123",\
	.uuid			= "UUID0123456789AB",\
	.next_uuid		= "NEXT_UUID0123456",\
	.prev_uuid		= "PREV_UUID0123456",\
	.user_flags		= "USER_FLAGS012345",\
}

/* an alternative attribute set, used by the set_attr test cases */
#define POOL_ATTR_ALT {\
	.signature		= "<ALT>",\
	.major			= 5,\
	.compat_features	= 6,\
	.incompat_features	= 7,\
	.ro_compat_features	= 8,\
	.poolset_uuid		= "UUID_POOLSET_ALT",\
	.uuid			= "ALT_UUIDCDEFFEDC",\
	.next_uuid		= "456UUID_NEXT_ALT",\
	.prev_uuid		= "UUID012_ALT_PREV",\
	.user_flags		= "012345USER_FLAGS",\
}

static const struct rpmem_pool_attr POOL_ATTR = POOL_ATTR_INIT;

/* minimal fake server: a pair of pipe/file descriptors */
struct server {
	int fd_in;
	int fd_out;
};

void set_rpmem_cmd(const char *fmt, ...);

/* fake-server lifecycle and I/O helpers */
struct server *srv_init(void);
void srv_fini(struct server *s);
void srv_recv(struct server *s, void *buff, size_t len);
void srv_send(struct server *s, const void *buff, size_t len);
void srv_wait_disconnect(struct server *s);

void client_connect_wait(struct rpmem_obc *rpc, char *target);

/*
 * Since the server may disconnect the connection at any moment
 * from the client's perspective, execute the test in a loop so
 * the moment when the connection is closed will be possibly different.
 */
#define ECONNRESET_LOOP 10

void server_econnreset(struct server *s, const void *msg, size_t len);

/* test cases implemented across the rpmem_obc_test_*.c sources */
TEST_CASE_DECLARE(client_enotconn);
TEST_CASE_DECLARE(client_connect);
TEST_CASE_DECLARE(client_monitor);
TEST_CASE_DECLARE(server_monitor);
TEST_CASE_DECLARE(server_wait);

TEST_CASE_DECLARE(client_create);
TEST_CASE_DECLARE(server_create);
TEST_CASE_DECLARE(server_create_econnreset);
TEST_CASE_DECLARE(server_create_eproto);
TEST_CASE_DECLARE(server_create_error);

TEST_CASE_DECLARE(client_open);
TEST_CASE_DECLARE(server_open);
TEST_CASE_DECLARE(server_open_econnreset);
TEST_CASE_DECLARE(server_open_eproto);
TEST_CASE_DECLARE(server_open_error);

TEST_CASE_DECLARE(client_close);
TEST_CASE_DECLARE(server_close);
TEST_CASE_DECLARE(server_close_econnreset);
TEST_CASE_DECLARE(server_close_eproto);
TEST_CASE_DECLARE(server_close_error);

TEST_CASE_DECLARE(client_set_attr);
TEST_CASE_DECLARE(server_set_attr);
TEST_CASE_DECLARE(server_set_attr_econnreset);
TEST_CASE_DECLARE(server_set_attr_eproto);
TEST_CASE_DECLARE(server_set_attr_error);
2,951
26.082569
71
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/rpmem_obc/rpmem_obc_test_create.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */

/*
 * rpmem_obc_test_create.c -- test cases for rpmem_obc_create function
 *
 * Server-side cases reply to one create request (valid, malformed,
 * error-status or reset connection); client-side cases drive
 * rpmem_obc_create against them and check the resulting errno/response.
 */

#include "rpmem_obc_test_common.h"

/* the canonical, well-formed create response sent by the fake server */
static const struct rpmem_msg_create_resp CREATE_RESP = {
	.hdr = {
		.type = RPMEM_MSG_TYPE_CREATE_RESP,
		.size = sizeof(struct rpmem_msg_create_resp),
		.status = 0,
	},
	.ibc = {
		.port = PORT,
		.rkey = RKEY,
		.raddr = RADDR,
		.persist_method = RPMEM_PM_GPSPM,
		.nlanes = NLANES_RESP,
	},
};

/*
 * check_create_msg -- check create message
 */
static void
check_create_msg(struct rpmem_msg_create *msg)
{
	size_t pool_desc_size = strlen(POOL_DESC) + 1;
	size_t msg_size = sizeof(struct rpmem_msg_create) + pool_desc_size;
	struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;

	UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_CREATE);
	UT_ASSERTeq(msg->hdr.size, msg_size);
	UT_ASSERTeq(msg->c.major, RPMEM_PROTO_MAJOR);
	UT_ASSERTeq(msg->c.minor, RPMEM_PROTO_MINOR);
	UT_ASSERTeq(msg->c.pool_size, POOL_SIZE);
	UT_ASSERTeq(msg->c.provider, PROVIDER);
	UT_ASSERTeq(msg->c.nlanes, NLANES);
	UT_ASSERTeq(msg->c.buff_size, BUFF_SIZE);
	UT_ASSERTeq(msg->pool_desc.size, pool_desc_size);
	UT_ASSERTeq(strcmp((char *)msg->pool_desc.desc, POOL_DESC), 0);
	UT_ASSERTeq(memcmp(&msg->pool_attr, &pool_attr, sizeof(pool_attr)), 0);
}

/*
 * server_create_handle -- handle a create request message
 *
 * Receives one create request, validates it and sends back *resp.
 */
static void
server_create_handle(struct server *s, const struct rpmem_msg_create_resp *resp)
{
	size_t msg_size = sizeof(struct rpmem_msg_create) +
		strlen(POOL_DESC) + 1;
	struct rpmem_msg_create *msg = MALLOC(msg_size);

	srv_recv(s, msg, msg_size);
	rpmem_ntoh_msg_create(msg);
	check_create_msg(msg);
	srv_send(s, resp, sizeof(*resp));

	FREE(msg);
}

/*
 * Number of cases for EPROTO test. Must be kept in sync with the
 * server_create_eproto function.
 */
#define CREATE_EPROTO_COUNT 8

/*
 * server_create_eproto -- send invalid create request responses to a client
 */
int
server_create_eproto(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s 0-%d", tc->name, CREATE_EPROTO_COUNT - 1);

	int i = atoi(argv[0]);

	struct server *s = srv_init();

	struct rpmem_msg_create_resp resp = CREATE_RESP;

	/* corrupt a single field of the response, selected by the argument */
	switch (i) {
	case 0:
		resp.hdr.type = MAX_RPMEM_MSG_TYPE;
		break;
	case 1:
		resp.hdr.type = RPMEM_MSG_TYPE_OPEN_RESP;
		break;
	case 2:
		resp.hdr.size -= 1;
		break;
	case 3:
		resp.hdr.size += 1;
		break;
	case 4:
		resp.hdr.status = MAX_RPMEM_ERR;
		break;
	case 5:
		resp.ibc.port = 0;
		break;
	case 6:
		resp.ibc.port = UINT16_MAX + 1;
		break;
	case 7:
		resp.ibc.persist_method = MAX_RPMEM_PM;
		break;
	default:
		UT_ASSERT(0);
		break;
	}

	rpmem_hton_msg_create_resp(&resp);

	server_create_handle(s, &resp);

	srv_fini(s);

	return 1;
}

/*
 * server_create_error -- return an error status in create response message
 */
int
server_create_error(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);

	enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);

	struct server *s = srv_init();

	struct rpmem_msg_create_resp resp = CREATE_RESP;
	resp.hdr.status = e;
	rpmem_hton_msg_create_resp(&resp);
	server_create_handle(s, &resp);

	srv_fini(s);

	return 1;
}

/*
 * server_create_econnreset -- test case for closing connection - server side
 */
int
server_create_econnreset(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s 0|1", tc->name);

	int do_send = atoi(argv[0]);

	struct server *s = srv_init();

	struct rpmem_msg_create_resp resp = CREATE_RESP;
	rpmem_hton_msg_create_resp(&resp);

	/* optionally send only half the response before dropping the link */
	if (do_send)
		srv_send(s, &resp, sizeof(resp) / 2);

	srv_fini(s);

	return 1;
}

/*
 * server_create -- test case for rpmem_obc_create function - server side
 */
int
server_create(const struct test_case *tc, int argc, char *argv[])
{
	/* NOTE(review): `argc < 0` can never be true; this case consumes no
	 * arguments (returns 0) — presumably the guard is a placeholder */
	if (argc < 0)
		UT_FATAL("usage: %s", tc->name);

	struct server *s = srv_init();

	struct rpmem_msg_create_resp resp = CREATE_RESP;
	rpmem_hton_msg_create_resp(&resp);

	server_create_handle(s, &resp);

	srv_fini(s);

	return 0;
}

/*
 * client_create_errno -- perform create request operation and expect
 * specified errno. If ex_errno is zero expect certain values in res struct.
 */
static void
client_create_errno(char *target, int ex_errno)
{
	struct rpmem_req_attr req = {
		.pool_size = POOL_SIZE,
		.nlanes = NLANES,
		.provider = PROVIDER,
		.pool_desc = POOL_DESC,
		.buff_size = BUFF_SIZE,
	};

	struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;

	struct rpmem_resp_attr res;
	int ret;

	struct rpmem_obc *rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);

	client_connect_wait(rpc, target);

	ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
	if (ex_errno) {
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(errno, ex_errno);
	} else {
		UT_ASSERTeq(ret, 0);

		/* on success the response must match the server's reply */
		UT_ASSERTeq(res.port, CREATE_RESP.ibc.port);
		UT_ASSERTeq(res.rkey, CREATE_RESP.ibc.rkey);
		UT_ASSERTeq(res.raddr, CREATE_RESP.ibc.raddr);
		UT_ASSERTeq(res.persist_method,
				CREATE_RESP.ibc.persist_method);
		UT_ASSERTeq(res.nlanes,
				CREATE_RESP.ibc.nlanes);
	}

	rpmem_obc_disconnect(rpc);

	rpmem_obc_fini(rpc);
}

/*
 * client_create_error -- check if valid errno is set if error status returned
 */
static void
client_create_error(char *target)
{
	struct rpmem_req_attr req = {
		.pool_size = POOL_SIZE,
		.nlanes = NLANES,
		.provider = PROVIDER,
		.pool_desc = POOL_DESC,
		.buff_size = BUFF_SIZE,
	};

	struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT;

	struct rpmem_resp_attr res;
	int ret;

	/* exercise every protocol error code the server can return */
	for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
		set_rpmem_cmd("server_create_error %d", e);

		int ex_errno = rpmem_util_proto_errno(e);
		struct rpmem_obc *rpc = rpmem_obc_init();
		UT_ASSERTne(rpc, NULL);

		client_connect_wait(rpc, target);

		ret = rpmem_obc_create(rpc, &req, &res, &pool_attr);
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(errno, ex_errno);

		rpmem_obc_disconnect(rpc);

		rpmem_obc_fini(rpc);
	}
}

/*
 * client_create -- test case for create request operation - client side
 */
int
client_create(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);

	char *target = argv[0];

	for (int i = 0; i < ECONNRESET_LOOP; i++) {
		set_rpmem_cmd("server_create_econnreset %d", i % 2);

		client_create_errno(target, ECONNRESET);
	}

	for (int i = 0; i < CREATE_EPROTO_COUNT; i++) {
		set_rpmem_cmd("server_create_eproto %d", i);

		client_create_errno(target, EPROTO);
	}

	client_create_error(target);

	set_rpmem_cmd("server_create");

	client_create_errno(target, 0);

	return 1;
}
6,642
20.498382
80
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/rpmem_obc/rpmem_obc_test.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2017, Intel Corporation */ /* * rpmem_obc_test.c -- unit test for rpmem_obc module */ #include "rpmem_obc_test_common.h" #include "pmemcommon.h" /* * test_cases -- available test cases */ static struct test_case test_cases[] = { TEST_CASE(client_enotconn), TEST_CASE(client_connect), TEST_CASE(client_create), TEST_CASE(server_create), TEST_CASE(server_create_econnreset), TEST_CASE(server_create_eproto), TEST_CASE(server_create_error), TEST_CASE(client_open), TEST_CASE(server_open), TEST_CASE(server_open_econnreset), TEST_CASE(server_open_eproto), TEST_CASE(server_open_error), TEST_CASE(client_close), TEST_CASE(server_close), TEST_CASE(server_close_econnreset), TEST_CASE(server_close_eproto), TEST_CASE(server_close_error), TEST_CASE(client_monitor), TEST_CASE(server_monitor), TEST_CASE(client_set_attr), TEST_CASE(server_set_attr), TEST_CASE(server_set_attr_econnreset), TEST_CASE(server_set_attr_eproto), TEST_CASE(server_set_attr_error), }; #define NTESTS (sizeof(test_cases) / sizeof(test_cases[0])) int main(int argc, char *argv[]) { START(argc, argv, "rpmem_obc"); common_init("rpmem_obc", "RPMEM_LOG_LEVEL", "RPMEM_LOG_FILE", 0, 0); rpmem_util_cmds_init(); TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS); rpmem_util_cmds_fini(); common_fini(); DONE(NULL); }
1,388
20.369231
59
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/rpmem_obc/rpmem_obc_test_open.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */

/*
 * rpmem_obc_test_open.c -- test cases for rpmem_obj_open function
 *
 * Client cases drive rpmem_obc_open() against a mock server; server cases
 * produce well-formed, malformed or error responses to exercise the client's
 * protocol handling.
 */

#include "rpmem_obc_test_common.h"

/* canonical, valid OPEN response the server cases start from */
static const struct rpmem_msg_open_resp OPEN_RESP = {
	.hdr = {
		.type = RPMEM_MSG_TYPE_OPEN_RESP,
		.size = sizeof(struct rpmem_msg_open_resp),
		.status = 0,
	},
	.ibc = {
		.port = PORT,
		.rkey = RKEY,
		.raddr = RADDR,
		.persist_method = RPMEM_PM_GPSPM,
		.nlanes = NLANES_RESP,
	},
	.pool_attr = POOL_ATTR_INIT,
};

/*
 * check_open_msg -- check open message
 *
 * Verifies every field of the request received from the client against the
 * constants the client side is expected to send.
 */
static void
check_open_msg(struct rpmem_msg_open *msg)
{
	/* pool descriptor is transmitted inline, NUL terminator included */
	size_t pool_desc_size = strlen(POOL_DESC) + 1;
	size_t msg_size = sizeof(struct rpmem_msg_open) + pool_desc_size;

	UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_OPEN);
	UT_ASSERTeq(msg->hdr.size, msg_size);
	UT_ASSERTeq(msg->c.major, RPMEM_PROTO_MAJOR);
	UT_ASSERTeq(msg->c.minor, RPMEM_PROTO_MINOR);
	UT_ASSERTeq(msg->c.pool_size, POOL_SIZE);
	UT_ASSERTeq(msg->c.provider, PROVIDER);
	UT_ASSERTeq(msg->c.nlanes, NLANES);
	UT_ASSERTeq(msg->c.buff_size, BUFF_SIZE);
	UT_ASSERTeq(msg->pool_desc.size, pool_desc_size);
	UT_ASSERTeq(strcmp((char *)msg->pool_desc.desc, POOL_DESC), 0);
}

/*
 * server_open_handle -- handle an open request message
 *
 * Receives and validates one OPEN request, then sends back the supplied
 * (already byte-order-converted) response.
 */
static void
server_open_handle(struct server *s, const struct rpmem_msg_open_resp *resp)
{
	size_t msg_size = sizeof(struct rpmem_msg_open) + strlen(POOL_DESC) + 1;
	struct rpmem_msg_open *msg = MALLOC(msg_size);

	srv_recv(s, msg, msg_size);
	/* request arrives in network byte order */
	rpmem_ntoh_msg_open(msg);
	check_open_msg(msg);
	srv_send(s, resp, sizeof(*resp));

	FREE(msg);
}

/*
 * Number of cases for EPROTO test. Must be kept in sync with the
 * server_open_eproto function.
 */
#define OPEN_EPROTO_COUNT 8

/*
 * server_open_eproto -- send invalid open request responses to a client
 */
int
server_open_eproto(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s 0-%d", tc->name, OPEN_EPROTO_COUNT - 1);

	int i = atoi(argv[0]);

	struct server *s = srv_init();

	struct rpmem_msg_open_resp resp = OPEN_RESP;

	/* each case corrupts exactly one field of the valid response */
	switch (i) {
	case 0:
		resp.hdr.type = MAX_RPMEM_MSG_TYPE;
		break;
	case 1:
		resp.hdr.type = RPMEM_MSG_TYPE_CREATE_RESP;
		break;
	case 2:
		resp.hdr.size -= 1;
		break;
	case 3:
		resp.hdr.size += 1;
		break;
	case 4:
		resp.hdr.status = MAX_RPMEM_ERR;
		break;
	case 5:
		resp.ibc.port = 0;
		break;
	case 6:
		resp.ibc.port = UINT16_MAX + 1;
		break;
	case 7:
		resp.ibc.persist_method = MAX_RPMEM_PM;
		break;
	default:
		UT_ASSERT(0);
		break;
	}

	rpmem_hton_msg_open_resp(&resp);

	server_open_handle(s, &resp);

	srv_fini(s);

	return 1;
}

/*
 * server_open_error -- return error status in open response message
 */
int
server_open_error(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR);

	enum rpmem_err e = (enum rpmem_err)atoi(argv[0]);

	struct server *s = srv_init();

	struct rpmem_msg_open_resp resp = OPEN_RESP;
	resp.hdr.status = e;
	rpmem_hton_msg_open_resp(&resp);
	server_open_handle(s, &resp);

	srv_fini(s);

	return 1;
}

/*
 * server_open_econnreset -- simulate a connection reset: optionally send
 * only half of the response, then drop the connection - server side
 */
int
server_open_econnreset(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s 0|1", tc->name);

	int do_send = atoi(argv[0]);

	struct server *s = srv_init();

	struct rpmem_msg_open_resp resp = OPEN_RESP;
	rpmem_hton_msg_open_resp(&resp);

	if (do_send)
		srv_send(s, &resp, sizeof(resp) / 2);

	srv_fini(s);

	return 1;
}

/*
 * server_open -- test case for open request message - server side
 */
int
server_open(const struct test_case *tc, int argc, char *argv[])
{
	struct server *s = srv_init();

	struct rpmem_msg_open_resp resp = OPEN_RESP;
	rpmem_hton_msg_open_resp(&resp);

	server_open_handle(s, &resp);

	srv_fini(s);

	return 0;
}

/*
 * client_open_errno -- perform open request operation and expect
 * specified errno, repeat the operation specified number of times.
 * If ex_errno is zero expect certain values in res struct.
 */
static void
client_open_errno(char *target, int ex_errno)
{
	struct rpmem_req_attr req = {
		.pool_size = POOL_SIZE,
		.nlanes = NLANES,
		.provider = PROVIDER,
		.pool_desc = POOL_DESC,
		.buff_size = BUFF_SIZE,
	};

	struct rpmem_pool_attr pool_attr;
	memset(&pool_attr, 0, sizeof(pool_attr));

	struct rpmem_resp_attr res;
	int ret;

	struct rpmem_obc *rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);

	client_connect_wait(rpc, target);

	ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
	if (ex_errno) {
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(errno, ex_errno);
	} else {
		UT_ASSERTeq(ret, 0);

		/* on success every response field must match OPEN_RESP */
		UT_ASSERTeq(res.port, OPEN_RESP.ibc.port);
		UT_ASSERTeq(res.rkey, OPEN_RESP.ibc.rkey);
		UT_ASSERTeq(res.raddr, OPEN_RESP.ibc.raddr);
		UT_ASSERTeq(res.persist_method,
				OPEN_RESP.ibc.persist_method);
		UT_ASSERTeq(res.nlanes,
				OPEN_RESP.ibc.nlanes);

		UT_ASSERTeq(memcmp(pool_attr.signature,
				OPEN_RESP.pool_attr.signature,
				RPMEM_POOL_HDR_SIG_LEN), 0);
		UT_ASSERTeq(pool_attr.major, OPEN_RESP.pool_attr.major);
		UT_ASSERTeq(pool_attr.compat_features,
				OPEN_RESP.pool_attr.compat_features);
		UT_ASSERTeq(pool_attr.incompat_features,
				OPEN_RESP.pool_attr.incompat_features);
		UT_ASSERTeq(pool_attr.ro_compat_features,
				OPEN_RESP.pool_attr.ro_compat_features);
		UT_ASSERTeq(memcmp(pool_attr.poolset_uuid,
				OPEN_RESP.pool_attr.poolset_uuid,
				RPMEM_POOL_HDR_UUID_LEN), 0);
		UT_ASSERTeq(memcmp(pool_attr.uuid,
				OPEN_RESP.pool_attr.uuid,
				RPMEM_POOL_HDR_UUID_LEN), 0);
		UT_ASSERTeq(memcmp(pool_attr.next_uuid,
				OPEN_RESP.pool_attr.next_uuid,
				RPMEM_POOL_HDR_UUID_LEN), 0);
		UT_ASSERTeq(memcmp(pool_attr.prev_uuid,
				OPEN_RESP.pool_attr.prev_uuid,
				RPMEM_POOL_HDR_UUID_LEN), 0);
		UT_ASSERTeq(memcmp(pool_attr.user_flags,
				OPEN_RESP.pool_attr.user_flags,
				RPMEM_POOL_USER_FLAGS_LEN), 0);
	}

	rpmem_obc_disconnect(rpc);

	rpmem_obc_fini(rpc);
}

/*
 * client_open_error -- check if valid errno is set if error status returned
 */
static void
client_open_error(char *target)
{
	struct rpmem_req_attr req = {
		.pool_size = POOL_SIZE,
		.nlanes = NLANES,
		.provider = PROVIDER,
		.pool_desc = POOL_DESC,
		.buff_size = BUFF_SIZE,
	};

	struct rpmem_pool_attr pool_attr;
	memset(&pool_attr, 0, sizeof(pool_attr));

	struct rpmem_resp_attr res;
	int ret;

	/* iterate over every protocol error code the server can return */
	for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) {
		set_rpmem_cmd("server_open_error %d", e);

		int ex_errno = rpmem_util_proto_errno(e);
		struct rpmem_obc *rpc = rpmem_obc_init();
		UT_ASSERTne(rpc, NULL);

		client_connect_wait(rpc, target);

		ret = rpmem_obc_open(rpc, &req, &res, &pool_attr);
		UT_ASSERTne(ret, 0);
		UT_ASSERTeq(errno, ex_errno);

		rpmem_obc_disconnect(rpc);

		rpmem_obc_fini(rpc);
	}
}

/*
 * client_open -- test case for open request message - client side
 */
int
client_open(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);

	char *target = argv[0];

	for (int i = 0; i < ECONNRESET_LOOP; i++) {
		set_rpmem_cmd("server_open_econnreset %d", i % 2);

		client_open_errno(target, ECONNRESET);
	}

	for (int i = 0; i < OPEN_EPROTO_COUNT; i++) {
		set_rpmem_cmd("server_open_eproto %d", i);

		client_open_errno(target, EPROTO);
	}

	client_open_error(target);

	set_rpmem_cmd("server_open");

	client_open_errno(target, 0);

	return 1;
}
7,427
21.306306
76
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/rpmemd_db/rpmemd_db_test.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * rpmemd_db_test.c -- unit test for pool set database * * usage: rpmemd_db <log-file> <root_dir> <pool_desc_1> <pool_desc_2> */ #include "file.h" #include "unittest.h" #include "librpmem.h" #include "rpmemd_db.h" #include "rpmemd_log.h" #include "util_pmem.h" #include "set.h" #include "out.h" #include <limits.h> #include <stdlib.h> #include <unistd.h> #include <time.h> #define POOL_MODE 0644 #define FAILED_FUNC(func_name) \ UT_ERR("!%s(): %s() failed", __func__, func_name); #define FAILED_FUNC_PARAM(func_name, param) \ UT_ERR("!%s(): %s(%s) failed", __func__, func_name, param); #define NPOOLS_DUAL 2 #define POOL_ATTR_CREATE 0 #define POOL_ATTR_OPEN 1 #define POOL_ATTR_SET_ATTR 2 #define POOL_STATE_INITIAL 0 #define POOL_STATE_CREATED 1 #define POOL_STATE_OPENED 2 #define POOL_STATE_CLOSED POOL_STATE_CREATED #define POOL_STATE_REMOVED POOL_STATE_INITIAL /* * fill_rand -- fill a buffer with random values */ static void fill_rand(void *addr, size_t len) { unsigned char *buff = addr; srand(time(NULL)); for (unsigned i = 0; i < len; i++) buff[i] = (rand() % ('z' - 'a')) + 'a'; } /* * test_init -- test rpmemd_db_init() and rpmemd_db_fini() */ static int test_init(const char *root_dir) { struct rpmemd_db *db; db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } rpmemd_db_fini(db); return 0; } /* * test_check_dir -- test rpmemd_db_check_dir() */ static int test_check_dir(const char *root_dir) { struct rpmemd_db *db; int ret; db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } ret = rpmemd_db_check_dir(db); if (ret) { FAILED_FUNC("rpmemd_db_check_dir"); } rpmemd_db_fini(db); return ret; } /* * test_create -- test rpmemd_db_pool_create() */ static int test_create(const char *root_dir, const char *pool_desc) { struct rpmem_pool_attr attr; memset(&attr, 0, sizeof(attr)); 
attr.incompat_features = 2; struct rpmemd_db_pool *prp; struct rpmemd_db *db; int ret = -1; db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr); if (prp == NULL) { FAILED_FUNC("rpmemd_db_pool_create"); goto fini; } rpmemd_db_pool_close(db, prp); ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0); if (ret) { FAILED_FUNC("rpmemd_db_pool_remove"); } fini: rpmemd_db_fini(db); return ret; } /* * test_create_dual -- dual test for rpmemd_db_pool_create() */ static int test_create_dual(const char *root_dir, const char *pool_desc_1, const char *pool_desc_2) { struct rpmem_pool_attr attr1; memset(&attr1, 0, sizeof(attr1)); attr1.incompat_features = 2; struct rpmemd_db_pool *prp1, *prp2; struct rpmemd_db *db; int ret = -1; db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } /* test dual create */ prp1 = rpmemd_db_pool_create(db, pool_desc_1, 0, &attr1); if (prp1 == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_1); goto err_create_1; } prp2 = rpmemd_db_pool_create(db, pool_desc_2, 0, &attr1); if (prp2 == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_2); goto err_create_2; } rpmemd_db_pool_close(db, prp2); rpmemd_db_pool_close(db, prp1); ret = rpmemd_db_pool_remove(db, pool_desc_2, 0, 0); if (ret) { FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_2); goto err_remove_2; } ret = rpmemd_db_pool_remove(db, pool_desc_1, 0, 0); if (ret) { FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_1); } goto fini; err_create_2: rpmemd_db_pool_close(db, prp1); err_remove_2: rpmemd_db_pool_remove(db, pool_desc_1, 0, 0); err_create_1: fini: rpmemd_db_fini(db); return ret; } /* * compare_attr -- compare pool's attributes */ static void compare_attr(struct rpmem_pool_attr *a1, struct rpmem_pool_attr *a2) { char *msg; if (a1->major != a2->major) { msg = "major"; goto err_mismatch; } if (a1->compat_features != 
a2->compat_features) { msg = "compat_features"; goto err_mismatch; } if (a1->incompat_features != a2->incompat_features) { msg = "incompat_features"; goto err_mismatch; } if (a1->ro_compat_features != a2->ro_compat_features) { msg = "ro_compat_features"; goto err_mismatch; } if (memcmp(a1->signature, a2->signature, RPMEM_POOL_HDR_SIG_LEN)) { msg = "signature"; goto err_mismatch; } if (memcmp(a1->poolset_uuid, a2->poolset_uuid, RPMEM_POOL_HDR_UUID_LEN)) { msg = "poolset_uuid"; goto err_mismatch; } if (memcmp(a1->uuid, a2->uuid, RPMEM_POOL_HDR_UUID_LEN)) { msg = "uuid"; goto err_mismatch; } if (memcmp(a1->next_uuid, a2->next_uuid, RPMEM_POOL_HDR_UUID_LEN)) { msg = "next_uuid"; goto err_mismatch; } if (memcmp(a1->prev_uuid, a2->prev_uuid, RPMEM_POOL_HDR_UUID_LEN)) { msg = "prev_uuid"; goto err_mismatch; } return; err_mismatch: errno = EINVAL; UT_FATAL("%s(): pool attributes mismatch (%s)", __func__, msg); } /* * test_open -- test rpmemd_db_pool_open() */ static int test_open(const char *root_dir, const char *pool_desc) { struct rpmem_pool_attr attr1, attr2; struct rpmemd_db_pool *prp; struct rpmemd_db *db; int ret = -1; fill_rand(&attr1, sizeof(attr1)); attr1.major = 1; attr1.incompat_features = 2; attr1.compat_features = 0; db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr1); if (prp == NULL) { FAILED_FUNC("rpmemd_db_pool_create"); goto fini; } rpmemd_db_pool_close(db, prp); prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr2); if (prp == NULL) { FAILED_FUNC("rpmemd_db_pool_open"); goto fini; } rpmemd_db_pool_close(db, prp); compare_attr(&attr1, &attr2); ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0); if (ret) { FAILED_FUNC("rpmemd_db_pool_remove"); } fini: rpmemd_db_fini(db); return ret; } /* * test_open_dual -- dual test for rpmemd_db_pool_open() */ static int test_open_dual(const char *root_dir, const char *pool_desc_1, const char *pool_desc_2) { struct 
rpmem_pool_attr attr1a, attr2a, attr1b, attr2b; struct rpmemd_db_pool *prp1, *prp2; struct rpmemd_db *db; int ret = -1; fill_rand(&attr1a, sizeof(attr1a)); fill_rand(&attr1b, sizeof(attr1b)); attr1a.major = 1; attr1a.incompat_features = 2; attr1a.compat_features = 0; attr1b.major = 1; attr1b.incompat_features = 2; attr1b.compat_features = 0; db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } prp1 = rpmemd_db_pool_create(db, pool_desc_1, 0, &attr1a); if (prp1 == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_1); goto err_create_1; } rpmemd_db_pool_close(db, prp1); prp2 = rpmemd_db_pool_create(db, pool_desc_2, 0, &attr1b); if (prp2 == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_2); goto err_create_2; } rpmemd_db_pool_close(db, prp2); /* test dual open */ prp1 = rpmemd_db_pool_open(db, pool_desc_1, 0, &attr2a); if (prp1 == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc_1); goto err_open_1; } prp2 = rpmemd_db_pool_open(db, pool_desc_2, 0, &attr2b); if (prp2 == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc_2); goto err_open_2; } rpmemd_db_pool_close(db, prp1); rpmemd_db_pool_close(db, prp2); compare_attr(&attr1a, &attr2a); compare_attr(&attr1b, &attr2b); ret = rpmemd_db_pool_remove(db, pool_desc_2, 0, 0); if (ret) { FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_2); goto err_remove_2; } ret = rpmemd_db_pool_remove(db, pool_desc_1, 0, 0); if (ret) { FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_1); } goto fini; err_open_2: rpmemd_db_pool_close(db, prp1); err_open_1: rpmemd_db_pool_remove(db, pool_desc_2, 0, 0); err_create_2: err_remove_2: rpmemd_db_pool_remove(db, pool_desc_1, 0, 0); err_create_1: fini: rpmemd_db_fini(db); return ret; } /* * test_set_attr -- test rpmemd_db_pool_set_attr() */ static int test_set_attr(const char *root_dir, const char *pool_desc) { struct rpmem_pool_attr attr[3]; struct rpmemd_db_pool *prp; struct rpmemd_db *db; int ret 
= -1; fill_rand(&attr[POOL_ATTR_CREATE], sizeof(attr[POOL_ATTR_CREATE])); fill_rand(&attr[POOL_ATTR_SET_ATTR], sizeof(attr[POOL_ATTR_SET_ATTR])); attr[POOL_ATTR_CREATE].major = 1; attr[POOL_ATTR_CREATE].incompat_features = 2; attr[POOL_ATTR_CREATE].compat_features = 0; attr[POOL_ATTR_SET_ATTR].major = 1; attr[POOL_ATTR_SET_ATTR].incompat_features = 2; attr[POOL_ATTR_SET_ATTR].compat_features = 0; db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr[POOL_ATTR_CREATE]); if (prp == NULL) { FAILED_FUNC("rpmemd_db_pool_create"); goto err_create; } rpmemd_db_pool_close(db, prp); prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr[POOL_ATTR_OPEN]); if (prp == NULL) { FAILED_FUNC("rpmemd_db_pool_open"); goto err_open; } compare_attr(&attr[POOL_ATTR_CREATE], &attr[POOL_ATTR_OPEN]); ret = rpmemd_db_pool_set_attr(prp, &attr[POOL_ATTR_SET_ATTR]); if (ret) { FAILED_FUNC("rpmemd_db_pool_set_attr"); goto err_set_attr; } rpmemd_db_pool_close(db, prp); prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr[POOL_ATTR_OPEN]); if (prp == NULL) { FAILED_FUNC("rpmemd_db_pool_open"); goto err_open; } compare_attr(&attr[POOL_ATTR_SET_ATTR], &attr[POOL_ATTR_OPEN]); rpmemd_db_pool_close(db, prp); ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0); if (ret) { FAILED_FUNC("rpmemd_db_pool_remove"); } goto fini; err_set_attr: rpmemd_db_pool_close(db, prp); err_open: rpmemd_db_pool_remove(db, pool_desc, 0, 0); err_create: fini: rpmemd_db_fini(db); return ret; } /* * test_set_attr_dual -- dual test for rpmemd_db_pool_set_attr() */ static int test_set_attr_dual(const char *root_dir, const char *pool_desc_1, const char *pool_desc_2) { struct rpmem_pool_attr attr[NPOOLS_DUAL][3]; struct rpmemd_db_pool *prp[NPOOLS_DUAL]; const char *pool_desc[NPOOLS_DUAL] = {pool_desc_1, pool_desc_2}; unsigned pool_state[NPOOLS_DUAL] = {POOL_STATE_INITIAL}; struct rpmemd_db *db; int ret = -1; /* initialize rpmem database */ 
db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } for (unsigned p = 0; p < NPOOLS_DUAL; ++p) { /* * generate random pool attributes for create and set * attributes operations */ fill_rand(&attr[p][POOL_ATTR_CREATE], sizeof(attr[p][POOL_ATTR_CREATE])); fill_rand(&attr[p][POOL_ATTR_SET_ATTR], sizeof(attr[p][POOL_ATTR_SET_ATTR])); attr[p][POOL_ATTR_CREATE].major = 1; attr[p][POOL_ATTR_CREATE].incompat_features = 2; attr[p][POOL_ATTR_CREATE].compat_features = 0; attr[p][POOL_ATTR_SET_ATTR].major = 1; attr[p][POOL_ATTR_SET_ATTR].incompat_features = 2; attr[p][POOL_ATTR_SET_ATTR].compat_features = 0; /* create pool */ prp[p] = rpmemd_db_pool_create(db, pool_desc[p], 0, &attr[p][POOL_ATTR_CREATE]); if (prp[p] == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc[p]); goto err; } rpmemd_db_pool_close(db, prp[p]); pool_state[p] = POOL_STATE_CREATED; } /* open pools and check pool attributes */ for (unsigned p = 0; p < NPOOLS_DUAL; ++p) { prp[p] = rpmemd_db_pool_open(db, pool_desc[p], 0, &attr[p][POOL_ATTR_OPEN]); if (prp[p] == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc[p]); goto err; } pool_state[p] = POOL_STATE_OPENED; compare_attr(&attr[p][POOL_ATTR_CREATE], &attr[p][POOL_ATTR_OPEN]); } /* set attributes and close pools */ for (unsigned p = 0; p < NPOOLS_DUAL; ++p) { ret = rpmemd_db_pool_set_attr(prp[p], &attr[p][POOL_ATTR_SET_ATTR]); if (ret) { FAILED_FUNC_PARAM("rpmemd_db_pool_set_attr", pool_desc[p]); goto err; } rpmemd_db_pool_close(db, prp[p]); pool_state[p] = POOL_STATE_CLOSED; } /* open pools and check attributes */ for (unsigned p = 0; p < NPOOLS_DUAL; ++p) { prp[p] = rpmemd_db_pool_open(db, pool_desc[p], 0, &attr[p][POOL_ATTR_OPEN]); if (prp[p] == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc[p]); goto err; } pool_state[p] = POOL_STATE_OPENED; compare_attr(&attr[p][POOL_ATTR_SET_ATTR], &attr[p][POOL_ATTR_OPEN]); } err: for (unsigned p = 0; p < NPOOLS_DUAL; ++p) { if 
(pool_state[p] == POOL_STATE_OPENED) { rpmemd_db_pool_close(db, prp[p]); pool_state[p] = POOL_STATE_CLOSED; } if (pool_state[p] == POOL_STATE_CREATED) { ret = rpmemd_db_pool_remove(db, pool_desc[p], 0, 0); if (ret) { FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc[p]); } pool_state[p] = POOL_STATE_REMOVED; } } rpmemd_db_fini(db); return ret; } static int exists_cb(struct part_file *pf, void *arg) { return util_file_exists(pf->part->path); } static int noexists_cb(struct part_file *pf, void *arg) { int exists = util_file_exists(pf->part->path); if (exists < 0) return -1; else return !exists; } /* * test_remove -- test for rpmemd_db_pool_remove() */ static void test_remove(const char *root_dir, const char *pool_desc) { struct rpmem_pool_attr attr; struct rpmemd_db_pool *prp; struct rpmemd_db *db; int ret; char path[PATH_MAX]; SNPRINTF(path, PATH_MAX, "%s/%s", root_dir, pool_desc); fill_rand(&attr, sizeof(attr)); strncpy((char *)attr.poolset_uuid, "TEST", sizeof(attr.poolset_uuid)); attr.incompat_features = 2; attr.compat_features = 0; db = rpmemd_db_init(root_dir, POOL_MODE); UT_ASSERTne(db, NULL); prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr); UT_ASSERTne(prp, NULL); rpmemd_db_pool_close(db, prp); ret = util_poolset_foreach_part(path, exists_cb, NULL); UT_ASSERTeq(ret, 1); ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0); UT_ASSERTeq(ret, 0); ret = util_poolset_foreach_part(path, noexists_cb, NULL); UT_ASSERTeq(ret, 1); prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr); UT_ASSERTne(prp, NULL); rpmemd_db_pool_close(db, prp); ret = rpmemd_db_pool_remove(db, pool_desc, 0, 1); UT_ASSERTeq(ret, 0); ret = util_file_exists(path); UT_ASSERTne(ret, 1); rpmemd_db_fini(db); } int main(int argc, char *argv[]) { char *pool_desc[2], *log_file; char root_dir[PATH_MAX]; START(argc, argv, "rpmemd_db"); util_init(); out_init("rpmemd_db", "RPMEM_LOG_LEVEL", "RPMEM_LOG_FILE", 0, 0); if (argc != 5) UT_FATAL("usage: %s <log-file> <root_dir> <pool_desc_1>" " <pool_desc_2>", 
argv[0]); log_file = argv[1]; if (realpath(argv[2], root_dir) == NULL) UT_FATAL("!realpath(%s)", argv[1]); pool_desc[0] = argv[3]; pool_desc[1] = argv[4]; if (rpmemd_log_init("rpmemd error: ", log_file, 0)) FAILED_FUNC("rpmemd_log_init"); test_init(root_dir); test_check_dir(root_dir); test_create(root_dir, pool_desc[0]); test_create_dual(root_dir, pool_desc[0], pool_desc[1]); test_open(root_dir, pool_desc[0]); test_open_dual(root_dir, pool_desc[0], pool_desc[1]); test_set_attr(root_dir, pool_desc[0]); test_set_attr_dual(root_dir, pool_desc[0], pool_desc[1]); test_remove(root_dir, pool_desc[0]); rpmemd_log_close(); out_fini(); DONE(NULL); }
15,339
22.636364
72
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_granularity/pmem2_granularity.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */

/*
 * pmem2_granularity.c -- test for granularity functionality
 *
 * The IS_EADR and IS_PMEM environment variables (set by the test harness)
 * control the platform capabilities reported by the mocked OS layer.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "source.h"
#include "pmem2_granularity.h"
#include "unittest.h"
#include "ut_pmem2_config.h"
#include "ut_pmem2_utils.h"
#include "out.h"

/* globals consumed by the mocked platform-detection code */
size_t Is_nfit = 1;
size_t Pc_type = 7;
size_t Pc_capabilities;

/*
 * parse_args -- parse args from the input
 */
static int
parse_args(const struct test_case *tc, int argc, char *argv[],
	char **file)
{
	if (argc < 1)
		UT_FATAL("usage: %s <file>", tc->name);

	*file = argv[0];

	return 1;
}

/*
 * set_eadr -- set variable required for mocked functions
 *
 * NOTE(review): assumes IS_EADR is always set by the harness --
 * os_getenv() returning NULL would be passed to atoi(); confirm.
 */
static void
set_eadr()
{
	int is_eadr = atoi(os_getenv("IS_EADR"));
	if (is_eadr)
		Pc_capabilities = 3;
	else
		Pc_capabilities = 2;
}

/*
 * test_ctx -- essential parameters used by test
 */
struct test_ctx {
	int fd;			/* descriptor of the file under test */
	enum pmem2_granularity requested_granularity;
	enum pmem2_granularity expected_granularity;
};

/*
 * init_test -- initialize basic parameters for test
 */
static void
init_test(char *file, struct test_ctx *ctx,
	enum pmem2_granularity granularity)
{
	set_eadr();

	ctx->fd = OPEN(file, O_RDWR);

	ctx->requested_granularity = granularity;

	/* derive the granularity the mocked platform should report */
	int is_eadr = atoi(os_getenv("IS_EADR"));
	int is_pmem = atoi(os_getenv("IS_PMEM"));
	if (is_eadr) {
		if (is_pmem)
			ctx->expected_granularity = PMEM2_GRANULARITY_BYTE;
		else
			UT_FATAL("invalid configuration IS_EADR && !IS_PMEM");
	} else if (is_pmem) {
		ctx->expected_granularity = PMEM2_GRANULARITY_CACHE_LINE;
	} else {
		ctx->expected_granularity = PMEM2_GRANULARITY_PAGE;
	}
}

/*
 * init_cfg -- initialize basic pmem2 config
 */
static void
init_cfg(struct pmem2_config *cfg,
	struct pmem2_source **src, struct test_ctx *ctx)
{
	pmem2_config_init(cfg);
	int ret = pmem2_source_from_fd(src, ctx->fd);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
}

/*
 * cleanup -- cleanup the environment after test
 */
static void
cleanup(struct pmem2_source *src, struct test_ctx *ctx)
{
#ifdef _WIN32
	CloseHandle(src->value.handle);
#else
	CLOSE(ctx->fd);
#endif
}

/*
 * map_with_available_granularity -- map the range with valid granularity,
 * includes cleanup
 */
static void
map_with_available_granularity(struct pmem2_config *cfg,
	struct pmem2_source *src, struct test_ctx *ctx)
{
	cfg->requested_max_granularity = ctx->requested_granularity;

	struct pmem2_map *map;
	int ret = pmem2_map(cfg, src, &map);
	UT_PMEM2_EXPECT_RETURN(ret, 0);
	UT_ASSERTne(map, NULL);
	UT_ASSERTeq(ctx->expected_granularity,
			pmem2_map_get_store_granularity(map));

	/* cleanup after the test */
	pmem2_unmap(&map);
}

/*
 * map_with_unavailable_granularity -- map the range with invalid
 * granularity (unsuccessful)
 */
static void
map_with_unavailable_granularity(struct pmem2_config *cfg,
	struct pmem2_source *src, struct test_ctx *ctx)
{
	cfg->requested_max_granularity = ctx->requested_granularity;

	struct pmem2_map *map;
	int ret = pmem2_map(cfg, src, &map);
	UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_GRANULARITY_NOT_SUPPORTED);
	UT_ERR("%s", pmem2_errormsg());
	UT_ASSERTeq(map, NULL);
}

typedef void(*map_func)(struct pmem2_config *cfg,
	struct pmem2_source *src, struct test_ctx *ctx);

/*
 * granularity_template -- template for testing granularity in pmem2
 */
static int
granularity_template(const struct test_case *tc, int argc, char *argv[],
	map_func map_do, enum pmem2_granularity granularity)
{
	char *file = NULL;
	int ret = parse_args(tc, argc, argv, &file);

	struct test_ctx ctx = { 0 };
	init_test(file, &ctx, granularity);

	struct pmem2_config cfg;
	struct pmem2_source *src;
	init_cfg(&cfg, &src, &ctx);

	map_do(&cfg, src, &ctx);

	cleanup(src, &ctx);
	pmem2_source_delete(&src);

	return ret;
}

/*
 * test_granularity_req_byte_avail_byte -- require byte granularity,
 * when byte granularity is available
 */
static int
test_granularity_req_byte_avail_byte(const struct test_case *tc,
	int argc, char *argv[])
{
	return granularity_template(tc, argc, argv,
			map_with_available_granularity, PMEM2_GRANULARITY_BYTE);
}

/*
 * test_granularity_req_byte_avail_cl -- require byte granularity,
 * when cache line granularity is available
 */
static int
test_granularity_req_byte_avail_cl(const struct test_case *tc,
	int argc, char *argv[])
{
	return granularity_template(tc, argc, argv,
			map_with_unavailable_granularity,
			PMEM2_GRANULARITY_BYTE);
}

/*
 * test_granularity_req_byte_avail_page -- require byte granularity,
 * when page granularity is available
 */
static int
test_granularity_req_byte_avail_page(const struct test_case *tc,
	int argc, char *argv[])
{
	return granularity_template(tc, argc, argv,
			map_with_unavailable_granularity,
			PMEM2_GRANULARITY_BYTE);
}

/*
 * test_granularity_req_cl_avail_byte -- require cache line granularity,
 * when byte granularity is available
 */
static int
test_granularity_req_cl_avail_byte(const struct test_case *tc,
	int argc, char *argv[])
{
	return granularity_template(tc, argc, argv,
			map_with_available_granularity,
			PMEM2_GRANULARITY_CACHE_LINE);
}

/*
 * test_granularity_req_cl_avail_cl -- require cache line granularity,
 * when cache line granularity is available
 */
static int
test_granularity_req_cl_avail_cl(const struct test_case *tc,
	int argc, char *argv[])
{
	return granularity_template(tc, argc, argv,
			map_with_available_granularity,
			PMEM2_GRANULARITY_CACHE_LINE);
}

/*
 * test_granularity_req_cl_avail_page -- require cache line granularity,
 * when page granularity is available
 */
static int
test_granularity_req_cl_avail_page(const struct test_case *tc,
	int argc, char *argv[])
{
	return granularity_template(tc, argc, argv,
			map_with_unavailable_granularity,
			PMEM2_GRANULARITY_CACHE_LINE);
}

/*
 * test_granularity_req_page_avail_byte -- require page granularity,
 * when byte granularity is available
 */
static int
test_granularity_req_page_avail_byte(const struct test_case *tc,
	int argc, char *argv[])
{
	return granularity_template(tc, argc, argv,
			map_with_available_granularity, PMEM2_GRANULARITY_PAGE);
}

/*
 * test_granularity_req_page_avail_cl -- require page granularity,
 * when cache line granularity is available
 */
static int
test_granularity_req_page_avail_cl(const struct test_case *tc,
	int argc, char *argv[])
{
	return granularity_template(tc, argc, argv,
			map_with_available_granularity, PMEM2_GRANULARITY_PAGE);
}

/*
 * test_granularity_req_page_avail_page -- require page granularity,
 * when page granularity is available
 */
static int
test_granularity_req_page_avail_page(const struct test_case *tc,
	int argc, char *argv[])
{
	return granularity_template(tc, argc, argv,
			map_with_available_granularity, PMEM2_GRANULARITY_PAGE);
}

/*
 * test_cases -- available test cases
 */
static struct test_case test_cases[] = {
	TEST_CASE(test_granularity_req_byte_avail_byte),
	TEST_CASE(test_granularity_req_byte_avail_cl),
	TEST_CASE(test_granularity_req_byte_avail_page),
	TEST_CASE(test_granularity_req_cl_avail_byte),
	TEST_CASE(test_granularity_req_cl_avail_cl),
	TEST_CASE(test_granularity_req_cl_avail_page),
	TEST_CASE(test_granularity_req_page_avail_byte),
	TEST_CASE(test_granularity_req_page_avail_cl),
	TEST_CASE(test_granularity_req_page_avail_page),
};

#define NTESTS ARRAY_SIZE(test_cases)

int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem2_granularity");
	out_init("pmem2_granularity", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	out_fini();
	DONE(NULL);
}

#ifdef _MSC_VER
MSVC_CONSTR(libpmem2_init)
MSVC_DESTR(libpmem2_fini)
#endif
7,665
23.106918
74
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_granularity/mocks_posix.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */

/*
 * mocks_posix.c -- mocked functions used in auto_flush_linux.c
 *
 * The mocks redirect sysfs bus-device lookups to a path supplied in the
 * BUS_DEVICE_PATH environment variable, and make mmap honor the IS_PMEM
 * environment variable when MAP_SYNC is requested.
 */

#include <fts.h>
#include "map.h"
#include "../common/mmap.h"
#include "fs.h"
#include "unittest.h"

#define BUS_DEVICE_PATH "/sys/bus/nd/devices"

/*
 * mmap - mock mmap
 */
FUNC_MOCK(mmap, void *, void *addr, size_t len, int prot,
		int flags, int fd, __off_t offset)
FUNC_MOCK_RUN_DEFAULT {
	char *str_map_sync = os_getenv("IS_PMEM");
	/* MAP_SYNC is only meaningful together with MAP_SHARED_VALIDATE */
	const int ms = MAP_SYNC | MAP_SHARED_VALIDATE;
	int map_sync_try = ((flags & ms) == ms) ? 1 : 0;

	if (str_map_sync && atoi(str_map_sync) == 1) {
		/* IS_PMEM=1: accept MAP_SYNC by downgrading to MAP_SHARED */
		if (map_sync_try) {
			flags &= ~ms;
			flags |= MAP_SHARED;
			return _FUNC_REAL(mmap)(addr, len, prot, flags,
					fd, offset);
		}
	} else if (map_sync_try) {
		/* IS_PMEM unset/0: reject MAP_SYNC like a non-pmem kernel */
		errno = EINVAL;
		return MAP_FAILED;
	}

	return _FUNC_REAL(mmap)(addr, len, prot, flags, fd, offset);
}
FUNC_MOCK_END

/*
 * open -- open mock
 */
FUNC_MOCK(open, int, const char *path, int flags, ...)
FUNC_MOCK_RUN_DEFAULT {
	va_list ap;
	va_start(ap, flags);
	int mode = va_arg(ap, int);
	va_end(ap);

	/* only redirect non-"region" paths under /sys/bus/nd/devices */
	char *is_bus_device_path = strstr(path, BUS_DEVICE_PATH);
	if (!is_bus_device_path ||
			(is_bus_device_path && strstr(path, "region")))
		return _FUNC_REAL(open)(path, flags, mode);

	const char *mock_path = os_getenv("BUS_DEVICE_PATH");
	return _FUNC_REAL(open)(mock_path, flags, mode);
}
FUNC_MOCK_END

struct fs {
	FTS *ft;
	struct fs_entry entry;
};

/*
 * fs_new -- creates fs traversal instance
 */
FUNC_MOCK(fs_new, struct fs *, const char *path)
FUNC_MOCK_RUN_DEFAULT {
	char *is_bus_device_path = strstr(path, BUS_DEVICE_PATH);
	if (!is_bus_device_path ||
			(is_bus_device_path && strstr(path, "region")))
		return _FUNC_REAL(fs_new)(path);

	const char *mock_path = os_getenv("BUS_DEVICE_PATH");
	return _FUNC_REAL(fs_new)(mock_path);
}
FUNC_MOCK_END

/*
 * os_stat -- os_stat mock to handle sysfs path
 */
FUNC_MOCK(os_stat, int, const char *path, os_stat_t *buf)
FUNC_MOCK_RUN_DEFAULT {
	char *is_bus_device_path = strstr(path, BUS_DEVICE_PATH);
	if (!is_bus_device_path ||
			(is_bus_device_path && strstr(path, "region")))
		return _FUNC_REAL(os_stat)(path, buf);

	const char *mock_path = os_getenv("BUS_DEVICE_PATH");
	return _FUNC_REAL(os_stat)(mock_path, buf);
}
FUNC_MOCK_END
2,302
23.5
63
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_granularity/mocks_dax_windows.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ /* * mocks_dax_windows.c -- mocked function required to control * FILE_DAX_VOLUME value reported by the OS APIs */ #include "unittest.h" FUNC_MOCK_DLLIMPORT(GetVolumeInformationByHandleW, BOOL, HANDLE hFile, LPWSTR lpVolumeNameBuffer, DWORD nVolumeNameSize, LPDWORD lpVolumeSerialNumber, LPDWORD lpMaximumComponentLength, LPDWORD lpFileSystemFlags, LPWSTR lpFileSystemNameBuffer, DWORD nFileSystemNameSize) FUNC_MOCK_RUN_DEFAULT { size_t is_pmem = atoi(os_getenv("IS_PMEM")); if (is_pmem) *lpFileSystemFlags = FILE_DAX_VOLUME; else *lpFileSystemFlags = 0; return TRUE; } FUNC_MOCK_END
688
22.758621
61
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_granularity/mocks_dax_windows.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ /* * mocks_dax_windows.h -- redefinitions of GetVolumeInformationByHandleW * * This file is Windows-specific. * * This file should be included (i.e. using Forced Include) by libpmem2 * files, when compiled for the purpose of pmem2_granularity test. * It would replace default implementation with mocked functions defined * in mocks_windows.c * * This WRAP_REAL define could also be passed as a preprocessor definition. */ #ifndef MOCKS_WINDOWS_H #define MOCKS_WINDOWS_H 1 #include <windows.h> #ifndef WRAP_REAL #define GetVolumeInformationByHandleW __wrap_GetVolumeInformationByHandleW BOOL __wrap_GetVolumeInformationByHandleW(HANDLE hFile, LPWSTR lpVolumeNameBuffer, DWORD nVolumeNameSize, LPDWORD lpVolumeSerialNumber, LPDWORD lpMaximumComponentLength, LPDWORD lpFileSystemFlags, LPWSTR lpFileSystemNameBuffer, DWORD nFileSystemNameSize); #endif #endif
956
28.90625
77
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/rpmem_fip/rpmem_fip_test.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * rpmem_fip_test.c -- tests for rpmem_fip and rpmemd_fip modules */ #include <netdb.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include "unittest.h" #include "pmemcommon.h" #include "librpmem.h" #include "rpmem.h" #include "rpmem_proto.h" #include "rpmem_common.h" #include "rpmem_util.h" #include "rpmem_fip_common.h" #include "rpmem_fip_oob.h" #include "rpmemd_fip.h" #include "rpmemd_log.h" #include "rpmemd_util.h" #include "rpmem_fip.h" #include "os.h" #define SIZE_PER_LANE 64 #define COUNT_PER_LANE 32 #define NLANES 1024 #define SOCK_NLANES 32 #define NTHREADS 32 #define TOTAL_PER_LANE (SIZE_PER_LANE * COUNT_PER_LANE) #define POOL_SIZE (NLANES * TOTAL_PER_LANE) static uint8_t lpool[POOL_SIZE]; static uint8_t rpool[POOL_SIZE]; TEST_CASE_DECLARE(client_init); TEST_CASE_DECLARE(server_init); TEST_CASE_DECLARE(client_connect); TEST_CASE_DECLARE(server_connect); TEST_CASE_DECLARE(server_process); TEST_CASE_DECLARE(client_flush); TEST_CASE_DECLARE(client_flush_mt); TEST_CASE_DECLARE(client_persist); TEST_CASE_DECLARE(client_persist_mt); TEST_CASE_DECLARE(client_read); TEST_CASE_DECLARE(client_wq_size); struct fip_client { enum rpmem_provider provider; unsigned max_wq_size; unsigned nlanes; }; #define FIP_CLIENT_DEFAULT {RPMEM_PROV_UNKNOWN, 0, NLANES} /* * get_persist_method -- parse persist method */ static enum rpmem_persist_method get_persist_method(const char *pm) { if (strcmp(pm, "GPSPM") == 0) return RPMEM_PM_GPSPM; else if (strcmp(pm, "APM") == 0) return RPMEM_PM_APM; else UT_FATAL("unknown method"); } /* * get_provider -- get provider for given target */ static void get_provider(const char *target, const char *prov_name, struct fip_client *client) { struct rpmem_fip_probe probe; int ret; int any = 0; if (strcmp(prov_name, "any") == 0) any = 1; ret = rpmem_fip_probe_get(target, &probe); UT_ASSERTeq(ret, 0); UT_ASSERT(rpmem_fip_probe_any(probe)); 
if (any) { /* return verbs in first place */ if (rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_VERBS)) client->provider = RPMEM_PROV_LIBFABRIC_VERBS; else if (rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_SOCKETS)) client->provider = RPMEM_PROV_LIBFABRIC_SOCKETS; else UT_ASSERT(0); } else { client->provider = rpmem_provider_from_str(prov_name); UT_ASSERTne(client->provider, RPMEM_PROV_UNKNOWN); UT_ASSERT(rpmem_fip_probe(probe, client->provider)); } /* * Decrease number of lanes for socket provider because * the test may be too long. */ if (client->provider == RPMEM_PROV_LIBFABRIC_SOCKETS) client->nlanes = min(client->nlanes, SOCK_NLANES); client->max_wq_size = probe.max_wq_size[client->provider]; } /* * set_pool_data -- set pools data to well known values */ static void set_pool_data(uint8_t *pool, int inverse) { for (unsigned l = 0; l < NLANES; l++) { for (unsigned i = 0; i < COUNT_PER_LANE; i++) { size_t offset = l * TOTAL_PER_LANE + i * SIZE_PER_LANE; unsigned val = i + l; if (inverse) val = ~val; memset(&pool[offset], (int)val, SIZE_PER_LANE); } } } /* * flush_arg -- arguments for client persist and flush / drain threads */ struct flush_arg { struct rpmem_fip *fip; unsigned lane; }; typedef void *(*flush_fn)(void *arg); /* * client_flush_thread -- thread callback for flush / drain operation */ static void * client_flush_thread(void *arg) { struct flush_arg *args = arg; int ret; /* persist with len == 0 should always succeed */ ret = rpmem_fip_flush(args->fip, args->lane * TOTAL_PER_LANE, 0, args->lane, RPMEM_FLUSH_WRITE); UT_ASSERTeq(ret, 0); for (unsigned i = 0; i < COUNT_PER_LANE; i++) { size_t offset = args->lane * TOTAL_PER_LANE + i * SIZE_PER_LANE; unsigned val = args->lane + i; memset(&lpool[offset], (int)val, SIZE_PER_LANE); ret = rpmem_fip_flush(args->fip, offset, SIZE_PER_LANE, args->lane, RPMEM_FLUSH_WRITE); UT_ASSERTeq(ret, 0); } ret = rpmem_fip_drain(args->fip, args->lane); UT_ASSERTeq(ret, 0); return NULL; } /* * client_persist_thread -- thread 
callback for persist operation */ static void * client_persist_thread(void *arg) { struct flush_arg *args = arg; int ret; /* persist with len == 0 should always succeed */ ret = rpmem_fip_persist(args->fip, args->lane * TOTAL_PER_LANE, 0, args->lane, RPMEM_FLUSH_WRITE); UT_ASSERTeq(ret, 0); for (unsigned i = 0; i < COUNT_PER_LANE; i++) { size_t offset = args->lane * TOTAL_PER_LANE + i * SIZE_PER_LANE; unsigned val = args->lane + i; memset(&lpool[offset], (int)val, SIZE_PER_LANE); ret = rpmem_fip_persist(args->fip, offset, SIZE_PER_LANE, args->lane, RPMEM_FLUSH_WRITE); UT_ASSERTeq(ret, 0); } return NULL; } /* * client_init -- test case for client initialization */ int client_init(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>", tc->name); char *target = argv[0]; char *prov_name = argv[1]; char *persist_method = argv[2]; set_rpmem_cmd("server_init %s", persist_method); char fip_service[NI_MAXSERV]; struct rpmem_target_info *info; info = rpmem_target_parse(target); UT_ASSERTne(info, NULL); struct fip_client fip_client = FIP_CLIENT_DEFAULT; get_provider(info->node, prov_name, &fip_client); client_t *client; struct rpmem_resp_attr resp; client = client_exchange(info, fip_client.nlanes, fip_client.provider, &resp); struct rpmem_fip_attr attr = { .provider = fip_client.provider, .max_wq_size = fip_client.max_wq_size, .persist_method = resp.persist_method, .laddr = lpool, .size = POOL_SIZE, .nlanes = resp.nlanes, .raddr = (void *)resp.raddr, .rkey = resp.rkey, }; ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port); UT_ASSERT(sret > 0); /* * Tune the maximum number of lanes according to environment. 
*/ rpmem_util_get_env_max_nlanes(&Rpmem_max_nlanes); struct rpmem_fip *fip; fip = rpmem_fip_init(info->node, fip_service, &attr, &fip_client.nlanes); UT_ASSERTne(fip, NULL); client_close_begin(client); client_close_end(client); rpmem_fip_fini(fip); rpmem_target_free(info); return 3; } /* * server_init -- test case for server initialization */ int server_init(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s <persist method>", tc->name); enum rpmem_persist_method persist_method = get_persist_method(argv[0]); unsigned nlanes; enum rpmem_provider provider; char *addr = NULL; int ret; server_exchange_begin(&nlanes, &provider, &addr); UT_ASSERTne(addr, NULL); struct rpmemd_fip_attr attr = { .addr = rpool, .size = POOL_SIZE, .nlanes = nlanes, .provider = provider, .persist_method = persist_method, .nthreads = NTHREADS, }; ret = rpmemd_apply_pm_policy(&attr.persist_method, &attr.persist, &attr.memcpy_persist, 1 /* is pmem */); UT_ASSERTeq(ret, 0); struct rpmem_resp_attr resp; struct rpmemd_fip *fip; enum rpmem_err err; fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err); UT_ASSERTne(fip, NULL); server_exchange_end(resp); server_close_begin(); server_close_end(); rpmemd_fip_fini(fip); FREE(addr); return 1; } /* * client_connect -- test case for establishing connection - client side */ int client_connect(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>", tc->name); char *target = argv[0]; char *prov_name = argv[1]; char *persist_method = argv[2]; set_rpmem_cmd("server_connect %s", persist_method); char fip_service[NI_MAXSERV]; struct rpmem_target_info *info; int ret; info = rpmem_target_parse(target); UT_ASSERTne(info, NULL); struct fip_client fip_client = FIP_CLIENT_DEFAULT; get_provider(info->node, prov_name, &fip_client); client_t *client; struct rpmem_resp_attr resp; client = client_exchange(info, fip_client.nlanes, fip_client.provider, &resp); struct 
rpmem_fip_attr attr = { .provider = fip_client.provider, .max_wq_size = fip_client.max_wq_size, .persist_method = resp.persist_method, .laddr = lpool, .size = POOL_SIZE, .nlanes = resp.nlanes, .raddr = (void *)resp.raddr, .rkey = resp.rkey, }; ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port); UT_ASSERT(sret > 0); struct rpmem_fip *fip; fip = rpmem_fip_init(info->node, fip_service, &attr, &fip_client.nlanes); UT_ASSERTne(fip, NULL); ret = rpmem_fip_connect(fip); UT_ASSERTeq(ret, 0); client_close_begin(client); ret = rpmem_fip_close(fip); UT_ASSERTeq(ret, 0); client_close_end(client); rpmem_fip_fini(fip); rpmem_target_free(info); return 3; } /* * server_connect -- test case for establishing connection - server side */ int server_connect(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s <persist method>", tc->name); enum rpmem_persist_method persist_method = get_persist_method(argv[0]); unsigned nlanes; enum rpmem_provider provider; char *addr = NULL; server_exchange_begin(&nlanes, &provider, &addr); UT_ASSERTne(addr, NULL); struct rpmemd_fip_attr attr = { .addr = rpool, .size = POOL_SIZE, .nlanes = nlanes, .provider = provider, .persist_method = persist_method, .nthreads = NTHREADS, }; int ret; struct rpmem_resp_attr resp; struct rpmemd_fip *fip; enum rpmem_err err; ret = rpmemd_apply_pm_policy(&attr.persist_method, &attr.persist, &attr.memcpy_persist, 1 /* is pmem */); UT_ASSERTeq(ret, 0); fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err); UT_ASSERTne(fip, NULL); server_exchange_end(resp); ret = rpmemd_fip_accept(fip, -1); UT_ASSERTeq(ret, 0); server_close_begin(); server_close_end(); ret = rpmemd_fip_wait_close(fip, -1); UT_ASSERTeq(ret, 0); ret = rpmemd_fip_close(fip); UT_ASSERTeq(ret, 0); rpmemd_fip_fini(fip); FREE(addr); return 1; } /* * server_process -- test case for processing data on server side */ int server_process(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: 
%s <persist method>", tc->name); enum rpmem_persist_method persist_method = get_persist_method(argv[0]); set_pool_data(rpool, 1); unsigned nlanes; enum rpmem_provider provider; char *addr = NULL; server_exchange_begin(&nlanes, &provider, &addr); UT_ASSERTne(addr, NULL); struct rpmemd_fip_attr attr = { .addr = rpool, .size = POOL_SIZE, .nlanes = nlanes, .provider = provider, .persist_method = persist_method, .nthreads = NTHREADS, }; int ret; struct rpmem_resp_attr resp; struct rpmemd_fip *fip; enum rpmem_err err; ret = rpmemd_apply_pm_policy(&attr.persist_method, &attr.persist, &attr.memcpy_persist, 1 /* is pmem */); UT_ASSERTeq(ret, 0); fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err); UT_ASSERTne(fip, NULL); server_exchange_end(resp); ret = rpmemd_fip_accept(fip, -1); UT_ASSERTeq(ret, 0); ret = rpmemd_fip_process_start(fip); server_close_begin(); ret = rpmemd_fip_process_stop(fip); UT_ASSERTeq(ret, 0); server_close_end(); ret = rpmemd_fip_wait_close(fip, -1); UT_ASSERTeq(ret, 0); ret = rpmemd_fip_close(fip); UT_ASSERTeq(ret, 0); rpmemd_fip_fini(fip); FREE(addr); return 1; } /* * flush_common -- common part for single-threaded persist and flush / drain * test cases */ static void flush_common(char *target, char *prov_name, char *persist_method, flush_fn flush_func) { set_rpmem_cmd("server_process %s", persist_method); char fip_service[NI_MAXSERV]; struct rpmem_target_info *info; info = rpmem_target_parse(target); UT_ASSERTne(info, NULL); int ret; set_pool_data(lpool, 1); set_pool_data(rpool, 1); struct fip_client fip_client = FIP_CLIENT_DEFAULT; get_provider(info->node, prov_name, &fip_client); client_t *client; struct rpmem_resp_attr resp; client = client_exchange(info, fip_client.nlanes, fip_client.provider, &resp); struct rpmem_fip_attr attr = { .provider = fip_client.provider, .max_wq_size = fip_client.max_wq_size, .persist_method = resp.persist_method, .laddr = lpool, .size = POOL_SIZE, .nlanes = resp.nlanes, .raddr = (void *)resp.raddr, .rkey = 
resp.rkey, }; ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port); UT_ASSERT(sret > 0); struct rpmem_fip *fip; fip = rpmem_fip_init(info->node, fip_service, &attr, &fip_client.nlanes); UT_ASSERTne(fip, NULL); ret = rpmem_fip_connect(fip); UT_ASSERTeq(ret, 0); struct flush_arg arg = { .fip = fip, .lane = 0, }; flush_func(&arg); ret = rpmem_fip_read(fip, rpool, POOL_SIZE, 0, 0); UT_ASSERTeq(ret, 0); client_close_begin(client); ret = rpmem_fip_close(fip); UT_ASSERTeq(ret, 0); client_close_end(client); rpmem_fip_fini(fip); ret = memcmp(rpool, lpool, POOL_SIZE); UT_ASSERTeq(ret, 0); rpmem_target_free(info); } /* * flush_common_mt -- common part for multi-threaded persist and flush / drain * test cases */ static int flush_common_mt(char *target, char *prov_name, char *persist_method, flush_fn flush_thread_func) { set_rpmem_cmd("server_process %s", persist_method); char fip_service[NI_MAXSERV]; struct rpmem_target_info *info; int ret; info = rpmem_target_parse(target); UT_ASSERTne(info, NULL); set_pool_data(lpool, 1); set_pool_data(rpool, 1); struct fip_client fip_client = FIP_CLIENT_DEFAULT; get_provider(info->node, prov_name, &fip_client); client_t *client; struct rpmem_resp_attr resp; client = client_exchange(info, fip_client.nlanes, fip_client.provider, &resp); struct rpmem_fip_attr attr = { .provider = fip_client.provider, .max_wq_size = fip_client.max_wq_size, .persist_method = resp.persist_method, .laddr = lpool, .size = POOL_SIZE, .nlanes = resp.nlanes, .raddr = (void *)resp.raddr, .rkey = resp.rkey, }; ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port); UT_ASSERT(sret > 0); struct rpmem_fip *fip; fip = rpmem_fip_init(info->node, fip_service, &attr, &fip_client.nlanes); UT_ASSERTne(fip, NULL); ret = rpmem_fip_connect(fip); UT_ASSERTeq(ret, 0); os_thread_t *flush_thread = MALLOC(resp.nlanes * sizeof(os_thread_t)); struct flush_arg *args = MALLOC(resp.nlanes * sizeof(struct flush_arg)); for (unsigned i = 0; i < fip_client.nlanes; i++) { 
args[i].fip = fip; args[i].lane = i; THREAD_CREATE(&flush_thread[i], NULL, flush_thread_func, &args[i]); } for (unsigned i = 0; i < fip_client.nlanes; i++) THREAD_JOIN(&flush_thread[i], NULL); ret = rpmem_fip_read(fip, rpool, POOL_SIZE, 0, 0); UT_ASSERTeq(ret, 0); client_close_begin(client); ret = rpmem_fip_close(fip); UT_ASSERTeq(ret, 0); client_close_end(client); rpmem_fip_fini(fip); FREE(flush_thread); FREE(args); ret = memcmp(rpool, lpool, POOL_SIZE); UT_ASSERTeq(ret, 0); rpmem_target_free(info); return 3; } /* * client_flush -- test case for single-threaded flush / drain operation */ int client_flush(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>", tc->name); char *target = argv[0]; char *prov_name = argv[1]; char *persist_method = argv[2]; flush_common(target, prov_name, persist_method, client_flush_thread); return 3; } /* * client_flush_mt -- test case for multi-threaded flush / drain operation */ int client_flush_mt(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>", tc->name); char *target = argv[0]; char *prov_name = argv[1]; char *persist_method = argv[2]; flush_common_mt(target, prov_name, persist_method, client_flush_thread); return 3; } /* * client_persist -- test case for single-threaded persist operation */ int client_persist(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>", tc->name); char *target = argv[0]; char *prov_name = argv[1]; char *persist_method = argv[2]; flush_common(target, prov_name, persist_method, client_persist_thread); return 3; } /* * client_persist_mt -- test case for multi-threaded persist operation */ int client_persist_mt(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>", tc->name); char *target = argv[0]; char *prov_name = 
argv[1]; char *persist_method = argv[2]; flush_common_mt(target, prov_name, persist_method, client_persist_thread); return 3; } /* * client_read -- test case for read operation */ int client_read(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>", tc->name); char *target = argv[0]; char *prov_name = argv[1]; char *persist_method = argv[2]; set_rpmem_cmd("server_process %s", persist_method); char fip_service[NI_MAXSERV]; struct rpmem_target_info *info; int ret; info = rpmem_target_parse(target); UT_ASSERTne(info, NULL); set_pool_data(lpool, 0); set_pool_data(rpool, 1); struct fip_client fip_client = FIP_CLIENT_DEFAULT; get_provider(info->node, prov_name, &fip_client); client_t *client; struct rpmem_resp_attr resp; client = client_exchange(info, fip_client.nlanes, fip_client.provider, &resp); struct rpmem_fip_attr attr = { .provider = fip_client.provider, .max_wq_size = fip_client.max_wq_size, .persist_method = resp.persist_method, .laddr = lpool, .size = POOL_SIZE, .nlanes = resp.nlanes, .raddr = (void *)resp.raddr, .rkey = resp.rkey, }; ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port); UT_ASSERT(sret > 0); struct rpmem_fip *fip; fip = rpmem_fip_init(info->node, fip_service, &attr, &fip_client.nlanes); UT_ASSERTne(fip, NULL); ret = rpmem_fip_connect(fip); UT_ASSERTeq(ret, 0); /* read with len == 0 should always succeed */ ret = rpmem_fip_read(fip, lpool, 0, 0, 0); UT_ASSERTeq(ret, 0); ret = rpmem_fip_read(fip, lpool, POOL_SIZE, 0, 0); UT_ASSERTeq(ret, 0); client_close_begin(client); ret = rpmem_fip_close(fip); UT_ASSERTeq(ret, 0); client_close_end(client); rpmem_fip_fini(fip); ret = memcmp(rpool, lpool, POOL_SIZE); UT_ASSERTeq(ret, 0); rpmem_target_free(info); return 3; } #define LT_MAX_WQ_SIZE "LT_MAX_WQ_SIZE" /* < max_wq_size */ #define EQ_MAX_WQ_SIZE "EQ_MAX_WQ_SIZE" /* == max_wq_size */ #define GT_MAX_WQ_SIZE "GT_MAX_WQ_SIZE" /* > max_wq_size */ /* * client_wq_size -- 
test case for WQ size adjustment */ int client_wq_size(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>" "<wq_size>", tc->name); char *target = argv[0]; char *prov_name = argv[1]; char *persist_method = argv[2]; char *wq_size_env_str = argv[3]; set_rpmem_cmd("server_process %s", persist_method); char fip_service[NI_MAXSERV]; struct rpmem_target_info *info; int ret; info = rpmem_target_parse(target); UT_ASSERTne(info, NULL); struct fip_client fip_client = FIP_CLIENT_DEFAULT; get_provider(info->node, prov_name, &fip_client); rpmem_util_get_env_max_nlanes(&fip_client.nlanes); client_t *client; struct rpmem_resp_attr resp; client = client_exchange(info, fip_client.nlanes, fip_client.provider, &resp); struct rpmem_fip_attr attr = { .provider = fip_client.provider, .max_wq_size = fip_client.max_wq_size, .persist_method = resp.persist_method, .laddr = lpool, .size = POOL_SIZE, .nlanes = resp.nlanes, .raddr = (void *)resp.raddr, .rkey = resp.rkey, }; ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port); UT_ASSERT(sret > 0); /* check RPMEM_WORK_QUEUE_SIZE env processing */ unsigned wq_size_default = Rpmem_wq_size; if (strcmp(wq_size_env_str, LT_MAX_WQ_SIZE) == 0) { Rpmem_wq_size = fip_client.max_wq_size - 1; } else if (strcmp(wq_size_env_str, EQ_MAX_WQ_SIZE) == 0) { Rpmem_wq_size = fip_client.max_wq_size; } else if (strcmp(wq_size_env_str, GT_MAX_WQ_SIZE) == 0) { Rpmem_wq_size = fip_client.max_wq_size + 1; } else { long wq_size_env = STRTOL(wq_size_env_str, NULL, 10); rpmem_util_get_env_wq_size(&Rpmem_wq_size); if (wq_size_env > 0) { if (wq_size_env < UINT_MAX) UT_ASSERT(Rpmem_wq_size == wq_size_env); else UT_ASSERT(Rpmem_wq_size == UINT_MAX); } else UT_ASSERT(Rpmem_wq_size == wq_size_default); } struct rpmem_fip *fip; fip = rpmem_fip_init(info->node, fip_service, &attr, &fip_client.nlanes); UT_ASSERTne(fip, NULL); size_t req_wq_size = rpmem_fip_wq_size( resp.persist_method, 
RPMEM_FIP_NODE_CLIENT); size_t eff_wq_size = rpmem_fip_get_wq_size(fip); /* max supported meets minimal requirements */ UT_ASSERT(fip_client.max_wq_size >= req_wq_size); /* calculated meets minimal requirements */ UT_ASSERT(eff_wq_size >= req_wq_size); /* calculated is supported */ UT_ASSERT(eff_wq_size <= fip_client.max_wq_size); /* if forced by env meets minimal requirements */ if (Rpmem_wq_size > req_wq_size) { /* and it is supported */ if (Rpmem_wq_size <= fip_client.max_wq_size) { /* calculated is >= to forced */ UT_ASSERT(eff_wq_size >= Rpmem_wq_size); } else { /* calculated is clipped to max supported */ UT_ASSERT(eff_wq_size == fip_client.max_wq_size); } } ret = rpmem_fip_connect(fip); UT_ASSERTeq(ret, 0); client_close_begin(client); ret = rpmem_fip_close(fip); UT_ASSERTeq(ret, 0); client_close_end(client); rpmem_fip_fini(fip); rpmem_target_free(info); return 4; } /* * test_cases -- available test cases */ static struct test_case test_cases[] = { TEST_CASE(client_init), TEST_CASE(server_init), TEST_CASE(client_connect), TEST_CASE(server_connect), TEST_CASE(client_flush), TEST_CASE(client_flush_mt), TEST_CASE(client_persist), TEST_CASE(client_persist_mt), TEST_CASE(server_process), TEST_CASE(client_read), TEST_CASE(client_wq_size) }; #define NTESTS (sizeof(test_cases) / sizeof(test_cases[0])) int main(int argc, char *argv[]) { /* workaround for left-opened files by libfabric */ rpmem_fip_probe_get("localhost", NULL); START(argc, argv, "rpmem_obc"); common_init("rpmem_fip", "RPMEM_LOG_LEVEL", "RPMEM_LOG_FILE", 0, 0); rpmem_util_cmds_init(); rpmemd_log_init("rpmemd", os_getenv("RPMEMD_LOG_FILE"), 0); rpmemd_log_level = rpmemd_log_level_from_str( os_getenv("RPMEMD_LOG_LEVEL")); TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS); common_fini(); rpmemd_log_close(); rpmem_util_cmds_fini(); DONE(NULL); }
22,586
21.97762
78
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/rpmem_fip/rpmem_fip_oob.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * rpmem_fip_sock.h -- simple oob connection implementation for exchanging * required RDMA related data */ #include <stdint.h> #include <netinet/in.h> typedef struct rpmem_ssh client_t; client_t *client_exchange(struct rpmem_target_info *info, unsigned nlanes, enum rpmem_provider provider, struct rpmem_resp_attr *resp); void client_close_begin(client_t *c); void client_close_end(client_t *c); void server_exchange_begin(unsigned *lanes, enum rpmem_provider *provider, char **addr); void server_exchange_end(struct rpmem_resp_attr resp); void server_close_begin(void); void server_close_end(void); void set_rpmem_cmd(const char *fmt, ...);
743
24.655172
74
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_source/pmem2_source.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * pmem2_source.c -- pmem2_source unittests */ #include "fault_injection.h" #include "libpmem2.h" #include "unittest.h" #include "ut_pmem2_utils.h" #include "ut_pmem2_config.h" #include "source.h" #include "out.h" /* * verify_fd -- verify value fd or handle in source */ static void verify_fd(struct pmem2_source *src, int fd) { #ifdef WIN32 UT_ASSERTeq(src->type, PMEM2_SOURCE_HANDLE); UT_ASSERTeq(src->value.handle, fd != INVALID_FD ? (HANDLE)_get_osfhandle(fd) : INVALID_HANDLE_VALUE); #else UT_ASSERTeq(src->type, PMEM2_SOURCE_FD); UT_ASSERTeq(src->value.fd, fd); #endif } /* * test_set_rw_fd - test setting O_RDWR fd */ static int test_set_rw_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_rw_fd <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_source *src; int ret = pmem2_source_from_fd(&src, fd); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTne(src, NULL); verify_fd(src, fd); ret = pmem2_source_delete(&src); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(src, NULL); CLOSE(fd); return 1; } /* * test_set_ro_fd - test setting O_RDONLY fd */ static int test_set_ro_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_ro_fd <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDONLY); struct pmem2_source *src; int ret = pmem2_source_from_fd(&src, fd); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTne(src, NULL); verify_fd(src, fd); ret = pmem2_source_delete(&src); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(src, NULL); CLOSE(fd); return 1; } /* * test_set_invalid_fd - test setting invalid fd */ static int test_set_invalid_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_invalid_fd <file>"); char *file = argv[0]; /* open and close the file to get invalid fd */ int fd = OPEN(file, O_WRONLY); CLOSE(fd); ut_suppress_crt_assert(); struct 
pmem2_source *src; int ret = pmem2_source_from_fd(&src, fd); ut_unsuppress_crt_assert(); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE); UT_ASSERTeq(src, NULL); return 1; } /* * test_set_wronly_fd - test setting wronly fd */ static int test_set_wronly_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_wronly_fd <file>"); char *file = argv[0]; int fd = OPEN(file, O_WRONLY); struct pmem2_source *src; int ret = pmem2_source_from_fd(&src, fd); #ifdef _WIN32 /* windows doesn't validate open flags */ UT_PMEM2_EXPECT_RETURN(ret, 0); verify_fd(src, fd); ret = pmem2_source_delete(&src); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(src, NULL); #else UT_ASSERTeq(src, NULL); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE); #endif CLOSE(fd); return 1; } /* * test_alloc_src_enomem - test pmem2_source allocation with error injection */ static int test_alloc_src_enomem(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_alloc_src_enomem <file>"); char *file = argv[0]; struct pmem2_source *src; if (!core_fault_injection_enabled()) { return 1; } int fd = OPEN(file, O_RDWR); core_inject_fault_at(PMEM_MALLOC, 1, "pmem2_malloc"); int ret = pmem2_source_from_fd(&src, fd); UT_PMEM2_EXPECT_RETURN(ret, -ENOMEM); UT_ASSERTeq(src, NULL); CLOSE(fd); return 1; } /* * test_delete_null_config - test pmem2_source_delete on NULL config */ static int test_delete_null_config(const struct test_case *tc, int argc, char *argv[]) { struct pmem2_source *src = NULL; /* should not crash */ int ret = pmem2_source_delete(&src); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(src, NULL); return 0; } #ifdef WIN32 /* * test_set_handle - test setting valid handle */ static int test_set_handle(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_handle <file>"); char *file = argv[0]; HANDLE h = CreateFile(file, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_ALWAYS, 0, NULL); 
UT_ASSERTne(h, INVALID_HANDLE_VALUE); struct pmem2_source *src; int ret = pmem2_source_from_handle(&src, h); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(src->value.handle, h); CloseHandle(h); pmem2_source_delete(&src); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(src, NULL); return 1; } /* * test_set_null_handle - test resetting handle */ static int test_set_null_handle(const struct test_case *tc, int argc, char *argv[]) { struct pmem2_source *src; int ret = pmem2_source_from_handle(&src, INVALID_HANDLE_VALUE); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE); UT_ASSERTeq(src, NULL); return 0; } /* * test_set_invalid_handle - test setting invalid handle */ static int test_set_invalid_handle(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_invalid_handle <file>"); char *file = argv[0]; struct pmem2_source *src; HANDLE h = CreateFile(file, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_ALWAYS, 0, NULL); UT_ASSERTne(h, INVALID_HANDLE_VALUE); CloseHandle(h); int ret = pmem2_source_from_handle(&src, h); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE); return 1; } /* * test_set_directory_handle - test setting a directory handle */ static int test_set_directory_handle(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_directory_handle <file>"); char *file = argv[0]; struct pmem2_source *src; HANDLE h = CreateFile(file, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_ALWAYS, FILE_FLAG_BACKUP_SEMANTICS, NULL); UT_ASSERTne(h, INVALID_HANDLE_VALUE); int ret = pmem2_source_from_handle(&src, h); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_TYPE); UT_ASSERTeq(src, NULL); CloseHandle(h); return 1; } /* * test_set_directory_handle - test setting a mutex handle */ static int test_set_mutex_handle(const struct test_case *tc, int argc, char *argv[]) { struct pmem2_source *src; HANDLE h = CreateMutex(NULL, FALSE, NULL); UT_ASSERTne(h, INVALID_HANDLE_VALUE); int ret = 
pmem2_source_from_handle(&src, h); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE); UT_ASSERTeq(src, NULL); CloseHandle(h); return 0; } #else /* * test_set_directory_handle - test setting directory's fd */ static int test_set_directory_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_directory_fd <file>"); char *file = argv[0]; struct pmem2_source *src; int fd = OPEN(file, O_RDONLY); int ret = pmem2_source_from_fd(&src, fd); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_TYPE); CLOSE(fd); return 1; } #endif /* * test_cases -- available test cases */ static struct test_case test_cases[] = { TEST_CASE(test_set_rw_fd), TEST_CASE(test_set_ro_fd), TEST_CASE(test_set_invalid_fd), TEST_CASE(test_set_wronly_fd), TEST_CASE(test_alloc_src_enomem), TEST_CASE(test_delete_null_config), #ifdef _WIN32 TEST_CASE(test_set_handle), TEST_CASE(test_set_null_handle), TEST_CASE(test_set_invalid_handle), TEST_CASE(test_set_directory_handle), TEST_CASE(test_set_mutex_handle), #else TEST_CASE(test_set_directory_fd), #endif }; #define NTESTS (sizeof(test_cases) / sizeof(test_cases[0])) int main(int argc, char **argv) { START(argc, argv, "pmem2_source"); util_init(); out_init("pmem2_source", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0); TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS); out_fini(); DONE(NULL); }
7,608
20.433803
77
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/tools/ddmap/ddmap.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * ddmap.c -- simple app for reading and writing data from/to a regular file or * dax device using mmap instead of file io API */ #include <stdio.h> #include <unistd.h> #include <getopt.h> #include <stdlib.h> #include <sys/mman.h> #include <errno.h> #include <fcntl.h> #include <inttypes.h> #include "common.h" #include "output.h" #include "mmap.h" #include "file.h" #include "util.h" #include "os.h" /* * ddmap_context -- context and arguments */ struct ddmap_context { char *file_in; /* input file name */ char *file_out; /* output file name */ char *str; /* string data to write */ size_t offset_in; /* offset from beginning of input file for */ /* read/write operations expressed in blocks */ size_t offset_out; /* offset from beginning of output file for */ /* read/write operations expressed in blocks */ size_t bytes; /* size of blocks to write at the time */ size_t count; /* number of blocks to read/write */ int checksum; /* compute checksum */ int runlen; /* print bytes as runlen/char sequence */ }; /* * the default context, with all fields initialized to zero or NULL */ static struct ddmap_context ddmap_default; /* * print_usage -- print short description of usage */ static void print_usage(void) { printf("Usage: ddmap [option] ...\n"); printf("Valid options:\n"); printf("-i FILE - read from FILE\n"); printf("-o FILE - write to FILE\n"); printf("-d STRING - STRING to be written\n"); printf("-s N - skip N blocks at start of input\n"); printf("-q N - skip N blocks at start of output\n"); printf("-b N - read/write N bytes at a time\n"); printf("-n N - copy N input blocks\n"); printf("-c - compute checksum\n"); printf("-r - print file content as runlen/char pairs\n"); printf("-h - print this usage info\n"); } /* * long_options -- command line options */ static const struct option long_options[] = { {"input-file", required_argument, NULL, 'i'}, {"output-file", required_argument, NULL, 
'o'}, {"string", required_argument, NULL, 'd'}, {"offset-in", required_argument, NULL, 's'}, {"offset-out", required_argument, NULL, 'q'}, {"block-size", required_argument, NULL, 'b'}, {"count", required_argument, NULL, 'n'}, {"checksum", no_argument, NULL, 'c'}, {"runlen", no_argument, NULL, 'r'}, {"help", no_argument, NULL, 'h'}, {NULL, 0, NULL, 0 }, }; /* * ddmap_print_char -- (internal) print single char * * Printable ASCII characters are printed normally, * NUL character is printed as a little circle (the degree symbol), * non-printable ASCII characters are printed as centered dots. */ static void ddmap_print_char(char c) { if (c == '\0') /* print the degree symbol for NUL */ printf("\u00B0"); else if (c >= ' ' && c <= '~') /* print printable ASCII character */ printf("%c", c); else /* print centered dot for non-printable character */ printf("\u00B7"); } /* * ddmap_print_runlen -- (internal) print file content as length/char pairs * * For each sequence of chars of the same value (could be just 1 byte) * print length of the sequence and the char value. 
*/ static void ddmap_print_runlen(char *addr, size_t len) { char c = '\0'; ssize_t cnt = 0; for (size_t i = 0; i < len; i++) { if (i > 0 && c != addr[i] && cnt != 0) { printf("%zd ", cnt); ddmap_print_char(c); printf("\n"); cnt = 0; } c = addr[i]; cnt++; } if (cnt) { printf("%zd ", cnt); ddmap_print_char(c); printf("\n"); } } /* * ddmap_print_bytes -- (internal) print array of bytes */ static void ddmap_print_bytes(const char *data, size_t len) { for (size_t i = 0; i < len; ++i) { ddmap_print_char(data[i]); } printf("\n"); } /* * ddmap_read -- (internal) read a string from the file at the offset and * print it to stdout */ static int ddmap_read(const char *path, size_t offset_in, size_t bytes, size_t count, int runlen) { size_t len = bytes * count; os_off_t offset = (os_off_t)(bytes * offset_in); char *read_buff = Zalloc(len + 1); if (read_buff == NULL) { outv_err("Zalloc(%zu) failed\n", len + 1); return -1; } ssize_t read_len = util_file_pread(path, read_buff, len, offset); if (read_len < 0) { outv_err("pread failed"); Free(read_buff); return -1; } else if ((size_t)read_len < len) { outv(1, "read less bytes than requested: %zd vs. 
%zu\n", read_len, len); } if (runlen) ddmap_print_runlen(read_buff, (size_t)read_len); else ddmap_print_bytes(read_buff, (size_t)read_len); Free(read_buff); return 0; } /* * ddmap_zero -- (internal) zero a range of data in the file */ static int ddmap_zero(const char *path, size_t offset, size_t len) { void *addr; ssize_t filesize = util_file_get_size(path); if (filesize < 0) { outv_err("invalid file size"); return -1; } if (offset + len > (size_t)filesize) len = (size_t)filesize - offset; addr = util_file_map_whole(path); if (addr == NULL) { outv_err("map failed"); return -1; } memset((char *)addr + offset, 0, len); util_unmap(addr, (size_t)filesize); return 0; } /* * ddmap_write_data -- (internal) write data to a file */ static int ddmap_write_data(const char *path, const char *data, os_off_t offset, size_t len) { if (util_file_pwrite(path, data, len, offset) < 0) { outv_err("pwrite for dax device failed: path %s," " len %zu, offset %zd", path, len, offset); return -1; } return 0; } /* * ddmap_write_from_file -- (internal) write data from file to dax device or * file */ static int ddmap_write_from_file(const char *path_in, const char *path_out, size_t offset_in, size_t offset_out, size_t bytes, size_t count) { char *src, *tmp_src; os_off_t offset; ssize_t file_in_size = util_file_get_size(path_in); size_t data_left, len; util_init(); src = util_file_map_whole(path_in); src += (os_off_t)(offset_in * bytes); offset = (os_off_t)(offset_out * bytes); data_left = (size_t)file_in_size; tmp_src = src; do { len = MIN(data_left, bytes); ddmap_write_data(path_out, tmp_src, offset, len); tmp_src += len; data_left -= len; if (data_left == 0) { data_left = (size_t)file_in_size; tmp_src = src; } offset += (os_off_t)len; count--; } while (count > 0); util_unmap(src, (size_t)file_in_size); return 0; } /* * ddmap_write -- (internal) write the string to the file */ static int ddmap_write(const char *path, const char *str, size_t offset_in, size_t bytes, size_t count) { /* 
calculate how many characters from the string are to be written */ size_t length; size_t str_len = (str != NULL) ? strlen(str) + 1 : 0; os_off_t offset = (os_off_t)(bytes * offset_in); size_t len = bytes * count; if (len == 0) length = str_len; else length = min(len, str_len); /* write the string */ if (length > 0) { if (ddmap_write_data(path, str, offset, length)) return -1; } /* zero the rest of requested range */ if (length < len) { if (ddmap_zero(path, (size_t)offset + length, len - length)) return -1; } return 0; } /* * ddmap_checksum -- (internal) compute checksum of a slice of an input file */ static int ddmap_checksum(const char *path, size_t bytes, size_t count, size_t offset_in) { char *src; uint64_t checksum; ssize_t filesize = util_file_get_size(path); os_off_t offset = (os_off_t)(bytes * offset_in); size_t len = bytes * count; if ((size_t)filesize < len + (size_t)offset) { outv_err("offset with length exceed file size"); return -1; } util_init(); src = util_file_map_whole(path); util_checksum(src + offset, len, &checksum, 1, 0); util_unmap(src, (size_t)filesize); printf("%" PRIu64 "\n", checksum); return 0; } /* * parse_args -- (internal) parse command line arguments */ static int parse_args(struct ddmap_context *ctx, int argc, char *argv[]) { int opt; char *endptr; size_t offset; size_t count; size_t bytes; while ((opt = getopt_long(argc, argv, "i:o:d:s:q:b:n:crhv", long_options, NULL)) != -1) { switch (opt) { case 'i': ctx->file_in = optarg; break; case 'o': ctx->file_out = optarg; break; case 'd': ctx->str = optarg; if (ctx->count == 0) ctx->count = strlen(ctx->str); if (ctx->bytes == 0) ctx->bytes = 1; break; case 's': errno = 0; offset = strtoul(optarg, &endptr, 0); if ((endptr && *endptr != '\0') || errno) { outv_err("'%s' -- invalid input offset", optarg); return -1; } ctx->offset_in = offset; break; case 'q': errno = 0; offset = strtoul(optarg, &endptr, 0); if ((endptr && *endptr != '\0') || errno) { outv_err("'%s' -- invalid output offset", 
optarg); return -1; } ctx->offset_out = offset; break; case 'b': errno = 0; bytes = strtoull(optarg, &endptr, 0); if ((endptr && *endptr != '\0') || errno) { outv_err("'%s' -- invalid block size", optarg); return -1; } ctx->bytes = bytes; break; case 'n': errno = 0; count = strtoull(optarg, &endptr, 0); if ((endptr && *endptr != '\0') || errno) { outv_err("'%s' -- invalid count", optarg); return -1; } ctx->count = count; break; case 'c': ctx->checksum = 1; break; case 'r': ctx->runlen = 1; break; case 'h': print_usage(); exit(EXIT_SUCCESS); case 'v': out_set_vlevel(1); break; default: print_usage(); exit(EXIT_FAILURE); } } return 0; } /* * validate_args -- (internal) validate arguments */ static int validate_args(struct ddmap_context *ctx) { if ((ctx->file_in == NULL) && (ctx->file_out == NULL)) { outv_err("an input file and/or an output file must be " "provided"); return -1; } else if (ctx->file_out == NULL) { if (ctx->bytes == 0) { outv_err("number of bytes to read has to be provided"); return -1; } } else if (ctx->file_in == NULL) { /* ddmap_write requirements */ if (ctx->str == NULL && (ctx->count * ctx->bytes) == 0) { outv_err("when writing, 'data' or 'count' and 'bytes' " "have to be provided"); return -1; } } else { /* scenarios other than ddmap_write requirement */ if ((ctx->bytes * ctx->count) == 0) { outv_err("number of bytes and count must be provided"); return -1; } } return 0; } /* * do_ddmap -- (internal) perform ddmap */ static int do_ddmap(struct ddmap_context *ctx) { if ((ctx->file_in != NULL) && (ctx->file_out != NULL)) { if (ddmap_write_from_file(ctx->file_in, ctx->file_out, ctx->offset_in, ctx->offset_out, ctx->bytes, ctx->count)) return -1; return 0; } if ((ctx->checksum == 1) && (ctx->file_in != NULL)) { if (ddmap_checksum(ctx->file_in, ctx->bytes, ctx->count, ctx->offset_in)) return -1; return 0; } if (ctx->file_in != NULL) { if (ddmap_read(ctx->file_in, ctx->offset_in, ctx->bytes, ctx->count, ctx->runlen)) return -1; } else { /* 
ctx->file_out != NULL */ if (ddmap_write(ctx->file_out, ctx->str, ctx->offset_in, ctx->bytes, ctx->count)) return -1; } return 0; } int main(int argc, char *argv[]) { #ifdef _WIN32 util_suppress_errmsg(); wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc); for (int i = 0; i < argc; i++) { argv[i] = util_toUTF8(wargv[i]); if (argv[i] == NULL) { for (i--; i >= 0; i--) free(argv[i]); outv_err("Error during arguments conversion\n"); return 1; } } #endif int ret = 0; struct ddmap_context ctx = ddmap_default; if ((ret = parse_args(&ctx, argc, argv))) goto out; if ((ret = validate_args(&ctx))) goto out; if ((ret = do_ddmap(&ctx))) { outv_err("failed to perform ddmap\n"); if (errno) outv_err("errno: %s\n", strerror(errno)); ret = -1; goto out; } out: #ifdef _WIN32 for (int i = argc; i > 0; i--) free(argv[i - 1]); #endif return ret; }
11,872
22.280392
79
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/tools/dllview/dllview.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * dllview.c -- a simple utility displaying the list of symbols exported by DLL * * usage: dllview filename */ #include <windows.h> #include <stdio.h> #include <winnt.h> #include <imagehlp.h> #include "util.h" int main(int argc, char *argv[]) { util_suppress_errmsg(); if (argc < 2) { fprintf(stderr, "usage: %s dllname\n", argv[0]); exit(1); } const char *dllname = argv[1]; LOADED_IMAGE img; if (MapAndLoad(dllname, NULL, &img, 1, 1) == FALSE) { fprintf(stderr, "cannot load DLL image\n"); exit(2); } IMAGE_EXPORT_DIRECTORY *dir; ULONG dirsize; dir = (IMAGE_EXPORT_DIRECTORY *)ImageDirectoryEntryToData( img.MappedAddress, 0 /* mapped as image */, IMAGE_DIRECTORY_ENTRY_EXPORT, &dirsize); if (dir == NULL) { fprintf(stderr, "cannot read image directory\n"); UnMapAndLoad(&img); exit(3); } DWORD *rva; rva = (DWORD *)ImageRvaToVa(img.FileHeader, img.MappedAddress, dir->AddressOfNames, NULL); for (DWORD i = 0; i < dir->NumberOfNames; i++) { char *name = (char *)ImageRvaToVa(img.FileHeader, img.MappedAddress, rva[i], NULL); printf("%s\n", name); } UnMapAndLoad(&img); return 0; }
1,233
20.649123
79
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/tools/cmpmap/cmpmap.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2019, Intel Corporation */ /* * cmpmap -- a tool for comparing files using mmap */ #include <stdlib.h> #include <stdio.h> #include <getopt.h> #include <sys/mman.h> #include <assert.h> #include <string.h> #include <errno.h> #include <unistd.h> #include "file.h" #include "fcntl.h" #include "mmap.h" #include "os.h" #include "util.h" #define CMPMAP_ZERO (1<<0) #define ADDR_SUM(vp, lp) ((void *)((char *)(vp) + (lp))) /* arguments */ static char *File1 = NULL; /* file1 name */ static char *File2 = NULL; /* file2 name */ static size_t Length = 0; /* number of bytes to read */ static os_off_t Offset = 0; /* offset from beginning of file */ static int Opts = 0; /* options flag */ /* * print_usage -- print short description of usage */ static void print_usage(void) { printf("Usage: cmpmap [options] file1 [file2]\n"); printf("Valid options:\n"); printf("-l, --length=N - compare up to N bytes\n"); printf("-o, --offset=N - skip N bytes at start of the files\n"); printf("-z, --zero - compare bytes of the file1 to NUL\n"); printf("-h, --help - print this usage info\n"); } /* * long_options -- command line options */ static const struct option long_options[] = { {"length", required_argument, NULL, 'l'}, {"offset", required_argument, NULL, 'o'}, {"zero", no_argument, NULL, 'z'}, {"help", no_argument, NULL, 'h'}, {NULL, 0, NULL, 0 }, }; /* * parse_args -- (internal) parse command line arguments */ static int parse_args(int argc, char *argv[]) { int opt; char *endptr; os_off_t off; ssize_t len; while ((opt = getopt_long(argc, argv, "l:o:zh", long_options, NULL)) != -1) { switch (opt) { case 'l': errno = 0; len = strtoll(optarg, &endptr, 0); if ((endptr && *endptr != '\0') || errno || len < 0) { fprintf(stderr, "'%s' -- invalid length", optarg); return -1; } Length = (size_t)len; break; case 'o': errno = 0; off = strtol(optarg, &endptr, 0); if ((endptr && *endptr != '\0') || errno || off < 0) { fprintf(stderr, "'%s' -- invalid 
offset", optarg); return -1; } Offset = off; break; case 'z': Opts |= CMPMAP_ZERO; break; case 'h': print_usage(); return 0; default: print_usage(); return -1; } } if (optind < argc) { File1 = argv[optind]; if (optind + 1 < argc) File2 = argv[optind + 1]; } else { print_usage(); return -1; } return 0; } /* * validate_args -- (internal) validate arguments */ static int validate_args(void) { if (File1 == NULL) { fprintf(stderr, "no file provided"); return -1; } else if (File2 == NULL && Length == 0) { fprintf(stderr, "length of the file has to be provided"); return -1; } return 0; } /* * do_cmpmap -- (internal) perform cmpmap */ static int do_cmpmap(void) { int ret = 0; int fd1; int fd2; size_t size1; size_t size2; /* open the first file */ if ((fd1 = os_open(File1, O_RDONLY)) < 0) { fprintf(stderr, "opening %s failed, errno %d\n", File1, errno); return -1; } ssize_t size_tmp = util_fd_get_size(fd1); if (size_tmp < 0) { fprintf(stderr, "getting size of %s failed, errno %d\n", File1, errno); ret = -1; goto out_close1; } size1 = (size_t)size_tmp; int flag = MAP_SHARED; if (Opts & CMPMAP_ZERO) { /* when checking if bytes are zeroed */ fd2 = -1; size2 = (size_t)Offset + Length; flag |= MAP_ANONYMOUS; } else if (File2 != NULL) { /* when comparing two files */ /* open the second file */ if ((fd2 = os_open(File2, O_RDONLY)) < 0) { fprintf(stderr, "opening %s failed, errno %d\n", File2, errno); ret = -1; goto out_close1; } size_tmp = util_fd_get_size(fd2); if (size_tmp < 0) { fprintf(stderr, "getting size of %s failed, errno %d\n", File2, errno); ret = -1; goto out_close2; } size2 = (size_t)size_tmp; /* basic check */ size_t min_size = (size1 < size2) ? 
size1 : size2; if ((size_t)Offset + Length > min_size) { if (size1 != size2) { fprintf(stdout, "%s %s differ in size: %zu" " %zu\n", File1, File2, size1, size2); ret = -1; goto out_close2; } else { Length = min_size - (size_t)Offset; } } } else { assert(0); } /* initialize utils */ util_init(); /* map the first file */ void *addr1; if ((addr1 = util_map(fd1, 0, size1, MAP_SHARED, 1, 0, NULL)) == MAP_FAILED) { fprintf(stderr, "mmap failed, file %s, length %zu, offset 0," " errno %d\n", File1, size1, errno); ret = -1; goto out_close2; } /* map the second file, or do anonymous mapping to get zeroed bytes */ void *addr2; if ((addr2 = util_map(fd2, 0, size2, flag, 1, 0, NULL)) == MAP_FAILED) { fprintf(stderr, "mmap failed, file %s, length %zu, errno %d\n", File2 ? File2 : "(anonymous)", size2, errno); ret = -1; goto out_unmap1; } /* compare bytes of memory */ if ((ret = memcmp(ADDR_SUM(addr1, Offset), ADDR_SUM(addr2, Offset), Length))) { if (Opts & CMPMAP_ZERO) fprintf(stderr, "%s is not zeroed\n", File1); else fprintf(stderr, "%s %s differ\n", File1, File2); ret = -1; } munmap(addr2, size2); out_unmap1: munmap(addr1, size1); out_close2: if (File2 != NULL) (void) os_close(fd2); out_close1: (void) os_close(fd1); return ret; } int main(int argc, char *argv[]) { #ifdef _WIN32 util_suppress_errmsg(); wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc); for (int i = 0; i < argc; i++) { argv[i] = util_toUTF8(wargv[i]); if (argv[i] == NULL) { for (i--; i >= 0; i--) free(argv[i]); fprintf(stderr, "Error during arguments conversion\n"); return 1; } } #endif int ret = EXIT_FAILURE; if (parse_args(argc, argv)) goto end; if (validate_args()) goto end; if (do_cmpmap()) goto end; ret = EXIT_SUCCESS; end: #ifdef _WIN32 for (int i = argc; i > 0; i--) free(argv[i - 1]); #endif exit(ret); }
5,918
20.291367
73
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/tools/ctrld/signals_linux.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017, Intel Corporation */ /* * signals_linux.h - Signal definitions for Linux */ #ifndef _SIGNALS_LINUX_H #define _SIGNALS_LINUX_H 1 #define SIGNAL_2_STR(sig) [sig] = #sig static const char *signal2str[] = { SIGNAL_2_STR(SIGHUP), /* 1 */ SIGNAL_2_STR(SIGINT), /* 2 */ SIGNAL_2_STR(SIGQUIT), /* 3 */ SIGNAL_2_STR(SIGILL), /* 4 */ SIGNAL_2_STR(SIGTRAP), /* 5 */ SIGNAL_2_STR(SIGABRT), /* 6 */ SIGNAL_2_STR(SIGBUS), /* 7 */ SIGNAL_2_STR(SIGFPE), /* 8 */ SIGNAL_2_STR(SIGKILL), /* 9 */ SIGNAL_2_STR(SIGUSR1), /* 10 */ SIGNAL_2_STR(SIGSEGV), /* 11 */ SIGNAL_2_STR(SIGUSR2), /* 12 */ SIGNAL_2_STR(SIGPIPE), /* 13 */ SIGNAL_2_STR(SIGALRM), /* 14 */ SIGNAL_2_STR(SIGTERM), /* 15 */ SIGNAL_2_STR(SIGSTKFLT), /* 16 */ SIGNAL_2_STR(SIGCHLD), /* 17 */ SIGNAL_2_STR(SIGCONT), /* 18 */ SIGNAL_2_STR(SIGSTOP), /* 19 */ SIGNAL_2_STR(SIGTSTP), /* 20 */ SIGNAL_2_STR(SIGTTIN), /* 21 */ SIGNAL_2_STR(SIGTTOU), /* 22 */ SIGNAL_2_STR(SIGURG), /* 23 */ SIGNAL_2_STR(SIGXCPU), /* 24 */ SIGNAL_2_STR(SIGXFSZ), /* 25 */ SIGNAL_2_STR(SIGVTALRM), /* 26 */ SIGNAL_2_STR(SIGPROF), /* 27 */ SIGNAL_2_STR(SIGWINCH), /* 28 */ SIGNAL_2_STR(SIGPOLL), /* 29 */ SIGNAL_2_STR(SIGPWR), /* 30 */ SIGNAL_2_STR(SIGSYS) /* 31 */ }; #define SIGNALMAX SIGSYS #endif
1,322
27.148936
49
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/tools/ctrld/signals_freebsd.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017, Intel Corporation */ /* * signals_fbsd.h - Signal definitions for FreeBSD */ #ifndef _SIGNALS_FBSD_H #define _SIGNALS_FBSD_H 1 #define SIGNAL_2_STR(sig) [sig] = #sig static const char *signal2str[] = { SIGNAL_2_STR(SIGHUP), /* 1 */ SIGNAL_2_STR(SIGINT), /* 2 */ SIGNAL_2_STR(SIGQUIT), /* 3 */ SIGNAL_2_STR(SIGILL), /* 4 */ SIGNAL_2_STR(SIGTRAP), /* 5 */ SIGNAL_2_STR(SIGABRT), /* 6 */ SIGNAL_2_STR(SIGEMT), /* 7 */ SIGNAL_2_STR(SIGFPE), /* 8 */ SIGNAL_2_STR(SIGKILL), /* 9 */ SIGNAL_2_STR(SIGBUS), /* 10 */ SIGNAL_2_STR(SIGSEGV), /* 11 */ SIGNAL_2_STR(SIGSYS), /* 12 */ SIGNAL_2_STR(SIGPIPE), /* 13 */ SIGNAL_2_STR(SIGALRM), /* 14 */ SIGNAL_2_STR(SIGTERM), /* 15 */ SIGNAL_2_STR(SIGURG), /* 16 */ SIGNAL_2_STR(SIGSTOP), /* 17 */ SIGNAL_2_STR(SIGTSTP), /* 18 */ SIGNAL_2_STR(SIGCONT), /* 19 */ SIGNAL_2_STR(SIGCHLD), /* 20 */ SIGNAL_2_STR(SIGTTIN), /* 21 */ SIGNAL_2_STR(SIGTTOU), /* 22 */ SIGNAL_2_STR(SIGIO), /* 23 */ SIGNAL_2_STR(SIGXCPU), /* 24 */ SIGNAL_2_STR(SIGXFSZ), /* 25 */ SIGNAL_2_STR(SIGVTALRM), /* 26 */ SIGNAL_2_STR(SIGPROF), /* 27 */ SIGNAL_2_STR(SIGWINCH), /* 28 */ SIGNAL_2_STR(SIGINFO), /* 29 */ SIGNAL_2_STR(SIGUSR1), /* 30 */ SIGNAL_2_STR(SIGUSR2), /* 31 */ SIGNAL_2_STR(SIGTHR), /* 32 */ SIGNAL_2_STR(SIGLIBRT) /* 33 */ }; #define SIGNALMAX SIGLIBRT #endif
1,386
26.74
50
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_locks/obj_locks.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */

/*
 * obj_locks.c -- unit test for PMEMmutex, PMEMrwlock and PMEMcond
 */
#include <sys/param.h>
#include <string.h>

#include "unittest.h"
#include "libpmemobj.h"

#define LAYOUT_NAME "obj_locks"

#define NUM_THREADS 16
#define MAX_FUNC 5

TOID_DECLARE(struct locks, 0);

/* pool-resident object holding one lock of each kind plus a counter */
struct locks {
	PMEMobjpool *pop;	/* pool the locks live in */
	PMEMmutex mtx;
	PMEMrwlock rwlk;
	PMEMcond cond;
	int data;		/* shared counter incremented under the locks */
};

/* per-thread arguments passed to each lock-exercising function */
struct thread_args {
	os_thread_t t;
	TOID(struct locks) lock;
	int t_id;		/* thread index, 0..NUM_THREADS-1 */
};

typedef void *(*fn_lock)(void *arg);
static struct thread_args threads[NUM_THREADS];

/*
 * do_mutex_lock -- lock and unlock the mutex
 */
static void *
do_mutex_lock(void *arg)
{
	struct thread_args *t = (struct thread_args *)arg;
	struct locks *lock = D_RW(t->lock);
	pmemobj_mutex_lock(lock->pop, &lock->mtx);
	lock->data++;
	/* persist the increment while still holding the lock */
	pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
	pmemobj_mutex_unlock(lock->pop, &lock->mtx);
	return NULL;
}

/*
 * do_rwlock_wrlock -- lock and unlock the write rwlock
 */
static void *
do_rwlock_wrlock(void *arg)
{
	struct thread_args *t = (struct thread_args *)arg;
	struct locks *lock = D_RW(t->lock);
	pmemobj_rwlock_wrlock(lock->pop, &lock->rwlk);
	lock->data++;
	pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
	pmemobj_rwlock_unlock(lock->pop, &lock->rwlk);
	return NULL;
}

/*
 * do_rwlock_rdlock -- lock and unlock the read rwlock
 *
 * Readers do not modify 'data', so after this test the counter is
 * expected to remain 0.
 */
static void *
do_rwlock_rdlock(void *arg)
{
	struct thread_args *t = (struct thread_args *)arg;
	struct locks *lock = D_RW(t->lock);
	pmemobj_rwlock_rdlock(lock->pop, &lock->rwlk);
	pmemobj_rwlock_unlock(lock->pop, &lock->rwlk);
	return NULL;
}

/*
 * do_cond_signal -- lock block on a condition variables,
 * and unlock them by signal
 *
 * Thread 0 waits until all other threads have incremented the counter;
 * every other thread increments and signals.
 */
static void *
do_cond_signal(void *arg)
{
	struct thread_args *t = (struct thread_args *)arg;
	struct locks *lock = D_RW(t->lock);
	if (t->t_id == 0) {
		pmemobj_mutex_lock(lock->pop, &lock->mtx);
		/* re-check the predicate after every wakeup */
		while (lock->data < (NUM_THREADS - 1))
			pmemobj_cond_wait(lock->pop, &lock->cond,
				&lock->mtx);
		lock->data++;
		pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
		pmemobj_mutex_unlock(lock->pop, &lock->mtx);
	} else {
		pmemobj_mutex_lock(lock->pop, &lock->mtx);
		lock->data++;
		pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
		pmemobj_cond_signal(lock->pop, &lock->cond);
		pmemobj_mutex_unlock(lock->pop, &lock->mtx);
	}
	return NULL;
}

/*
 * do_cond_broadcast -- lock block on a condition variables and unlock
 * by broadcasting
 *
 * The lower half of the threads waits until the upper half has
 * incremented the counter; the upper half increments and broadcasts.
 */
static void *
do_cond_broadcast(void *arg)
{
	struct thread_args *t = (struct thread_args *)arg;
	struct locks *lock = D_RW(t->lock);
	if (t->t_id < (NUM_THREADS / 2)) {
		pmemobj_mutex_lock(lock->pop, &lock->mtx);
		while (lock->data < (NUM_THREADS / 2))
			pmemobj_cond_wait(lock->pop, &lock->cond,
				&lock->mtx);
		lock->data++;
		pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
		pmemobj_mutex_unlock(lock->pop, &lock->mtx);
	} else {
		pmemobj_mutex_lock(lock->pop, &lock->mtx);
		lock->data++;
		pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data));
		pmemobj_cond_broadcast(lock->pop, &lock->cond);
		pmemobj_mutex_unlock(lock->pop, &lock->mtx);
	}
	return NULL;
}

/* dispatch table: one lock-exercising scenario per MAX_FUNC index */
static fn_lock do_lock[MAX_FUNC] = {do_mutex_lock, do_rwlock_wrlock,
				do_rwlock_rdlock, do_cond_signal,
				do_cond_broadcast};

/*
 * do_lock_init -- initialize all types of locks
 */
static void
do_lock_init(struct locks *lock)
{
	pmemobj_mutex_zero(lock->pop, &lock->mtx);
	pmemobj_rwlock_zero(lock->pop, &lock->rwlk);
	pmemobj_cond_zero(lock->pop, &lock->cond);
}

/*
 * do_lock_mt -- perform multithread lock operations
 *
 * Runs scenario f_num on NUM_THREADS threads and verifies the counter.
 */
static void
do_lock_mt(TOID(struct locks) lock, unsigned f_num)
{
	D_RW(lock)->data = 0;
	for (int i = 0; i < NUM_THREADS; ++i) {
		threads[i].lock = lock;
		threads[i].t_id = i;
		THREAD_CREATE(&threads[i].t, NULL, do_lock[f_num],
			&threads[i]);
	}
	for (int i = 0; i < NUM_THREADS; ++i)
		THREAD_JOIN(&threads[i].t, NULL);
	/*
	 * If all threads passed function properly and used every lock, there
	 * should be every element in data array incremented exactly one time
	 * by every thread.
	 */
	/* 0 is the expected value for the read-lock scenario (no writes) */
	UT_ASSERT((D_RO(lock)->data == NUM_THREADS) ||
		(D_RO(lock)->data == 0));
}

int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_locks");
	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);

	PMEMobjpool *pop;
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME,
	    PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");

	TOID(struct locks) lock;
	POBJ_ALLOC(pop, &lock, struct locks, sizeof(struct locks),
		NULL, NULL);
	D_RW(lock)->pop = pop;

	do_lock_init(D_RW(lock));

	/* run each lock scenario in turn */
	for (unsigned i = 0; i < MAX_FUNC; i++)
		do_lock_mt(lock, i);

	POBJ_FREE(&lock);

	pmemobj_close(pop);

	DONE(NULL);
}
4,821
22.99005
72
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/libpmempool_feature/libpmempool_feature.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * libpmempool_feature -- pmempool_feature_(enable|disable|query) test * */ #include <stddef.h> #include <stdlib.h> #include <stdio.h> #include "libpmempool.h" #include "pool_hdr.h" #include "unittest.h" #define EMPTY_FLAGS 0 /* * print_usage -- print usage of program */ static void print_usage(const char *name) { UT_OUT("usage: %s <pool_path> (e|d|q) <feature-name>", name); UT_OUT("feature-name: SINGLEHDR, CKSUM_2K, SHUTDOWN_STATE"); } /* * str2pmempool_feature -- convert feature name to pmempool_feature enum */ static enum pmempool_feature str2pmempool_feature(const char *app, const char *str) { uint32_t fval = util_str2pmempool_feature(str); if (fval == UINT32_MAX) { print_usage(app); UT_FATAL("unknown feature: %s", str); } return (enum pmempool_feature)fval; } int main(int argc, char *argv[]) { START(argc, argv, "libpmempool_feature"); if (argc < 4) { print_usage(argv[0]); UT_FATAL("insufficient number of arguments: %d", argc - 1); } const char *path = argv[1]; char cmd = argv[2][0]; enum pmempool_feature feature = str2pmempool_feature(argv[0], argv[3]); int ret; switch (cmd) { case 'e': return pmempool_feature_enable(path, feature, EMPTY_FLAGS); case 'd': return pmempool_feature_disable(path, feature, EMPTY_FLAGS); case 'q': ret = pmempool_feature_query(path, feature, EMPTY_FLAGS); if (ret < 0) return 1; UT_OUT("query %s result is %d", argv[3], ret); return 0; default: print_usage(argv[0]); UT_FATAL("unknown command: %c", cmd); } DONE(NULL); }
1,622
20.077922
72
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_tx_flow/obj_tx_flow.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_tx_flow.c -- unit test for transaction flow */ #include "unittest.h" #include "obj.h" #define LAYOUT_NAME "direct" #define TEST_VALUE_A 5 #define TEST_VALUE_B 10 #define TEST_VALUE_C 15 #define OPS_NUM 9 TOID_DECLARE(struct test_obj, 1); struct test_obj { int a; int b; int c; }; static void do_tx_macro_commit(PMEMobjpool *pop, TOID(struct test_obj) *obj) { TX_BEGIN(pop) { D_RW(*obj)->a = TEST_VALUE_A; } TX_ONCOMMIT { UT_ASSERT(D_RW(*obj)->a == TEST_VALUE_A); D_RW(*obj)->b = TEST_VALUE_B; } TX_ONABORT { /* not called */ D_RW(*obj)->a = TEST_VALUE_B; } TX_FINALLY { UT_ASSERT(D_RW(*obj)->b == TEST_VALUE_B); D_RW(*obj)->c = TEST_VALUE_C; } TX_END } static void do_tx_macro_abort(PMEMobjpool *pop, TOID(struct test_obj) *obj) { D_RW(*obj)->a = TEST_VALUE_A; D_RW(*obj)->b = TEST_VALUE_B; TX_BEGIN(pop) { TX_ADD(*obj); D_RW(*obj)->a = TEST_VALUE_B; pmemobj_tx_abort(EINVAL); D_RW(*obj)->b = TEST_VALUE_A; } TX_ONCOMMIT { /* not called */ D_RW(*obj)->a = TEST_VALUE_B; } TX_ONABORT { UT_ASSERT(D_RW(*obj)->a == TEST_VALUE_A); UT_ASSERT(D_RW(*obj)->b == TEST_VALUE_B); D_RW(*obj)->b = TEST_VALUE_B; } TX_FINALLY { UT_ASSERT(D_RW(*obj)->b == TEST_VALUE_B); D_RW(*obj)->c = TEST_VALUE_C; } TX_END } static void do_tx_macro_commit_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj) { TX_BEGIN(pop) { TX_BEGIN(pop) { D_RW(*obj)->a = TEST_VALUE_A; } TX_ONCOMMIT { UT_ASSERT(D_RW(*obj)->a == TEST_VALUE_A); D_RW(*obj)->b = TEST_VALUE_B; } TX_END } TX_ONCOMMIT { D_RW(*obj)->c = TEST_VALUE_C; } TX_END } static void do_tx_macro_abort_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj) { volatile int a = 0; volatile int b = 0; volatile int c = 0; D_RW(*obj)->a = TEST_VALUE_A; D_RW(*obj)->b = TEST_VALUE_B; TX_BEGIN(pop) { TX_ADD(*obj); D_RW(*obj)->a = TEST_VALUE_B; a = TEST_VALUE_C; TX_BEGIN(pop) { D_RW(*obj)->b = TEST_VALUE_C; a = TEST_VALUE_A; pmemobj_tx_abort(EINVAL); a = TEST_VALUE_B; } TX_ONCOMMIT 
{ /* not called */ a = TEST_VALUE_C; } TX_ONABORT { UT_ASSERT(a == TEST_VALUE_A); b = TEST_VALUE_B; } TX_FINALLY { UT_ASSERT(b == TEST_VALUE_B); c = TEST_VALUE_C; } TX_END a = TEST_VALUE_B; } TX_ONCOMMIT { /* not called */ UT_ASSERT(a == TEST_VALUE_A); c = TEST_VALUE_C; } TX_ONABORT { UT_ASSERT(a == TEST_VALUE_A); UT_ASSERT(b == TEST_VALUE_B); UT_ASSERT(c == TEST_VALUE_C); b = TEST_VALUE_A; } TX_FINALLY { UT_ASSERT(b == TEST_VALUE_A); D_RW(*obj)->c = TEST_VALUE_C; a = TEST_VALUE_B; } TX_END UT_ASSERT(a == TEST_VALUE_B); } static void do_tx_macro_abort_nested_begin(PMEMobjpool *pop, TOID(struct test_obj) *obj) { errno = 0; TX_BEGIN(pop) { D_RW(*obj)->a = TEST_VALUE_A; D_RW(*obj)->b = TEST_VALUE_B; pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); TX_BEGIN((PMEMobjpool *)(uintptr_t)7) { } TX_ONABORT { UT_ASSERT(0); } TX_END UT_ASSERT(errno == EINVAL); } TX_ONABORT { D_RW(*obj)->c = TEST_VALUE_C; } TX_ONCOMMIT { /* not called */ D_RW(*obj)->a = TEST_VALUE_B; } TX_END } static void do_tx_commit(PMEMobjpool *pop, TOID(struct test_obj) *obj) { pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); D_RW(*obj)->a = TEST_VALUE_A; TX_ADD(*obj); D_RW(*obj)->b = TEST_VALUE_B; pmemobj_tx_commit(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT); D_RW(*obj)->c = TEST_VALUE_C; pmemobj_tx_end(); } static void do_tx_commit_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj) { pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); TX_ADD(*obj); D_RW(*obj)->a = TEST_VALUE_A; pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); TX_ADD(*obj); D_RW(*obj)->b = TEST_VALUE_B; pmemobj_tx_commit(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT); pmemobj_tx_end(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK); pmemobj_tx_commit(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT); D_RW(*obj)->c = TEST_VALUE_C; pmemobj_tx_end(); } static void do_tx_abort(PMEMobjpool *pop, TOID(struct test_obj) *obj) { D_RW(*obj)->a = TEST_VALUE_A; pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); D_RW(*obj)->b = TEST_VALUE_B; 
TX_ADD(*obj); D_RW(*obj)->a = 0; pmemobj_tx_abort(EINVAL); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT); D_RW(*obj)->c = TEST_VALUE_C; pmemobj_tx_end(); } static void do_tx_abort_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj) { D_RW(*obj)->a = TEST_VALUE_A; D_RW(*obj)->b = TEST_VALUE_B; pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); TX_ADD(*obj); D_RW(*obj)->a = 0; pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); TX_ADD(*obj); D_RW(*obj)->b = 0; pmemobj_tx_abort(EINVAL); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT); pmemobj_tx_end(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT); D_RW(*obj)->c = TEST_VALUE_C; pmemobj_tx_end(); } typedef void (*fn_op)(PMEMobjpool *pop, TOID(struct test_obj) *obj); static fn_op tx_op[OPS_NUM] = {do_tx_macro_commit, do_tx_macro_abort, do_tx_macro_commit_nested, do_tx_macro_abort_nested, do_tx_macro_abort_nested_begin, do_tx_commit, do_tx_commit_nested, do_tx_abort, do_tx_abort_nested}; static void do_tx_process(PMEMobjpool *pop) { pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK); pmemobj_tx_process(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT); pmemobj_tx_process(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_FINALLY); pmemobj_tx_process(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE); pmemobj_tx_end(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE); } static void do_tx_process_nested(PMEMobjpool *pop) { pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK); pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); pmemobj_tx_process(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT); pmemobj_tx_process(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_FINALLY); pmemobj_tx_end(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK); pmemobj_tx_abort(EINVAL); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT); pmemobj_tx_process(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_FINALLY); pmemobj_tx_process(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE); 
pmemobj_tx_end(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE); } static void do_fault_injection(PMEMobjpool *pop) { if (!pmemobj_fault_injection_enabled()) return; pmemobj_inject_fault_at(PMEM_MALLOC, 1, "pmemobj_tx_begin"); int ret = pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); UT_ASSERTne(ret, 0); UT_ASSERTeq(errno, ENOMEM); } int main(int argc, char *argv[]) { START(argc, argv, "obj_tx_flow"); if (argc != 3) UT_FATAL("usage: %s [file]", argv[0]); PMEMobjpool *pop; if ((pop = pmemobj_create(argv[2], LAYOUT_NAME, PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create"); TOID(struct test_obj) obj; POBJ_ZNEW(pop, &obj, struct test_obj); for (int i = 0; i < OPS_NUM; i++) { D_RW(obj)->a = 0; D_RW(obj)->b = 0; D_RW(obj)->c = 0; tx_op[i](pop, &obj); UT_ASSERT(D_RO(obj)->a == TEST_VALUE_A); UT_ASSERT(D_RO(obj)->b == TEST_VALUE_B); UT_ASSERT(D_RO(obj)->c == TEST_VALUE_C); } switch (argv[1][0]) { case 't': do_tx_process(pop); do_tx_process_nested(pop); break; case 'f': do_fault_injection(pop); break; default: UT_FATAL("usage: %s [t|f]", argv[0]); } pmemobj_close(pop); DONE(NULL); }
7,445
23.574257
76
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/util_pool_hdr/util_pool_hdr.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2020, Intel Corporation */ /* * util_pool_hdr.c -- unit test for pool_hdr layout and default values * * This test should be modified after every layout change. It's here to prevent * any accidental layout changes. */ #include "util.h" #include "unittest.h" #include "set.h" #include "pool_hdr.h" #define POOL_HDR_SIG_LEN_V1 (8) #define POOL_HDR_UNUSED_LEN_V1 (1904) #define POOL_HDR_UNUSED2_LEN_V1 (1976) #define POOL_HDR_2K_CHECKPOINT (2048UL) #define FEATURES_T_SIZE_V1 (12) #define ARCH_FLAGS_SIZE_V1 (16) #define ARCH_FLAGS_RESERVED_LEN_V1 (4) #define SHUTDOWN_STATE_SIZE_V1 (64) #define SHUTDOWN_STATE_RESERVED_LEN_V1 (39) /* * test_layout -- test pool_hdr layout */ static void test_layout() { ASSERT_ALIGNED_BEGIN(struct pool_hdr); ASSERT_ALIGNED_FIELD(struct pool_hdr, signature); ASSERT_FIELD_SIZE(signature, POOL_HDR_SIG_LEN_V1); ASSERT_ALIGNED_FIELD(struct pool_hdr, major); ASSERT_ALIGNED_FIELD(struct pool_hdr, features); ASSERT_ALIGNED_FIELD(struct pool_hdr, poolset_uuid); ASSERT_ALIGNED_FIELD(struct pool_hdr, uuid); ASSERT_ALIGNED_FIELD(struct pool_hdr, prev_part_uuid); ASSERT_ALIGNED_FIELD(struct pool_hdr, next_part_uuid); ASSERT_ALIGNED_FIELD(struct pool_hdr, prev_repl_uuid); ASSERT_ALIGNED_FIELD(struct pool_hdr, next_repl_uuid); ASSERT_ALIGNED_FIELD(struct pool_hdr, crtime); ASSERT_ALIGNED_FIELD(struct pool_hdr, arch_flags); ASSERT_ALIGNED_FIELD(struct pool_hdr, unused); ASSERT_FIELD_SIZE(unused, POOL_HDR_UNUSED_LEN_V1); ASSERT_OFFSET_CHECKPOINT(struct pool_hdr, POOL_HDR_2K_CHECKPOINT); ASSERT_ALIGNED_FIELD(struct pool_hdr, unused2); ASSERT_FIELD_SIZE(unused2, POOL_HDR_UNUSED2_LEN_V1); ASSERT_ALIGNED_FIELD(struct pool_hdr, sds); ASSERT_ALIGNED_FIELD(struct pool_hdr, checksum); #if PMEM_PAGESIZE > 4096 ASSERT_ALIGNED_FIELD(struct pool_hdr, align_pad); #endif ASSERT_ALIGNED_CHECK(struct pool_hdr); ASSERT_ALIGNED_BEGIN(features_t); ASSERT_ALIGNED_FIELD(features_t, compat); ASSERT_ALIGNED_FIELD(features_t, 
incompat); ASSERT_ALIGNED_FIELD(features_t, ro_compat); ASSERT_ALIGNED_CHECK(features_t); UT_COMPILE_ERROR_ON(sizeof(features_t) != FEATURES_T_SIZE_V1); ASSERT_ALIGNED_BEGIN(struct arch_flags); ASSERT_ALIGNED_FIELD(struct arch_flags, alignment_desc); ASSERT_ALIGNED_FIELD(struct arch_flags, machine_class); ASSERT_ALIGNED_FIELD(struct arch_flags, data); ASSERT_ALIGNED_FIELD(struct arch_flags, reserved); ASSERT_FIELD_SIZE(reserved, ARCH_FLAGS_RESERVED_LEN_V1); ASSERT_ALIGNED_FIELD(struct arch_flags, machine); ASSERT_ALIGNED_CHECK(struct arch_flags); UT_COMPILE_ERROR_ON(sizeof(struct arch_flags) != ARCH_FLAGS_SIZE_V1); ASSERT_ALIGNED_BEGIN(struct shutdown_state); ASSERT_ALIGNED_FIELD(struct shutdown_state, usc); ASSERT_ALIGNED_FIELD(struct shutdown_state, uuid); ASSERT_ALIGNED_FIELD(struct shutdown_state, dirty); ASSERT_ALIGNED_FIELD(struct shutdown_state, reserved); ASSERT_FIELD_SIZE(reserved, SHUTDOWN_STATE_RESERVED_LEN_V1); ASSERT_ALIGNED_FIELD(struct shutdown_state, checksum); ASSERT_ALIGNED_CHECK(struct shutdown_state); UT_COMPILE_ERROR_ON(sizeof(struct shutdown_state) != SHUTDOWN_STATE_SIZE_V1); } /* incompat features - final values */ #define POOL_FEAT_SINGLEHDR_FINAL 0x0001U #define POOL_FEAT_CKSUM_2K_FINAL 0x0002U #define POOL_FEAT_SDS_FINAL 0x0004U /* incompat features effective values */ #if defined(_WIN32) || NDCTL_ENABLED #ifdef SDS_ENABLED #define POOL_E_FEAT_SDS_FINAL POOL_FEAT_SDS_FINAL #else #define POOL_E_FEAT_SDS_FINAL 0x0000U /* empty */ #endif #else /* * shutdown state support on Linux requires root access on kernel < 4.20 with * ndctl < 63 so it is disabled by default */ #define POOL_E_FEAT_SDS_FINAL 0x0000U /* empty */ #endif #define POOL_FEAT_INCOMPAT_DEFAULT_V1 \ (POOL_FEAT_CKSUM_2K_FINAL | POOL_E_FEAT_SDS_FINAL) #ifdef _WIN32 #define SDS_AT_CREATE_EXPECTED 1 #else #define SDS_AT_CREATE_EXPECTED 0 #endif /* * test_default_values -- test default values */ static void test_default_values() { UT_COMPILE_ERROR_ON(POOL_FEAT_SINGLEHDR != 
POOL_FEAT_SINGLEHDR_FINAL); UT_COMPILE_ERROR_ON(POOL_FEAT_CKSUM_2K != POOL_FEAT_CKSUM_2K_FINAL); UT_COMPILE_ERROR_ON(POOL_FEAT_SDS != POOL_FEAT_SDS_FINAL); UT_COMPILE_ERROR_ON(SDS_at_create != SDS_AT_CREATE_EXPECTED); UT_COMPILE_ERROR_ON(POOL_FEAT_INCOMPAT_DEFAULT != POOL_FEAT_INCOMPAT_DEFAULT_V1); } int main(int argc, char *argv[]) { START(argc, argv, "util_pool_hdr"); test_layout(); test_default_values(); DONE(NULL); }
4,508
30.531469
79
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/util_map_proc/util_map_proc.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * util_map_proc.c -- unit test for util_map() /proc parsing * * usage: util_map_proc maps_file len [len]... */ #define _GNU_SOURCE #include <dlfcn.h> #include "unittest.h" #include "util.h" #include "mmap.h" #define GIGABYTE ((uintptr_t)1 << 30) #define TERABYTE ((uintptr_t)1 << 40) int main(int argc, char *argv[]) { START(argc, argv, "util_map_proc"); util_init(); util_mmap_init(); if (argc < 3) UT_FATAL("usage: %s maps_file len [len]...", argv[0]); Mmap_mapfile = argv[1]; UT_OUT("redirecting " OS_MAPFILE " to %s", Mmap_mapfile); for (int arg = 2; arg < argc; arg++) { size_t len = (size_t)strtoull(argv[arg], NULL, 0); size_t align = 2 * MEGABYTE; if (len >= 2 * GIGABYTE) align = GIGABYTE; void *h1 = util_map_hint_unused((void *)TERABYTE, len, GIGABYTE); void *h2 = util_map_hint(len, 0); if (h1 != MAP_FAILED && h1 != NULL) UT_ASSERTeq((uintptr_t)h1 & (GIGABYTE - 1), 0); if (h2 != MAP_FAILED && h2 != NULL) UT_ASSERTeq((uintptr_t)h2 & (align - 1), 0); if (h1 == NULL) /* XXX portability */ UT_OUT("len %zu: (nil) %p", len, h2); else if (h2 == NULL) UT_OUT("len %zu: %p (nil)", len, h1); else UT_OUT("len %zu: %p %p", len, h1, h2); } util_mmap_fini(); DONE(NULL); }
1,335
21.644068
60
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/win_lists/win_lists.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * win_lists.c -- test list routines used in windows implementation */ #include "unittest.h" #include "queue.h" typedef struct TEST_LIST_NODE { PMDK_LIST_ENTRY(TEST_LIST_NODE) ListEntry; int dummy; } *PTEST_LIST_NODE; PMDK_LIST_HEAD(TestList, TEST_LIST_NODE); static void dump_list(struct TestList *head) { PTEST_LIST_NODE pNode = NULL; pNode = (PTEST_LIST_NODE)PMDK_LIST_FIRST(head); while (pNode != NULL) { UT_OUT("Node value: %d", pNode->dummy); pNode = (PTEST_LIST_NODE)PMDK_LIST_NEXT(pNode, ListEntry); } } static int get_list_count(struct TestList *head) { PTEST_LIST_NODE pNode = NULL; int listCount = 0; pNode = (PTEST_LIST_NODE)PMDK_LIST_FIRST(head); while (pNode != NULL) { listCount++; pNode = (PTEST_LIST_NODE)PMDK_LIST_NEXT(pNode, ListEntry); } return listCount; } /* * test_list - Do some basic list manipulations and output to log for * script comparison. Only testing the macros we use. */ static void test_list(void) { PTEST_LIST_NODE pNode = NULL; struct TestList head = PMDK_LIST_HEAD_INITIALIZER(head); PMDK_LIST_INIT(&head); UT_ASSERT_rt(PMDK_LIST_EMPTY(&head)); pNode = MALLOC(sizeof(struct TEST_LIST_NODE)); pNode->dummy = 0; PMDK_LIST_INSERT_HEAD(&head, pNode, ListEntry); UT_ASSERTeq_rt(1, get_list_count(&head)); dump_list(&head); /* Remove one node */ PMDK_LIST_REMOVE(pNode, ListEntry); UT_ASSERTeq_rt(0, get_list_count(&head)); dump_list(&head); free(pNode); /* Add a bunch of nodes */ for (int i = 1; i < 10; i++) { pNode = MALLOC(sizeof(struct TEST_LIST_NODE)); pNode->dummy = i; PMDK_LIST_INSERT_HEAD(&head, pNode, ListEntry); } UT_ASSERTeq_rt(9, get_list_count(&head)); dump_list(&head); /* Remove all of them */ while (!PMDK_LIST_EMPTY(&head)) { pNode = (PTEST_LIST_NODE)PMDK_LIST_FIRST(&head); PMDK_LIST_REMOVE(pNode, ListEntry); free(pNode); } UT_ASSERTeq_rt(0, get_list_count(&head)); dump_list(&head); } typedef struct TEST_SORTEDQ_NODE { PMDK_SORTEDQ_ENTRY(TEST_SORTEDQ_NODE) queue_link; int dummy; } TEST_SORTEDQ_NODE, *PTEST_SORTEDQ_NODE; 
PMDK_SORTEDQ_HEAD(TEST_SORTEDQ, TEST_SORTEDQ_NODE); static int sortedq_node_comparer(TEST_SORTEDQ_NODE *a, TEST_SORTEDQ_NODE *b) { return a->dummy - b->dummy; } struct TEST_DATA_SORTEDQ { int count; int data[10]; }; /* * test_sortedq - Do some basic operations on SORTEDQ and make sure that the * queue is sorted for different input sequences. */ void test_sortedq(void) { PTEST_SORTEDQ_NODE node = NULL; struct TEST_SORTEDQ head = PMDK_SORTEDQ_HEAD_INITIALIZER(head); struct TEST_DATA_SORTEDQ test_data[] = { {5, {5, 7, 9, 100, 101}}, {7, {1, 2, 3, 4, 5, 6, 7}}, {5, {100, 90, 80, 70, 40}}, {6, {10, 9, 8, 7, 6, 5}}, {5, {23, 13, 27, 4, 15}}, {5, {2, 2, 2, 2, 2}} }; PMDK_SORTEDQ_INIT(&head); UT_ASSERT_rt(PMDK_SORTEDQ_EMPTY(&head)); for (int i = 0; i < _countof(test_data); i++) { for (int j = 0; j < test_data[i].count; j++) { node = MALLOC(sizeof(TEST_SORTEDQ_NODE)); node->dummy = test_data[i].data[j]; PMDK_SORTEDQ_INSERT(&head, node, queue_link, TEST_SORTEDQ_NODE, sortedq_node_comparer); } int prev = MININT; int num_entries = 0; PMDK_SORTEDQ_FOREACH(node, &head, queue_link) { UT_ASSERT(prev <= node->dummy); num_entries++; } UT_ASSERT(num_entries == test_data[i].count); while (!PMDK_SORTEDQ_EMPTY(&head)) { node = PMDK_SORTEDQ_FIRST(&head); PMDK_SORTEDQ_REMOVE(&head, node, queue_link); FREE(node); } } } int main(int argc, char *argv[]) { START(argc, argv, "win_lists - testing %s", (argc > 1) ? argv[1] : "list"); if (argc == 1 || (stricmp(argv[1], "list") == 0)) test_list(); if (argc > 1 && (stricmp(argv[1], "sortedq") == 0)) test_sortedq(); DONE(NULL); }
5,431
27
76
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_pool/obj_pool.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_pool.c -- unit test for pmemobj_create() and pmemobj_open() * Also tests pmemobj_(set/get)_user_data(). * * usage: obj_pool op path layout [poolsize mode] * * op can be: * c - create * o - open * * "poolsize" and "mode" arguments are ignored for "open" */ #include "unittest.h" #include "../libpmemobj/obj.h" #define MB ((size_t)1 << 20) #define USER_DATA_V (void *) 123456789ULL static void pool_create(const char *path, const char *layout, size_t poolsize, unsigned mode) { PMEMobjpool *pop = pmemobj_create(path, layout, poolsize, mode); if (pop == NULL) UT_OUT("!%s: pmemobj_create: %s", path, pmemobj_errormsg()); else { /* Test pmemobj_(get/set)_user data */ UT_ASSERTeq(NULL, pmemobj_get_user_data(pop)); pmemobj_set_user_data(pop, USER_DATA_V); UT_ASSERTeq(USER_DATA_V, pmemobj_get_user_data(pop)); os_stat_t stbuf; STAT(path, &stbuf); UT_OUT("%s: file size %zu mode 0%o", path, stbuf.st_size, stbuf.st_mode & 0777); pmemobj_close(pop); int result = pmemobj_check(path, layout); if (result < 0) UT_OUT("!%s: pmemobj_check", path); else if (result == 0) UT_OUT("%s: pmemobj_check: not consistent", path); } } static void pool_open(const char *path, const char *layout) { PMEMobjpool *pop = pmemobj_open(path, layout); if (pop == NULL) UT_OUT("!%s: pmemobj_open: %s", path, pmemobj_errormsg()); else { UT_OUT("%s: pmemobj_open: Success", path); UT_ASSERTeq(NULL, pmemobj_get_user_data(pop)); pmemobj_close(pop); } } static void test_fault_injection(const char *path, const char *layout, size_t poolsize, unsigned mode) { if (!pmemobj_fault_injection_enabled()) return; pmemobj_inject_fault_at(PMEM_MALLOC, 1, "tx_params_new"); PMEMobjpool *pop = pmemobj_create(path, layout, poolsize, mode); UT_ASSERTeq(pop, NULL); UT_ASSERTeq(errno, ENOMEM); } int main(int argc, char *argv[]) { START(argc, argv, "obj_pool"); if (argc < 4) UT_FATAL("usage: %s op path layout [poolsize mode]", argv[0]); char 
*layout = NULL; size_t poolsize; unsigned mode; if (strcmp(argv[3], "EMPTY") == 0) layout = ""; else if (strcmp(argv[3], "NULL") != 0) layout = argv[3]; switch (argv[1][0]) { case 'c': poolsize = strtoull(argv[4], NULL, 0) * MB; /* in megabytes */ mode = strtoul(argv[5], NULL, 8); pool_create(argv[2], layout, poolsize, mode); break; case 'o': pool_open(argv[2], layout); break; case 'f': os_setenv("PMEMOBJ_CONF", "invalid-query", 1); pool_open(argv[2], layout); os_unsetenv("PMEMOBJ_CONF"); pool_open(argv[2], layout); break; case 't': poolsize = strtoull(argv[4], NULL, 0) * MB; /* in megabytes */ mode = strtoul(argv[5], NULL, 8); test_fault_injection(argv[2], layout, poolsize, mode); break; default: UT_FATAL("unknown operation"); } DONE(NULL); }
2,905
21.527132
75
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_memset/pmem2_memset.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * pmem_memset.c -- unit test for doing a memset * * usage: pmem_memset file offset length */ #include "unittest.h" #include "file.h" #include "ut_pmem2.h" #include "memset_common.h" static void do_memset_variants(int fd, char *dest, const char *file_name, size_t dest_off, size_t bytes, persist_fn p, memset_fn fn) { for (int i = 0; i < ARRAY_SIZE(Flags); ++i) { do_memset(fd, dest, file_name, dest_off, bytes, fn, Flags[i], p); if (Flags[i] & PMEMOBJ_F_MEM_NOFLUSH) p(dest, bytes); } } int main(int argc, char *argv[]) { int fd; char *dest; struct pmem2_config *cfg; struct pmem2_source *src; struct pmem2_map *map; if (argc != 4) UT_FATAL("usage: %s file offset length", argv[0]); const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD"); const char *avx = os_getenv("PMEM_AVX"); const char *avx512f = os_getenv("PMEM_AVX512F"); START(argc, argv, "pmem2_memset %s %s %s %savx %savx512f", argv[2], argv[3], thr ? thr : "default", avx ? "" : "!", avx512f ? "" : "!"); fd = OPEN(argv[1], O_RDWR); PMEM2_CONFIG_NEW(&cfg); PMEM2_SOURCE_FROM_FD(&src, fd); PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE); int ret = pmem2_map(cfg, src, &map); UT_PMEM2_EXPECT_RETURN(ret, 0); PMEM2_CONFIG_DELETE(&cfg); dest = pmem2_map_get_address(map); if (dest == NULL) UT_FATAL("!could not map file: %s", argv[1]); size_t dest_off = strtoul(argv[2], NULL, 0); size_t bytes = strtoul(argv[3], NULL, 0); pmem2_persist_fn persist = pmem2_get_persist_fn(map); pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map); do_memset_variants(fd, dest, argv[1], dest_off, bytes, persist, memset_fn); ret = pmem2_unmap(&map); UT_ASSERTeq(ret, 0); CLOSE(fd); DONE(NULL); }
1,810
21.6375
78
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_memset/memset_common.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * memset_common.c -- common part for tests doing a persistent memset */ #include "unittest.h" #include "memset_common.h" /* * do_memset - worker function for memset */ void do_memset(int fd, char *dest, const char *file_name, size_t dest_off, size_t bytes, memset_fn fn, unsigned flags, persist_fn persist) { char *buf = MALLOC(bytes); char *dest1; char *ret; memset(dest, 0, bytes); persist(dest, bytes); dest1 = MALLOC(bytes); memset(dest1, 0, bytes); /* * This is used to verify that the value of what a non persistent * memset matches the outcome of the persistent memset. The * persistent memset will match the file but may not be the * correct or expected value. */ memset(dest1 + dest_off, 0x5A, bytes / 4); memset(dest1 + dest_off + (bytes / 4), 0x46, bytes / 4); /* Test the corner cases */ ret = fn(dest + dest_off, 0x5A, 0, flags); UT_ASSERTeq(ret, dest + dest_off); UT_ASSERTeq(*(char *)(dest + dest_off), 0); /* * Do the actual memset with persistence. */ ret = fn(dest + dest_off, 0x5A, bytes / 4, flags); UT_ASSERTeq(ret, dest + dest_off); ret = fn(dest + dest_off + (bytes / 4), 0x46, bytes / 4, flags); UT_ASSERTeq(ret, dest + dest_off + (bytes / 4)); if (memcmp(dest, dest1, bytes / 2)) UT_FATAL("%s: first %zu bytes do not match", file_name, bytes / 2); LSEEK(fd, 0, SEEK_SET); if (READ(fd, buf, bytes / 2) == bytes / 2) { if (memcmp(buf, dest, bytes / 2)) UT_FATAL("%s: first %zu bytes do not match", file_name, bytes / 2); } FREE(dest1); FREE(buf); } unsigned Flags[] = { 0, PMEM_F_MEM_NODRAIN, PMEM_F_MEM_NONTEMPORAL, PMEM_F_MEM_TEMPORAL, PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL, PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN, PMEM_F_MEM_WC, PMEM_F_MEM_WB, PMEM_F_MEM_NOFLUSH, /* all possible flags */ PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH | PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL | PMEM_F_MEM_WC | PMEM_F_MEM_WB, };
2,043
24.55
69
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_memset/memset_common.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * memset_common.h -- header file for common memset utilities */ #ifndef MEMSET_COMMON_H #define MEMSET_COMMON_H 1 #include "unittest.h" #include "file.h" extern unsigned Flags[10]; typedef void *(*memset_fn)(void *pmemdest, int c, size_t len, unsigned flags); typedef void (*persist_fn)(const void *ptr, size_t len); void do_memset(int fd, char *dest, const char *file_name, size_t dest_off, size_t bytes, memset_fn fn, unsigned flags, persist_fn p); #endif
552
22.041667
78
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_pmalloc_basic/obj_pmalloc_basic.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * obj_pmalloc_basic.c -- unit test for pmalloc interface */ #include <stdint.h> #include "heap.h" #include "obj.h" #include "pmalloc.h" #include "unittest.h" #include "valgrind_internal.h" #include "set.h" #define MOCK_POOL_SIZE (PMEMOBJ_MIN_POOL * 3) #define TEST_MEGA_ALLOC_SIZE (10 * 1024 * 1024) #define TEST_HUGE_ALLOC_SIZE (4 * 255 * 1024) #define TEST_SMALL_ALLOC_SIZE (1000) #define TEST_MEDIUM_ALLOC_SIZE (1024 * 200) #define TEST_TINY_ALLOC_SIZE (64) #define TEST_RUNS 2 #define MAX_MALLOC_FREE_LOOP 1000 #define MALLOC_FREE_SIZE 8000 #define PAD_SIZE (PMEM_PAGESIZE - LANE_TOTAL_SIZE) struct mock_pop { PMEMobjpool p; char lanes[LANE_TOTAL_SIZE]; char padding[PAD_SIZE]; /* to page boundary */ uint64_t ptr; }; static struct mock_pop *addr; static PMEMobjpool *mock_pop; /* * drain_empty -- (internal) empty function for drain on non-pmem memory */ static void drain_empty(void) { /* do nothing */ } /* * obj_persist -- pmemobj version of pmem_persist w/o replication */ static int obj_persist(void *ctx, const void *addr, size_t len, unsigned flags) { PMEMobjpool *pop = ctx; pop->persist_local(addr, len); return 0; } /* * obj_flush -- pmemobj version of pmem_flush w/o replication */ static int obj_flush(void *ctx, const void *addr, size_t len, unsigned flags) { PMEMobjpool *pop = ctx; pop->flush_local(addr, len); return 0; } /* * obj_drain -- pmemobj version of pmem_drain w/o replication */ static void obj_drain(void *ctx) { PMEMobjpool *pop = ctx; pop->drain_local(); } static void obj_msync_nofail(const void *addr, size_t size) { if (pmem_msync(addr, size)) UT_FATAL("!pmem_msync"); } /* * obj_memcpy -- pmemobj version of memcpy w/o replication */ static void * obj_memcpy(void *ctx, void *dest, const void *src, size_t len, unsigned flags) { pmem_memcpy(dest, src, len, flags); return dest; } static void * obj_memset(void *ctx, void *ptr, int c, size_t sz, unsigned flags) { 
pmem_memset(ptr, c, sz, flags); return ptr; } static size_t test_oom_allocs(size_t size) { uint64_t max_allocs = MOCK_POOL_SIZE / size; uint64_t *allocs = CALLOC(max_allocs, sizeof(*allocs)); size_t count = 0; for (;;) { if (pmalloc(mock_pop, &addr->ptr, size, 0, 0)) { break; } UT_ASSERT(addr->ptr != 0); allocs[count++] = addr->ptr; } for (int i = 0; i < count; ++i) { addr->ptr = allocs[i]; pfree(mock_pop, &addr->ptr); UT_ASSERT(addr->ptr == 0); } UT_ASSERT(count != 0); FREE(allocs); return count; } static size_t test_oom_resrv(size_t size) { uint64_t max_allocs = MOCK_POOL_SIZE / size; uint64_t *allocs = CALLOC(max_allocs, sizeof(*allocs)); struct pobj_action *resvs = CALLOC(max_allocs, sizeof(*resvs)); size_t count = 0; for (;;) { if (palloc_reserve(&mock_pop->heap, size, NULL, NULL, 0, 0, 0, 0, &resvs[count]) != 0) break; allocs[count] = resvs[count].heap.offset; UT_ASSERT(allocs[count] != 0); count++; } for (size_t i = 0; i < count; ) { size_t nresv = MIN(count - i, 10); struct operation_context *ctx = pmalloc_operation_hold(mock_pop); palloc_publish(&mock_pop->heap, &resvs[i], nresv, ctx); pmalloc_operation_release(mock_pop); i += nresv; } for (int i = 0; i < count; ++i) { addr->ptr = allocs[i]; pfree(mock_pop, &addr->ptr); UT_ASSERT(addr->ptr == 0); } UT_ASSERT(count != 0); FREE(allocs); FREE(resvs); return count; } static void test_malloc_free_loop(size_t size) { int err; for (int i = 0; i < MAX_MALLOC_FREE_LOOP; ++i) { err = pmalloc(mock_pop, &addr->ptr, size, 0, 0); UT_ASSERTeq(err, 0); pfree(mock_pop, &addr->ptr); } } static void test_realloc(size_t org, size_t dest) { int err; struct palloc_heap *heap = &mock_pop->heap; err = pmalloc(mock_pop, &addr->ptr, org, 0, 0); UT_ASSERTeq(err, 0); UT_ASSERT(palloc_usable_size(heap, addr->ptr) >= org); err = prealloc(mock_pop, &addr->ptr, dest, 0, 0); UT_ASSERTeq(err, 0); UT_ASSERT(palloc_usable_size(heap, addr->ptr) >= dest); pfree(mock_pop, &addr->ptr); } #define PMALLOC_EXTRA 20 #define PALLOC_FLAG (1 << 15) 
#define FIRST_SIZE 1 /* use the first allocation class */ #define FIRST_USIZE 112 /* the usable size is 128 - 16 */ static void test_pmalloc_extras(PMEMobjpool *pop) { uint64_t val; int ret = pmalloc(pop, &val, FIRST_SIZE, PMALLOC_EXTRA, PALLOC_FLAG); UT_ASSERTeq(ret, 0); UT_ASSERTeq(palloc_extra(&pop->heap, val), PMALLOC_EXTRA); UT_ASSERT((palloc_flags(&pop->heap, val) & PALLOC_FLAG) == PALLOC_FLAG); UT_ASSERT(palloc_usable_size(&pop->heap, val) == FIRST_USIZE); pfree(pop, &val); } #define PMALLOC_ELEMENTS 20 static void test_pmalloc_first_next(PMEMobjpool *pop) { uint64_t vals[PMALLOC_ELEMENTS]; for (unsigned i = 0; i < PMALLOC_ELEMENTS; ++i) { int ret = pmalloc(pop, &vals[i], FIRST_SIZE, i, i); UT_ASSERTeq(ret, 0); } uint64_t off = palloc_first(&pop->heap); UT_ASSERTne(off, 0); int nvalues = 0; do { UT_ASSERTeq(vals[nvalues], off); UT_ASSERTeq(palloc_extra(&pop->heap, off), nvalues); UT_ASSERTeq(palloc_flags(&pop->heap, off), nvalues); UT_ASSERT(palloc_usable_size(&pop->heap, off) == FIRST_USIZE); nvalues ++; } while ((off = palloc_next(&pop->heap, off)) != 0); UT_ASSERTeq(nvalues, PMALLOC_ELEMENTS); for (int i = 0; i < PMALLOC_ELEMENTS; ++i) pfree(pop, &vals[i]); } static void test_mock_pool_allocs(void) { addr = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE, Ut_mmap_align); mock_pop = &addr->p; mock_pop->addr = addr; mock_pop->rdonly = 0; mock_pop->is_pmem = 0; mock_pop->heap_offset = offsetof(struct mock_pop, ptr); UT_ASSERTeq(mock_pop->heap_offset % Ut_pagesize, 0); mock_pop->nlanes = 1; mock_pop->lanes_offset = sizeof(PMEMobjpool); mock_pop->is_master_replica = 1; mock_pop->persist_local = obj_msync_nofail; mock_pop->flush_local = obj_msync_nofail; mock_pop->drain_local = drain_empty; mock_pop->p_ops.persist = obj_persist; mock_pop->p_ops.flush = obj_flush; mock_pop->p_ops.drain = obj_drain; mock_pop->p_ops.memcpy = obj_memcpy; mock_pop->p_ops.memset = obj_memset; mock_pop->p_ops.base = mock_pop; mock_pop->set = MALLOC(sizeof(*(mock_pop->set))); mock_pop->set->options = 
0; mock_pop->set->directory_based = 0; void *heap_start = (char *)mock_pop + mock_pop->heap_offset; uint64_t heap_size = MOCK_POOL_SIZE - mock_pop->heap_offset; struct stats *s = stats_new(mock_pop); UT_ASSERTne(s, NULL); heap_init(heap_start, heap_size, &mock_pop->heap_size, &mock_pop->p_ops); heap_boot(&mock_pop->heap, heap_start, heap_size, &mock_pop->heap_size, mock_pop, &mock_pop->p_ops, s, mock_pop->set); heap_buckets_init(&mock_pop->heap); /* initialize runtime lanes structure */ mock_pop->lanes_desc.runtime_nlanes = (unsigned)mock_pop->nlanes; lane_boot(mock_pop); UT_ASSERTne(mock_pop->heap.rt, NULL); test_pmalloc_extras(mock_pop); test_pmalloc_first_next(mock_pop); test_malloc_free_loop(MALLOC_FREE_SIZE); size_t medium_resv = test_oom_resrv(TEST_MEDIUM_ALLOC_SIZE); /* * Allocating till OOM and freeing the objects in a loop for different * buckets covers basically all code paths except error cases. */ size_t medium0 = test_oom_allocs(TEST_MEDIUM_ALLOC_SIZE); size_t mega0 = test_oom_allocs(TEST_MEGA_ALLOC_SIZE); size_t huge0 = test_oom_allocs(TEST_HUGE_ALLOC_SIZE); size_t small0 = test_oom_allocs(TEST_SMALL_ALLOC_SIZE); size_t tiny0 = test_oom_allocs(TEST_TINY_ALLOC_SIZE); size_t huge1 = test_oom_allocs(TEST_HUGE_ALLOC_SIZE); size_t small1 = test_oom_allocs(TEST_SMALL_ALLOC_SIZE); size_t mega1 = test_oom_allocs(TEST_MEGA_ALLOC_SIZE); size_t tiny1 = test_oom_allocs(TEST_TINY_ALLOC_SIZE); size_t medium1 = test_oom_allocs(TEST_MEDIUM_ALLOC_SIZE); UT_ASSERTeq(mega0, mega1); UT_ASSERTeq(huge0, huge1); UT_ASSERTeq(small0, small1); UT_ASSERTeq(tiny0, tiny1); UT_ASSERTeq(medium0, medium1); UT_ASSERTeq(medium0, medium_resv); /* realloc to the same size shouldn't affect anything */ for (size_t i = 0; i < tiny1; ++i) test_realloc(TEST_TINY_ALLOC_SIZE, TEST_TINY_ALLOC_SIZE); size_t tiny2 = test_oom_allocs(TEST_TINY_ALLOC_SIZE); UT_ASSERTeq(tiny1, tiny2); test_realloc(TEST_SMALL_ALLOC_SIZE, TEST_MEDIUM_ALLOC_SIZE); test_realloc(TEST_HUGE_ALLOC_SIZE, 
TEST_MEGA_ALLOC_SIZE); stats_delete(mock_pop, s); lane_cleanup(mock_pop); heap_cleanup(&mock_pop->heap); FREE(mock_pop->set); MUNMAP_ANON_ALIGNED(addr, MOCK_POOL_SIZE); } static void test_spec_compliance(void) { uint64_t max_alloc = MAX_MEMORY_BLOCK_SIZE - sizeof(struct allocation_header_legacy); UT_ASSERTeq(max_alloc, PMEMOBJ_MAX_ALLOC_SIZE); } int main(int argc, char *argv[]) { START(argc, argv, "obj_pmalloc_basic"); for (int i = 0; i < TEST_RUNS; ++i) test_mock_pool_allocs(); test_spec_compliance(); DONE(NULL); } #ifdef _MSC_VER /* * Since libpmemobj is linked statically, we need to invoke its ctor/dtor. */ MSVC_CONSTR(libpmemobj_init) MSVC_DESTR(libpmemobj_fini) #endif
8,962
23.15903
78
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/win_common/win_common.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * win_common.c -- test common POSIX or Linux API that were implemented * for Windows by our library. 
*/ #include "unittest.h" /* * test_setunsetenv - test the setenv and unsetenv APIs */ static void test_setunsetenv(void) { os_unsetenv("TEST_SETUNSETENV_ONE"); /* set a new variable without overwriting - expect the new value */ UT_ASSERT(os_setenv("TEST_SETUNSETENV_ONE", "test_setunsetenv_one", 0) == 0); UT_ASSERT(strcmp(os_getenv("TEST_SETUNSETENV_ONE"), "test_setunsetenv_one") == 0); /* set an existing variable without overwriting - expect old value */ UT_ASSERT(os_setenv("TEST_SETUNSETENV_ONE", "test_setunsetenv_two", 0) == 0); UT_ASSERT(strcmp(os_getenv("TEST_SETUNSETENV_ONE"), "test_setunsetenv_one") == 0); /* set an existing variable with overwriting - expect the new value */ UT_ASSERT(os_setenv("TEST_SETUNSETENV_ONE", "test_setunsetenv_two", 1) == 0); UT_ASSERT(strcmp(os_getenv("TEST_SETUNSETENV_ONE"), "test_setunsetenv_two") == 0); /* unset our test value - expect it to be empty */ UT_ASSERT(os_unsetenv("TEST_SETUNSETENV_ONE") == 0); UT_ASSERT(os_getenv("TEST_SETUNSETENV_ONE") == NULL); } int main(int argc, char *argv[]) { START(argc, argv, "win_common - testing %s", (argc > 1) ? argv[1] : "setunsetenv"); if (argc == 1 || (stricmp(argv[1], "setunsetenv") == 0)) test_setunsetenv(); DONE(NULL); }
3,080
35.678571
74
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_realloc/obj_realloc.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * obj_realloc.c -- unit test for pmemobj_realloc and pmemobj_zrealloc */ #include <sys/param.h> #include <string.h> #include "unittest.h" #include "heap.h" #include "alloc_class.h" #include "obj.h" #include "util.h" #define MAX_ALLOC_MUL 8 #define MAX_ALLOC_CLASS 5 POBJ_LAYOUT_BEGIN(realloc); POBJ_LAYOUT_ROOT(realloc, struct root); POBJ_LAYOUT_TOID(realloc, struct object); POBJ_LAYOUT_END(realloc); struct object { size_t value; char data[]; }; struct root { TOID(struct object) obj; char data[CHUNKSIZE - sizeof(TOID(struct object))]; }; static struct alloc_class_collection *alloc_classes; /* * test_alloc -- test allocation using realloc */ static void test_alloc(PMEMobjpool *pop, size_t size) { TOID(struct root) root = POBJ_ROOT(pop, struct root); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); int ret = pmemobj_realloc(pop, &D_RW(root)->obj.oid, size, TOID_TYPE_NUM(struct object)); UT_ASSERTeq(ret, 0); UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj)); UT_ASSERT(pmemobj_alloc_usable_size(D_RO(root)->obj.oid) >= size); } /* * test_free -- test free using realloc */ static void test_free(PMEMobjpool *pop) { TOID(struct root) root = POBJ_ROOT(pop, struct root); UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj)); int ret = pmemobj_realloc(pop, &D_RW(root)->obj.oid, 0, TOID_TYPE_NUM(struct object)); UT_ASSERTeq(ret, 0); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); } /* * test_huge_size -- test zrealloc with size greater than pool size */ static void test_huge_size(PMEMobjpool *pop) { TOID(struct root) root = POBJ_ROOT(pop, struct root); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); int ret; ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid, PMEMOBJ_MAX_ALLOC_SIZE, TOID_TYPE_NUM(struct object)); UT_ASSERTne(ret, 0); UT_ASSERTeq(errno, ENOMEM); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid, UINTMAX_MAX, TOID_TYPE_NUM(struct object)); UT_ASSERTne(ret, 0); 
UT_ASSERTeq(errno, ENOMEM); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid, UINTMAX_MAX - 1, TOID_TYPE_NUM(struct object)); UT_ASSERTne(ret, 0); UT_ASSERTeq(errno, ENOMEM); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); } /* test zrealloc passing PMEMoid that points to OID_NULL value */ static void test_null_oid(PMEMobjpool *pop) { TOID(struct root) root = POBJ_ROOT(pop, struct root); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); int ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid, 1024, TOID_TYPE_NUM(struct object)); UT_ASSERTeq(ret, 0); UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj)); pmemobj_free(&D_RW(root)->obj.oid); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); } static int check_integrity = 1; /* * fill_buffer -- fill buffer with random data and return its checksum */ static uint16_t fill_buffer(unsigned char *buf, size_t size) { for (size_t i = 0; i < size; ++i) buf[i] = rand() % 255; pmem_persist(buf, size); return ut_checksum(buf, size); } /* * test_realloc -- test single reallocation */ static void test_realloc(PMEMobjpool *pop, size_t size_from, size_t size_to, uint64_t type_from, uint64_t type_to, int zrealloc) { TOID(struct root) root = POBJ_ROOT(pop, struct root); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); int ret; if (zrealloc) ret = pmemobj_zalloc(pop, &D_RW(root)->obj.oid, size_from, type_from); else ret = pmemobj_alloc(pop, &D_RW(root)->obj.oid, size_from, type_from, NULL, NULL); UT_ASSERTeq(ret, 0); UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj)); size_t usable_size_from = pmemobj_alloc_usable_size(D_RO(root)->obj.oid); UT_ASSERT(usable_size_from >= size_from); size_t check_size; uint16_t checksum; if (zrealloc) { UT_ASSERT(util_is_zeroed(D_RO(D_RO(root)->obj), size_from)); } else if (check_integrity) { check_size = size_to >= usable_size_from ? 
usable_size_from : size_to; checksum = fill_buffer((unsigned char *)D_RW(D_RW(root)->obj), check_size); } if (zrealloc) { ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid, size_to, type_to); } else { ret = pmemobj_realloc(pop, &D_RW(root)->obj.oid, size_to, type_to); } UT_ASSERTeq(ret, 0); UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj)); size_t usable_size_to = pmemobj_alloc_usable_size(D_RO(root)->obj.oid); UT_ASSERT(usable_size_to >= size_to); if (size_to < size_from) { UT_ASSERT(usable_size_to <= usable_size_from); } if (zrealloc) { UT_ASSERT(util_is_zeroed(D_RO(D_RO(root)->obj), size_to)); } else if (check_integrity) { uint16_t checksum2 = ut_checksum( (uint8_t *)D_RW(D_RW(root)->obj), check_size); if (checksum2 != checksum) UT_ASSERTinfo(0, "memory corruption"); } pmemobj_free(&D_RW(root)->obj.oid); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); } /* * test_realloc_sizes -- test reallocations from/to specified sizes */ static void test_realloc_sizes(PMEMobjpool *pop, uint64_t type_from, uint64_t type_to, int zrealloc, unsigned size_diff) { for (uint8_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) { struct alloc_class *c = alloc_class_by_id(alloc_classes, i); if (c == NULL) continue; size_t header_size = header_type_to_size[c->header_type]; size_t size_from = c->unit_size - header_size - size_diff; for (unsigned j = 2; j <= MAX_ALLOC_MUL; j++) { size_t inc_size_to = c->unit_size * j - header_size; test_realloc(pop, size_from, inc_size_to, type_from, type_to, zrealloc); size_t dec_size_to = c->unit_size / j; if (dec_size_to <= header_size) dec_size_to = header_size; else dec_size_to -= header_size; test_realloc(pop, size_from, dec_size_to, type_from, type_to, zrealloc); for (int k = 0; k < MAX_ALLOC_CLASS; k++) { struct alloc_class *ck = alloc_class_by_id( alloc_classes, k); if (c == NULL) continue; size_t header_sizek = header_type_to_size[c->header_type]; size_t prev_size = ck->unit_size - header_sizek; test_realloc(pop, size_from, prev_size, type_from, type_to, zrealloc); } 
} } } int main(int argc, char *argv[]) { START(argc, argv, "obj_realloc"); /* root doesn't count */ UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(realloc) != 1); if (argc < 2) UT_FATAL("usage: %s file [check_integrity]", argv[0]); PMEMobjpool *pop = pmemobj_open(argv[1], POBJ_LAYOUT_NAME(realloc)); if (!pop) UT_FATAL("!pmemobj_open"); if (argc >= 3) check_integrity = atoi(argv[2]); alloc_classes = alloc_class_collection_new(); /* test huge size alloc */ test_huge_size(pop); /* test alloc and free */ test_alloc(pop, 16); test_free(pop); /* test zrealloc passing PMEMoid that points to OID_NULL value */ test_null_oid(pop); /* test realloc without changing type number */ test_realloc_sizes(pop, 0, 0, 0, 0); /* test realloc with changing type number */ test_realloc_sizes(pop, 0, 1, 0, 0); /* test zrealloc without changing type number... */ test_realloc_sizes(pop, 0, 0, 1, 8); test_realloc_sizes(pop, 0, 0, 1, 0); /* test zrealloc with changing type number... */ test_realloc_sizes(pop, 0, 1, 1, 8); test_realloc_sizes(pop, 0, 1, 1, 0); /* test realloc with type number equal to range of long long int */ test_realloc_sizes(pop, 0, UINT64_MAX, 0, 0); test_realloc_sizes(pop, 0, UINT64_MAX - 1, 0, 0); /* test zrealloc with type number equal to range of long long int */ test_realloc_sizes(pop, 0, UINT64_MAX, 1, 0); test_realloc_sizes(pop, 0, (UINT64_MAX - 1), 1, 0); alloc_class_collection_delete(alloc_classes); pmemobj_close(pop); DONE(NULL); } #ifdef _MSC_VER extern "C" { /* * Since libpmemobj is linked statically, * we need to invoke its ctor/dtor. */ MSVC_CONSTR(libpmemobj_init) MSVC_DESTR(libpmemobj_fini) } #endif
7,788
24.371336
70
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_deep_persist/mocks_posix.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2020, Intel Corporation */ /* * mocks_posix.c -- redefinitions of open/write functions (Posix implementation) */ #include "util.h" #include "os.h" #include "unittest.h" /* * open -- open mock because of Dev DAX without deep_flush * sysfs file, eg. DAX on emulated pmem */ FUNC_MOCK(os_open, int, const char *path, int flags, ...) FUNC_MOCK_RUN_DEFAULT { if (strstr(path, "/sys/bus/nd/devices/region") && strstr(path, "/deep_flush")) { UT_OUT("mocked open, path %s", path); if (os_access(path, R_OK)) return 999; } va_list ap; va_start(ap, flags); int mode = va_arg(ap, int); va_end(ap); return _FUNC_REAL(os_open)(path, flags, mode); } FUNC_MOCK_END /* * write -- write mock */ FUNC_MOCK(write, int, int fd, const void *buffer, size_t count) FUNC_MOCK_RUN_DEFAULT { if (fd == 999) { UT_OUT("mocked write, path %d", fd); return 1; } return _FUNC_REAL(write)(fd, buffer, count); } FUNC_MOCK_END /* * read -- read mock */ FUNC_MOCK(read, size_t, int fd, void *buffer, size_t nbyte) FUNC_MOCK_RUN_DEFAULT { if (fd == 999) { char pattern[2] = {'1', '\n'}; memcpy(buffer, pattern, sizeof(pattern)); UT_OUT("mocked read, fd %d", fd); return sizeof(pattern); } return _FUNC_REAL(read)(fd, buffer, nbyte); } FUNC_MOCK_END
1,326
20.754098
80
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_tx_free/obj_tx_free.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_tx_free.c -- unit test for pmemobj_tx_free */ #include <sys/param.h> #include <string.h> #include "unittest.h" #include "util.h" #include "valgrind_internal.h" #define LAYOUT_NAME "tx_free" #define OBJ_SIZE (200 * 1024) enum type_number { TYPE_FREE_NO_TX, TYPE_FREE_WRONG_UUID, TYPE_FREE_COMMIT, TYPE_FREE_ABORT, TYPE_FREE_COMMIT_NESTED1, TYPE_FREE_COMMIT_NESTED2, TYPE_FREE_ABORT_NESTED1, TYPE_FREE_ABORT_NESTED2, TYPE_FREE_ABORT_AFTER_NESTED1, TYPE_FREE_ABORT_AFTER_NESTED2, TYPE_FREE_OOM, TYPE_FREE_ALLOC, TYPE_FREE_AFTER_ABORT, TYPE_FREE_MANY_TIMES, }; TOID_DECLARE(struct object, 0); struct object { size_t value; char data[OBJ_SIZE - sizeof(size_t)]; }; /* * do_tx_alloc -- do tx allocation with specified type number */ static PMEMoid do_tx_alloc(PMEMobjpool *pop, unsigned type_num) { PMEMoid ret = OID_NULL; TX_BEGIN(pop) { ret = pmemobj_tx_alloc(sizeof(struct object), type_num); } TX_END return ret; } /* * do_tx_free_wrong_uuid -- try to free object with invalid uuid */ static void do_tx_free_wrong_uuid(PMEMobjpool *pop) { volatile int ret = 0; PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_WRONG_UUID); oid.pool_uuid_lo = ~oid.pool_uuid_lo; TX_BEGIN(pop) { ret = pmemobj_tx_free(oid); UT_ASSERTeq(ret, 0); } TX_ONABORT { ret = -1; } TX_END UT_ASSERTeq(ret, -1); /* POBJ_XFREE_NO_ABORT flag is set */ TX_BEGIN(pop) { ret = pmemobj_tx_xfree(oid, POBJ_XFREE_NO_ABORT); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END TOID(struct object) obj; TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_WRONG_UUID)); UT_ASSERT(!TOID_IS_NULL(obj)); } /* * do_tx_free_wrong_uuid_abort_on_failure -- try to free object with * invalid uuid in a transaction where pmemobj_tx_set_failure_behavior * was called. 
*/ static void do_tx_free_wrong_uuid_abort_on_failure(PMEMobjpool *pop) { volatile int ret = 0; PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_WRONG_UUID); oid.pool_uuid_lo = ~oid.pool_uuid_lo; /* pmemobj_tx_set_failure_behavior is called */ TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); UT_ASSERTeq(pmemobj_tx_get_failure_behavior(), POBJ_TX_FAILURE_RETURN); ret = pmemobj_tx_free(oid); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END /* pmemobj_tx_set_failure_behavior is called */ TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); UT_ASSERTeq(pmemobj_tx_get_failure_behavior(), POBJ_TX_FAILURE_RETURN); ret = pmemobj_tx_xfree(oid, 0); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END /* pmemobj_tx_set_failure_behavior is called in outer tx */ TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); TX_BEGIN(pop) { UT_ASSERTeq(pmemobj_tx_get_failure_behavior(), POBJ_TX_FAILURE_RETURN); ret = pmemobj_tx_free(oid); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END ret = pmemobj_tx_free(oid); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END /* pmemobj_tx_set_failure_behavior is called in neighbour tx */ TX_BEGIN(pop) { TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); ret = pmemobj_tx_free(oid); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END TX_BEGIN(pop) { UT_ASSERTeq(pmemobj_tx_get_failure_behavior(), POBJ_TX_FAILURE_ABORT); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END /* 
pmemobj_tx_set_failure_behavior is called in neighbour tx */ TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_ABORT); UT_ASSERTeq(pmemobj_tx_get_failure_behavior(), POBJ_TX_FAILURE_ABORT); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END TX_BEGIN(pop) { UT_ASSERTeq(pmemobj_tx_get_failure_behavior(), POBJ_TX_FAILURE_RETURN); ret = pmemobj_tx_free(oid); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END TOID(struct object) obj; TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_WRONG_UUID)); UT_ASSERT(!TOID_IS_NULL(obj)); } /* * do_tx_free_null_oid -- call pmemobj_tx_free with OID_NULL */ static void do_tx_free_null_oid(PMEMobjpool *pop) { volatile int ret = 0; TX_BEGIN(pop) { ret = pmemobj_tx_free(OID_NULL); } TX_ONABORT { ret = -1; } TX_END UT_ASSERTeq(ret, 0); } /* * do_tx_free_commit -- do the basic transactional deallocation of object */ static void do_tx_free_commit(PMEMobjpool *pop) { int ret; PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_COMMIT); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid); UT_ASSERTeq(ret, 0); } TX_ONABORT { UT_ASSERT(0); } TX_END TOID(struct object) obj; TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT)); UT_ASSERT(TOID_IS_NULL(obj)); } /* * do_tx_free_abort -- abort deallocation of object */ static void do_tx_free_abort(PMEMobjpool *pop) { int ret; PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_ABORT); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid); UT_ASSERTeq(ret, 0); pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END TOID(struct object) obj; TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT)); UT_ASSERT(!TOID_IS_NULL(obj)); } /* * do_tx_free_commit_nested -- do allocation in nested transaction */ 
static void do_tx_free_commit_nested(PMEMobjpool *pop) { int ret; PMEMoid oid1 = do_tx_alloc(pop, TYPE_FREE_COMMIT_NESTED1); PMEMoid oid2 = do_tx_alloc(pop, TYPE_FREE_COMMIT_NESTED2); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid1); UT_ASSERTeq(ret, 0); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid2); UT_ASSERTeq(ret, 0); } TX_ONABORT { UT_ASSERT(0); } TX_END } TX_ONABORT { UT_ASSERT(0); } TX_END TOID(struct object) obj; TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT_NESTED1)); UT_ASSERT(TOID_IS_NULL(obj)); TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT_NESTED2)); UT_ASSERT(TOID_IS_NULL(obj)); } /* * do_tx_free_abort_nested -- abort allocation in nested transaction */ static void do_tx_free_abort_nested(PMEMobjpool *pop) { int ret; PMEMoid oid1 = do_tx_alloc(pop, TYPE_FREE_ABORT_NESTED1); PMEMoid oid2 = do_tx_alloc(pop, TYPE_FREE_ABORT_NESTED2); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid1); UT_ASSERTeq(ret, 0); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid2); UT_ASSERTeq(ret, 0); pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END } TX_ONCOMMIT { UT_ASSERT(0); } TX_END TOID(struct object) obj; TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT_NESTED1)); UT_ASSERT(!TOID_IS_NULL(obj)); TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT_NESTED2)); UT_ASSERT(!TOID_IS_NULL(obj)); } /* * do_tx_free_abort_after_nested -- abort transaction after nested * pmemobj_tx_free */ static void do_tx_free_abort_after_nested(PMEMobjpool *pop) { int ret; PMEMoid oid1 = do_tx_alloc(pop, TYPE_FREE_ABORT_AFTER_NESTED1); PMEMoid oid2 = do_tx_alloc(pop, TYPE_FREE_ABORT_AFTER_NESTED2); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid1); UT_ASSERTeq(ret, 0); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid2); UT_ASSERTeq(ret, 0); } TX_END pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END TOID(struct object) obj; TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT_AFTER_NESTED1)); UT_ASSERT(!TOID_IS_NULL(obj)); TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, 
TYPE_FREE_ABORT_AFTER_NESTED2)); UT_ASSERT(!TOID_IS_NULL(obj)); } /* * do_tx_free_alloc_abort -- free object allocated in the same transaction * and abort transaction */ static void do_tx_free_alloc_abort(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_alloc( sizeof(struct object), TYPE_FREE_ALLOC)); UT_ASSERT(!TOID_IS_NULL(obj)); ret = pmemobj_tx_free(obj.oid); UT_ASSERTeq(ret, 0); pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ALLOC)); UT_ASSERT(TOID_IS_NULL(obj)); } /* * do_tx_free_alloc_abort -- free object allocated in the same transaction * and commit transaction */ static void do_tx_free_alloc_commit(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_alloc( sizeof(struct object), TYPE_FREE_ALLOC)); UT_ASSERT(!TOID_IS_NULL(obj)); ret = pmemobj_tx_free(obj.oid); UT_ASSERTeq(ret, 0); } TX_ONABORT { UT_ASSERT(0); } TX_END TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ALLOC)); UT_ASSERT(TOID_IS_NULL(obj)); } /* * do_tx_free_abort_free - allocate a new object, perform a transactional free * in an aborted transaction and then to actually free the object. * * This can expose any issues with not properly handled free undo log. 
*/ static void do_tx_free_abort_free(PMEMobjpool *pop) { PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_AFTER_ABORT); TX_BEGIN(pop) { pmemobj_tx_free(oid); pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END TX_BEGIN(pop) { pmemobj_tx_free(oid); } TX_ONABORT { UT_ASSERT(0); } TX_END } /* * do_tx_free_many_times -- free enough objects to trigger vector array alloc */ static void do_tx_free_many_times(PMEMobjpool *pop) { #define TX_FREE_COUNT ((1 << 3) + 1) PMEMoid oids[TX_FREE_COUNT]; for (int i = 0; i < TX_FREE_COUNT; ++i) oids[i] = do_tx_alloc(pop, TYPE_FREE_MANY_TIMES); TX_BEGIN(pop) { for (int i = 0; i < TX_FREE_COUNT; ++i) pmemobj_tx_free(oids[i]); } TX_ONABORT { UT_ASSERT(0); } TX_END #undef TX_FREE_COUNT } int main(int argc, char *argv[]) { START(argc, argv, "obj_tx_free"); util_init(); if (argc != 2) UT_FATAL("usage: %s [file]", argv[0]); PMEMobjpool *pop; if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create"); do_tx_free_wrong_uuid(pop); VALGRIND_WRITE_STATS; do_tx_free_wrong_uuid_abort_on_failure(pop); VALGRIND_WRITE_STATS; do_tx_free_null_oid(pop); VALGRIND_WRITE_STATS; do_tx_free_commit(pop); VALGRIND_WRITE_STATS; do_tx_free_abort(pop); VALGRIND_WRITE_STATS; do_tx_free_commit_nested(pop); VALGRIND_WRITE_STATS; do_tx_free_abort_nested(pop); VALGRIND_WRITE_STATS; do_tx_free_abort_after_nested(pop); VALGRIND_WRITE_STATS; do_tx_free_alloc_commit(pop); VALGRIND_WRITE_STATS; do_tx_free_alloc_abort(pop); VALGRIND_WRITE_STATS; do_tx_free_abort_free(pop); VALGRIND_WRITE_STATS; do_tx_free_many_times(pop); VALGRIND_WRITE_STATS; pmemobj_close(pop); DONE(NULL); }
11,423
21.356164
78
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/util_uuid_generate/util_uuid_generate.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * util_uuid_generate.c -- unit test for generating a uuid * * usage: util_uuid_generate [string] [valid|invalid] */ #include "unittest.h" #include "uuid.h" #include <unistd.h> #include <string.h> int main(int argc, char *argv[]) { START(argc, argv, "util_uuid_generate"); uuid_t uuid; uuid_t uuid1; int ret; char conv_uu[POOL_HDR_UUID_STR_LEN]; char uu[POOL_HDR_UUID_STR_LEN]; /* * No string passed in. Generate uuid. */ if (argc == 1) { /* generate a UUID string */ ret = ut_get_uuid_str(uu); UT_ASSERTeq(ret, 0); /* * Convert the string to a uuid, convert generated * uuid back to a string and compare strings. */ ret = util_uuid_from_string(uu, (struct uuid *)&uuid); UT_ASSERTeq(ret, 0); ret = util_uuid_to_string(uuid, conv_uu); UT_ASSERTeq(ret, 0); UT_ASSERT(strncmp(uu, conv_uu, POOL_HDR_UUID_STR_LEN) == 0); /* * Generate uuid from util_uuid_generate and translate to * string then back to uuid to verify they match. */ memset(uuid, 0, sizeof(uuid_t)); memset(uu, 0, POOL_HDR_UUID_STR_LEN); memset(conv_uu, 0, POOL_HDR_UUID_STR_LEN); ret = util_uuid_generate(uuid); UT_ASSERTeq(ret, 0); ret = util_uuid_to_string(uuid, uu); UT_ASSERTeq(ret, 0); ret = util_uuid_from_string(uu, (struct uuid *)&uuid1); UT_ASSERTeq(ret, 0); UT_ASSERT(memcmp(&uuid, &uuid1, sizeof(uuid_t)) == 0); } else { /* * Caller passed in string. */ if (strcmp(argv[2], "valid") == 0) { ret = util_uuid_from_string(argv[1], (struct uuid *)&uuid); UT_ASSERTeq(ret, 0); ret = util_uuid_to_string(uuid, conv_uu); UT_ASSERTeq(ret, 0); } else { ret = util_uuid_from_string(argv[1], (struct uuid *)&uuid); UT_ASSERT(ret < 0); UT_OUT("util_uuid_generate: invalid uuid string"); } } DONE(NULL); }
1,885
21.722892
62
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_pool_lookup/obj_pool_lookup.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * obj_pool_lookup.c -- unit test for pmemobj_pool and pmemobj_pool_of */ #include "unittest.h" #define MAX_PATH_LEN 255 #define LAYOUT_NAME "pool_lookup" #define ALLOC_SIZE 100 static void define_path(char *str, size_t size, const char *dir, unsigned i) { int ret = snprintf(str, size, "%s"OS_DIR_SEP_STR"testfile%d", dir, i); if (ret < 0 || ret >= size) UT_FATAL("snprintf: %d", ret); } int main(int argc, char *argv[]) { START(argc, argv, "obj_pool_lookup"); if (argc != 3) UT_FATAL("usage: %s [directory] [# of pools]", argv[0]); unsigned npools = ATOU(argv[2]); const char *dir = argv[1]; int r; /* check before pool creation */ PMEMoid some_oid = {2, 3}; UT_ASSERTeq(pmemobj_pool_by_ptr(&some_oid), NULL); UT_ASSERTeq(pmemobj_pool_by_oid(some_oid), NULL); PMEMobjpool **pops = MALLOC(npools * sizeof(PMEMobjpool *)); void **guard_after = MALLOC(npools * sizeof(void *)); size_t length = strlen(dir) + MAX_PATH_LEN; char *path = MALLOC(length); for (unsigned i = 0; i < npools; ++i) { define_path(path, length, dir, i); pops[i] = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR); /* * Reserve a page after the pool for address checks, if it * doesn't map precisely at that address - it's OK. 
*/ guard_after[i] = MMAP((char *)pops[i] + PMEMOBJ_MIN_POOL, Ut_pagesize, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); UT_ASSERTne(guard_after[i], NULL); if (pops[i] == NULL) UT_FATAL("!pmemobj_create"); } PMEMoid *oids = MALLOC(npools * sizeof(PMEMoid)); for (unsigned i = 0; i < npools; ++i) { r = pmemobj_alloc(pops[i], &oids[i], ALLOC_SIZE, 1, NULL, NULL); UT_ASSERTeq(r, 0); } PMEMoid invalid = {123, 321}; UT_ASSERTeq(pmemobj_pool_by_oid(OID_NULL), NULL); UT_ASSERTeq(pmemobj_pool_by_oid(invalid), NULL); for (unsigned i = 0; i < npools; ++i) { UT_ASSERTeq(pmemobj_pool_by_oid(oids[i]), pops[i]); } UT_ASSERTeq(pmemobj_pool_by_ptr(NULL), NULL); UT_ASSERTeq(pmemobj_pool_by_ptr((void *)0xCBA), NULL); void *valid_ptr = MALLOC(ALLOC_SIZE); UT_ASSERTeq(pmemobj_pool_by_ptr(valid_ptr), NULL); FREE(valid_ptr); for (unsigned i = 0; i < npools; ++i) { void *before_pool = (char *)pops[i] - 1; void *after_pool = (char *)pops[i] + PMEMOBJ_MIN_POOL + 1; void *start_pool = (char *)pops[i]; void *end_pool = (char *)pops[i] + PMEMOBJ_MIN_POOL - 1; void *edge = (char *)pops[i] + PMEMOBJ_MIN_POOL; void *middle = (char *)pops[i] + (PMEMOBJ_MIN_POOL / 2); void *in_oid = (char *)pmemobj_direct(oids[i]) + (ALLOC_SIZE / 2); UT_ASSERTeq(pmemobj_pool_by_ptr(before_pool), NULL); UT_ASSERTeq(pmemobj_pool_by_ptr(after_pool), NULL); UT_ASSERTeq(pmemobj_pool_by_ptr(start_pool), pops[i]); UT_ASSERTeq(pmemobj_pool_by_ptr(end_pool), pops[i]); UT_ASSERTeq(pmemobj_pool_by_ptr(edge), NULL); UT_ASSERTeq(pmemobj_pool_by_ptr(middle), pops[i]); UT_ASSERTeq(pmemobj_pool_by_ptr(in_oid), pops[i]); pmemobj_close(pops[i]); UT_ASSERTeq(pmemobj_pool_by_ptr(middle), NULL); UT_ASSERTeq(pmemobj_pool_by_ptr(in_oid), NULL); MUNMAP(guard_after[i], Ut_pagesize); } for (unsigned i = 0; i < npools; ++i) { UT_ASSERTeq(pmemobj_pool_by_oid(oids[i]), NULL); define_path(path, length, dir, i); pops[i] = pmemobj_open(path, LAYOUT_NAME); UT_ASSERTne(pops[i], NULL); UT_ASSERTeq(pmemobj_pool_by_oid(oids[i]), 
pops[i]); pmemobj_close(pops[i]); } FREE(path); FREE(pops); FREE(guard_after); FREE(oids); DONE(NULL); }
3,576
26.305344
70
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_constructor/obj_constructor.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * obj_constructor.c -- tests for constructor */ #include <stddef.h> #include "unittest.h" /* * Command line toggle indicating use of a bigger node structure for querying * pool size expressed in a number of possible allocations. A small node * structure results in a great number of allocations impossible to replicate * in assumed timeout. It is required by unit tests using remote replication to * pass on Travis. */ #define USE_BIG_ALLOC "--big-alloc" /* * Layout definition */ POBJ_LAYOUT_BEGIN(constr); POBJ_LAYOUT_ROOT(constr, struct root); POBJ_LAYOUT_TOID(constr, struct node); POBJ_LAYOUT_TOID(constr, struct node_big); POBJ_LAYOUT_END(constr); struct root { TOID(struct node) n; POBJ_LIST_HEAD(head, struct node) list; POBJ_LIST_HEAD(head_big, struct node_big) list_big; }; struct node { POBJ_LIST_ENTRY(struct node) next; }; struct node_big { POBJ_LIST_ENTRY(struct node_big) next; int weight[2048]; }; static int root_constr_cancel(PMEMobjpool *pop, void *ptr, void *arg) { return 1; } static int node_constr_cancel(PMEMobjpool *pop, void *ptr, void *arg) { return 1; } struct foo { int bar; }; static struct foo *Canceled_ptr; static int vg_test_save_ptr(PMEMobjpool *pop, void *ptr, void *arg) { Canceled_ptr = (struct foo *)ptr; return 1; } int main(int argc, char *argv[]) { START(argc, argv, "obj_constructor"); /* root doesn't count */ UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(constr) != 2); int big = (argc == 3 && strcmp(argv[2], USE_BIG_ALLOC) == 0); size_t node_size; size_t next_off; if (big) { node_size = sizeof(struct node_big); next_off = offsetof(struct node_big, next); } else if (argc == 2) { node_size = sizeof(struct node); next_off = offsetof(struct node, next); } else { UT_FATAL("usage: %s file-name [ %s ]", argv[0], USE_BIG_ALLOC); } const char *path = argv[1]; PMEMobjpool *pop = NULL; int ret; TOID(struct root) root; TOID(struct node) node; TOID(struct node_big) 
node_big; if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(constr), 0, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create: %s", path); errno = 0; root.oid = pmemobj_root_construct(pop, sizeof(struct root), root_constr_cancel, NULL); UT_ASSERT(TOID_IS_NULL(root)); UT_ASSERTeq(errno, ECANCELED); /* * Allocate memory until OOM, so we can check later if the alloc * cancellation didn't damage the heap in any way. */ int allocs = 0; while (pmemobj_alloc(pop, NULL, node_size, 1, NULL, NULL) == 0) allocs++; UT_ASSERTne(allocs, 0); PMEMoid oid; PMEMoid next; POBJ_FOREACH_SAFE(pop, oid, next) pmemobj_free(&oid); errno = 0; ret = pmemobj_alloc(pop, NULL, node_size, 1, node_constr_cancel, NULL); UT_ASSERTeq(ret, -1); UT_ASSERTeq(errno, ECANCELED); /* the same number of allocations should be possible. */ while (pmemobj_alloc(pop, NULL, node_size, 1, NULL, NULL) == 0) allocs--; UT_ASSERT(allocs <= 0); POBJ_FOREACH_SAFE(pop, oid, next) pmemobj_free(&oid); root.oid = pmemobj_root_construct(pop, sizeof(struct root), NULL, NULL); UT_ASSERT(!TOID_IS_NULL(root)); errno = 0; if (big) { node_big.oid = pmemobj_list_insert_new(pop, next_off, &D_RW(root)->list_big, OID_NULL, 0, node_size, 1, node_constr_cancel, NULL); UT_ASSERT(TOID_IS_NULL(node_big)); } else { node.oid = pmemobj_list_insert_new(pop, next_off, &D_RW(root)->list, OID_NULL, 0, node_size, 1, node_constr_cancel, NULL); UT_ASSERT(TOID_IS_NULL(node)); } UT_ASSERTeq(errno, ECANCELED); pmemobj_alloc(pop, &oid, sizeof(struct foo), 1, vg_test_save_ptr, NULL); UT_ASSERTne(Canceled_ptr, NULL); /* this should generate a valgrind memcheck warning */ Canceled_ptr->bar = 5; pmemobj_persist(pop, &Canceled_ptr->bar, sizeof(Canceled_ptr->bar)); /* * Allocate and cancel a huge object. It should return back to the * heap and it should be possible to allocate it again. 
*/ Canceled_ptr = NULL; ret = pmemobj_alloc(pop, &oid, sizeof(struct foo) + (1 << 22), 1, vg_test_save_ptr, NULL); UT_ASSERTne(Canceled_ptr, NULL); void *first_ptr = Canceled_ptr; Canceled_ptr = NULL; ret = pmemobj_alloc(pop, &oid, sizeof(struct foo) + (1 << 22), 1, vg_test_save_ptr, NULL); UT_ASSERTeq(first_ptr, Canceled_ptr); pmemobj_close(pop); DONE(NULL); }
4,369
22.621622
79
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/unittest.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * unittest.h -- the mundane stuff shared by all unit tests * * we want unit tests to be very thorough and check absolutely everything * in order to nail down the test case as precisely as possible and flag * anything at all unexpected. as a result, most unit tests are 90% code * checking stuff that isn't really interesting to what is being tested. * to help address this, the macros defined here include all the boilerplate * error checking which prints information and exits on unexpected errors. * * the result changes this code: * * if ((buf = malloc(size)) == NULL) { * fprintf(stderr, "cannot allocate %d bytes for buf\n", size); * exit(1); * } * * into this code: * * buf = MALLOC(size); * * and the error message includes the calling context information (file:line). * in general, using the all-caps version of a call means you're using the * unittest.h version which does the most common checking for you. so * calling VMEM_CREATE() instead of vmem_create() returns the same * thing, but can never return an error since the unit test library checks for * it. * for routines like vmem_delete() there is no corresponding * VMEM_DELETE() because there's no error to check for. * * all unit tests should use the same initialization: * * START(argc, argv, "brief test description", ...); * * all unit tests should use these exit calls: * * DONE("message", ...); * UT_FATAL("message", ...); * * uniform stderr and stdout messages: * * UT_OUT("message", ...); * UT_ERR("message", ...); * * in all cases above, the message is printf-like, taking variable args. * the message can be NULL. it can start with "!" in which case the "!" is * skipped and the message gets the errno string appended to it, like this: * * if (somesyscall(..) 
< 0) * UT_FATAL("!my message"); */ #ifndef _UNITTEST_H #define _UNITTEST_H 1 #include <libpmem.h> #include <libpmem2.h> #include <libpmemblk.h> #include <libpmemlog.h> #include <libpmemobj.h> #include <libpmempool.h> #ifdef __cplusplus extern "C" { #endif #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <stdarg.h> #include <stdint.h> #include <string.h> #include <strings.h> #include <setjmp.h> #include <time.h> #include <sys/types.h> #include <sys/wait.h> #include <sys/stat.h> #include <sys/mman.h> #include <sys/file.h> #ifndef __FreeBSD__ #include <sys/mount.h> #endif #include <fcntl.h> #include <signal.h> #include <errno.h> #include <dirent.h> /* XXX: move OS abstraction layer out of common */ #include "os.h" #include "os_thread.h" #include "util.h" int ut_get_uuid_str(char *); #define UT_MAX_ERR_MSG 128 #define UT_POOL_HDR_UUID_STR_LEN 37 /* uuid string length */ #define UT_POOL_HDR_UUID_GEN_FILE "/proc/sys/kernel/random/uuid" /* XXX - fix this temp hack dup'ing util_strerror when we get mock for win */ void ut_strerror(int errnum, char *buff, size_t bufflen); /* XXX - eliminate duplicated definitions in unittest.h and util.h */ #ifdef _WIN32 static inline int ut_util_statW(const wchar_t *path, os_stat_t *st_bufp) { int retVal = _wstat64(path, st_bufp); /* clear unused bits to avoid confusion */ st_bufp->st_mode &= 0600; return retVal; } #endif /* * unit test support... */ void ut_start(const char *file, int line, const char *func, int argc, char * const argv[], const char *fmt, ...) __attribute__((format(printf, 6, 7))); void ut_startW(const char *file, int line, const char *func, int argc, wchar_t * const argv[], const char *fmt, ...) __attribute__((format(printf, 6, 7))); void NORETURN ut_done(const char *file, int line, const char *func, const char *fmt, ...) __attribute__((format(printf, 4, 5))); void NORETURN ut_fatal(const char *file, int line, const char *func, const char *fmt, ...) 
__attribute__((format(printf, 4, 5))); void NORETURN ut_end(const char *file, int line, const char *func, int ret); void ut_out(const char *file, int line, const char *func, const char *fmt, ...) __attribute__((format(printf, 4, 5))); void ut_err(const char *file, int line, const char *func, const char *fmt, ...) __attribute__((format(printf, 4, 5))); /* indicate the start of the test */ #ifndef _WIN32 #define START(argc, argv, ...)\ ut_start(__FILE__, __LINE__, __func__, argc, argv, __VA_ARGS__) #else #define START(argc, argv, ...)\ wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);\ for (int i = 0; i < argc; i++) {\ argv[i] = ut_toUTF8(wargv[i]);\ if (argv[i] == NULL) {\ for (i--; i >= 0; i--)\ free(argv[i]);\ UT_FATAL("Error during arguments conversion\n");\ }\ }\ ut_start(__FILE__, __LINE__, __func__, argc, argv, __VA_ARGS__) #endif /* indicate the start of the test */ #define STARTW(argc, argv, ...)\ ut_startW(__FILE__, __LINE__, __func__, argc, argv, __VA_ARGS__) /* normal exit from test */ #ifndef _WIN32 #define DONE(...)\ ut_done(__FILE__, __LINE__, __func__, __VA_ARGS__) #else #define DONE(...)\ for (int i = argc; i > 0; i--)\ free(argv[i - 1]);\ ut_done(__FILE__, __LINE__, __func__, __VA_ARGS__) #endif #define DONEW(...)\ ut_done(__FILE__, __LINE__, __func__, __VA_ARGS__) #define END(ret, ...)\ ut_end(__FILE__, __LINE__, __func__, ret) /* fatal error detected */ #define UT_FATAL(...)\ ut_fatal(__FILE__, __LINE__, __func__, __VA_ARGS__) /* normal output */ #define UT_OUT(...)\ ut_out(__FILE__, __LINE__, __func__, __VA_ARGS__) /* error output */ #define UT_ERR(...)\ ut_err(__FILE__, __LINE__, __func__, __VA_ARGS__) /* * assertions... 
*/ /* assert a condition is true at runtime */ #define UT_ASSERT_rt(cnd)\ ((void)((cnd) || (ut_fatal(__FILE__, __LINE__, __func__,\ "assertion failure: %s", #cnd), 0))) /* assertion with extra info printed if assertion fails at runtime */ #define UT_ASSERTinfo_rt(cnd, info) \ ((void)((cnd) || (ut_fatal(__FILE__, __LINE__, __func__,\ "assertion failure: %s (%s)", #cnd, info), 0))) /* assert two integer values are equal at runtime */ #define UT_ASSERTeq_rt(lhs, rhs)\ ((void)(((lhs) == (rhs)) || (ut_fatal(__FILE__, __LINE__, __func__,\ "assertion failure: %s (0x%llx) == %s (0x%llx)", #lhs,\ (unsigned long long)(lhs), #rhs, (unsigned long long)(rhs)), 0))) /* assert two integer values are not equal at runtime */ #define UT_ASSERTne_rt(lhs, rhs)\ ((void)(((lhs) != (rhs)) || (ut_fatal(__FILE__, __LINE__, __func__,\ "assertion failure: %s (0x%llx) != %s (0x%llx)", #lhs,\ (unsigned long long)(lhs), #rhs, (unsigned long long)(rhs)), 0))) #if defined(__CHECKER__) #define UT_COMPILE_ERROR_ON(cond) #define UT_ASSERT_COMPILE_ERROR_ON(cond) #elif defined(_MSC_VER) #define UT_COMPILE_ERROR_ON(cond) C_ASSERT(!(cond)) /* XXX - can't be done with C_ASSERT() unless we have __builtin_constant_p() */ #define UT_ASSERT_COMPILE_ERROR_ON(cond) (void)(cond) #else #define UT_COMPILE_ERROR_ON(cond) ((void)sizeof(char[(cond) ? -1 : 1])) #ifndef __cplusplus #define UT_ASSERT_COMPILE_ERROR_ON(cond) UT_COMPILE_ERROR_ON(cond) #else /* __cplusplus */ /* * XXX - workaround for https://github.com/pmem/issues/issues/189 */ #define UT_ASSERT_COMPILE_ERROR_ON(cond) UT_ASSERT_rt(!(cond)) #endif /* __cplusplus */ #endif /* _MSC_VER */ /* assert a condition is true */ #define UT_ASSERT(cnd)\ do {\ /*\ * Detect useless asserts on always true expression. 
Please use\ * UT_COMPILE_ERROR_ON(!cnd) or UT_ASSERT_rt(cnd) in such\ * cases.\ */\ if (__builtin_constant_p(cnd))\ UT_ASSERT_COMPILE_ERROR_ON(cnd);\ UT_ASSERT_rt(cnd);\ } while (0) /* assertion with extra info printed if assertion fails */ #define UT_ASSERTinfo(cnd, info) \ do {\ /* See comment in UT_ASSERT. */\ if (__builtin_constant_p(cnd))\ UT_ASSERT_COMPILE_ERROR_ON(cnd);\ UT_ASSERTinfo_rt(cnd, info);\ } while (0) /* assert two integer values are equal */ #define UT_ASSERTeq(lhs, rhs)\ do {\ /* See comment in UT_ASSERT. */\ if (__builtin_constant_p(lhs) && __builtin_constant_p(rhs))\ UT_ASSERT_COMPILE_ERROR_ON((lhs) == (rhs));\ UT_ASSERTeq_rt(lhs, rhs);\ } while (0) /* assert two integer values are not equal */ #define UT_ASSERTne(lhs, rhs)\ do {\ /* See comment in UT_ASSERT. */\ if (__builtin_constant_p(lhs) && __builtin_constant_p(rhs))\ UT_ASSERT_COMPILE_ERROR_ON((lhs) != (rhs));\ UT_ASSERTne_rt(lhs, rhs);\ } while (0) /* assert pointer is fits range of [start, start + size) */ #define UT_ASSERTrange(ptr, start, size)\ ((void)(((uintptr_t)(ptr) >= (uintptr_t)(start) &&\ (uintptr_t)(ptr) < (uintptr_t)(start) + (uintptr_t)(size)) ||\ (ut_fatal(__FILE__, __LINE__, __func__,\ "assert failure: %s (%p) is outside range [%s (%p), %s (%p))", #ptr,\ (void *)(ptr), #start, (void *)(start), #start"+"#size,\ (void *)((uintptr_t)(start) + (uintptr_t)(size))), 0))) /* * memory allocation... 
*/ void *ut_malloc(const char *file, int line, const char *func, size_t size); void *ut_calloc(const char *file, int line, const char *func, size_t nmemb, size_t size); void ut_free(const char *file, int line, const char *func, void *ptr); void ut_aligned_free(const char *file, int line, const char *func, void *ptr); void *ut_realloc(const char *file, int line, const char *func, void *ptr, size_t size); char *ut_strdup(const char *file, int line, const char *func, const char *str); void *ut_pagealignmalloc(const char *file, int line, const char *func, size_t size); void *ut_memalign(const char *file, int line, const char *func, size_t alignment, size_t size); void *ut_mmap_anon_aligned(const char *file, int line, const char *func, size_t alignment, size_t size); int ut_munmap_anon_aligned(const char *file, int line, const char *func, void *start, size_t size); /* a malloc() that can't return NULL */ #define MALLOC(size)\ ut_malloc(__FILE__, __LINE__, __func__, size) /* a calloc() that can't return NULL */ #define CALLOC(nmemb, size)\ ut_calloc(__FILE__, __LINE__, __func__, nmemb, size) /* a malloc() of zeroed memory */ #define ZALLOC(size)\ ut_calloc(__FILE__, __LINE__, __func__, 1, size) #define FREE(ptr)\ ut_free(__FILE__, __LINE__, __func__, ptr) #define ALIGNED_FREE(ptr)\ ut_aligned_free(__FILE__, __LINE__, __func__, ptr) /* a realloc() that can't return NULL */ #define REALLOC(ptr, size)\ ut_realloc(__FILE__, __LINE__, __func__, ptr, size) /* a strdup() that can't return NULL */ #define STRDUP(str)\ ut_strdup(__FILE__, __LINE__, __func__, str) /* a malloc() that only returns page aligned memory */ #define PAGEALIGNMALLOC(size)\ ut_pagealignmalloc(__FILE__, __LINE__, __func__, size) /* a malloc() that returns memory with given alignment */ #define MEMALIGN(alignment, size)\ ut_memalign(__FILE__, __LINE__, __func__, alignment, size) /* * A mmap() that returns anonymous memory with given alignment and guard * pages. 
*/ #define MMAP_ANON_ALIGNED(size, alignment)\ ut_mmap_anon_aligned(__FILE__, __LINE__, __func__, alignment, size) #define MUNMAP_ANON_ALIGNED(start, size)\ ut_munmap_anon_aligned(__FILE__, __LINE__, __func__, start, size) /* * file operations */ int ut_open(const char *file, int line, const char *func, const char *path, int flags, ...); int ut_wopen(const char *file, int line, const char *func, const wchar_t *path, int flags, ...); int ut_close(const char *file, int line, const char *func, int fd); FILE *ut_fopen(const char *file, int line, const char *func, const char *path, const char *mode); int ut_fclose(const char *file, int line, const char *func, FILE *stream); int ut_unlink(const char *file, int line, const char *func, const char *path); size_t ut_write(const char *file, int line, const char *func, int fd, const void *buf, size_t len); size_t ut_read(const char *file, int line, const char *func, int fd, void *buf, size_t len); os_off_t ut_lseek(const char *file, int line, const char *func, int fd, os_off_t offset, int whence); int ut_posix_fallocate(const char *file, int line, const char *func, int fd, os_off_t offset, os_off_t len); int ut_stat(const char *file, int line, const char *func, const char *path, os_stat_t *st_bufp); int ut_statW(const char *file, int line, const char *func, const wchar_t *path, os_stat_t *st_bufp); int ut_fstat(const char *file, int line, const char *func, int fd, os_stat_t *st_bufp); void *ut_mmap(const char *file, int line, const char *func, void *addr, size_t length, int prot, int flags, int fd, os_off_t offset); int ut_munmap(const char *file, int line, const char *func, void *addr, size_t length); int ut_mprotect(const char *file, int line, const char *func, void *addr, size_t len, int prot); int ut_ftruncate(const char *file, int line, const char *func, int fd, os_off_t length); long long ut_strtoll(const char *file, int line, const char *func, const char *nptr, char **endptr, int base); long ut_strtol(const char *file, 
int line, const char *func, const char *nptr, char **endptr, int base); int ut_strtoi(const char *file, int line, const char *func, const char *nptr, char **endptr, int base); unsigned long long ut_strtoull(const char *file, int line, const char *func, const char *nptr, char **endptr, int base); unsigned long ut_strtoul(const char *file, int line, const char *func, const char *nptr, char **endptr, int base); unsigned ut_strtou(const char *file, int line, const char *func, const char *nptr, char **endptr, int base); int ut_snprintf(const char *file, int line, const char *func, char *str, size_t size, const char *format, ...); /* an open() that can't return < 0 */ #define OPEN(path, ...)\ ut_open(__FILE__, __LINE__, __func__, path, __VA_ARGS__) /* a _wopen() that can't return < 0 */ #define WOPEN(path, ...)\ ut_wopen(__FILE__, __LINE__, __func__, path, __VA_ARGS__) /* a close() that can't return -1 */ #define CLOSE(fd)\ ut_close(__FILE__, __LINE__, __func__, fd) /* an fopen() that can't return != 0 */ #define FOPEN(path, mode)\ ut_fopen(__FILE__, __LINE__, __func__, path, mode) /* a fclose() that can't return != 0 */ #define FCLOSE(stream)\ ut_fclose(__FILE__, __LINE__, __func__, stream) /* an unlink() that can't return -1 */ #define UNLINK(path)\ ut_unlink(__FILE__, __LINE__, __func__, path) /* a write() that can't return -1 */ #define WRITE(fd, buf, len)\ ut_write(__FILE__, __LINE__, __func__, fd, buf, len) /* a read() that can't return -1 */ #define READ(fd, buf, len)\ ut_read(__FILE__, __LINE__, __func__, fd, buf, len) /* a lseek() that can't return -1 */ #define LSEEK(fd, offset, whence)\ ut_lseek(__FILE__, __LINE__, __func__, fd, offset, whence) #define POSIX_FALLOCATE(fd, off, len)\ ut_posix_fallocate(__FILE__, __LINE__, __func__, fd, off, len) #define FSTAT(fd, st_bufp)\ ut_fstat(__FILE__, __LINE__, __func__, fd, st_bufp) /* a mmap() that can't return MAP_FAILED */ #define MMAP(addr, len, prot, flags, fd, offset)\ ut_mmap(__FILE__, __LINE__, __func__, addr, 
len, prot, flags, fd, offset); /* a munmap() that can't return -1 */ #define MUNMAP(addr, length)\ ut_munmap(__FILE__, __LINE__, __func__, addr, length); /* a mprotect() that can't return -1 */ #define MPROTECT(addr, len, prot)\ ut_mprotect(__FILE__, __LINE__, __func__, addr, len, prot); #define STAT(path, st_bufp)\ ut_stat(__FILE__, __LINE__, __func__, path, st_bufp) #define STATW(path, st_bufp)\ ut_statW(__FILE__, __LINE__, __func__, path, st_bufp) #define FTRUNCATE(fd, length)\ ut_ftruncate(__FILE__, __LINE__, __func__, fd, length) #define ATOU(nptr) STRTOU(nptr, NULL, 10) #define ATOUL(nptr) STRTOUL(nptr, NULL, 10) #define ATOULL(nptr) STRTOULL(nptr, NULL, 10) #define ATOI(nptr) STRTOI(nptr, NULL, 10) #define ATOL(nptr) STRTOL(nptr, NULL, 10) #define ATOLL(nptr) STRTOLL(nptr, NULL, 10) #define STRTOULL(nptr, endptr, base)\ ut_strtoull(__FILE__, __LINE__, __func__, nptr, endptr, base) #define STRTOUL(nptr, endptr, base)\ ut_strtoul(__FILE__, __LINE__, __func__, nptr, endptr, base) #define STRTOL(nptr, endptr, base)\ ut_strtol(__FILE__, __LINE__, __func__, nptr, endptr, base) #define STRTOLL(nptr, endptr, base)\ ut_strtoll(__FILE__, __LINE__, __func__, nptr, endptr, base) #define STRTOU(nptr, endptr, base)\ ut_strtou(__FILE__, __LINE__, __func__, nptr, endptr, base) #define STRTOI(nptr, endptr, base)\ ut_strtoi(__FILE__, __LINE__, __func__, nptr, endptr, base) #define SNPRINTF(str, size, format, ...) \ ut_snprintf(__FILE__, __LINE__, __func__, \ str, size, format, __VA_ARGS__) #ifndef _WIN32 #define ut_jmp_buf_t sigjmp_buf #define ut_siglongjmp(b) siglongjmp(b, 1) #define ut_sigsetjmp(b) sigsetjmp(b, 1) #else #define ut_jmp_buf_t jmp_buf #define ut_siglongjmp(b) longjmp(b, 1) #define ut_sigsetjmp(b) setjmp(b) #endif void ut_suppress_errmsg(void); void ut_unsuppress_errmsg(void); void ut_suppress_crt_assert(void); void ut_unsuppress_crt_assert(void); /* * signals... 
*/ int ut_sigaction(const char *file, int line, const char *func, int signum, struct sigaction *act, struct sigaction *oldact); /* a sigaction() that can't return an error */ #define SIGACTION(signum, act, oldact)\ ut_sigaction(__FILE__, __LINE__, __func__, signum, act, oldact) /* * pthreads... */ int ut_thread_create(const char *file, int line, const char *func, os_thread_t *__restrict thread, const os_thread_attr_t *__restrict attr, void *(*start_routine)(void *), void *__restrict arg); int ut_thread_join(const char *file, int line, const char *func, os_thread_t *thread, void **value_ptr); /* a os_thread_create() that can't return an error */ #define THREAD_CREATE(thread, attr, start_routine, arg)\ ut_thread_create(__FILE__, __LINE__, __func__,\ thread, attr, start_routine, arg) /* a os_thread_join() that can't return an error */ #define THREAD_JOIN(thread, value_ptr)\ ut_thread_join(__FILE__, __LINE__, __func__, thread, value_ptr) /* * processes... */ #ifdef _WIN32 intptr_t ut_spawnv(int argc, const char **argv, ...); #endif /* * mocks... * * NOTE: On Linux, function mocking is implemented using wrapper functions. * See "--wrap" option of the GNU linker. * There is no such feature in VC++, so on Windows we do the mocking at * compile time, by redefining symbol names: * - all the references to <symbol> are replaced with <__wrap_symbol> * in all the compilation units, except the one where the <symbol> is * defined and the test source file * - the original definition of <symbol> is replaced with <__real_symbol> * - a wrapper function <__wrap_symbol> must be defined in the test program * (it may still call the original function via <__real_symbol>) * Such solution seems to be sufficient for the purpose of our tests, even * though it has some limitations. I.e. 
it does no work well with malloc/free, * so to wrap the system memory allocator functions, we use the built-in * feature of all the PMDK libraries, allowing to override default memory * allocator with the custom one. */ #ifndef _WIN32 #define _FUNC_REAL_DECL(name, ret_type, ...)\ ret_type __real_##name(__VA_ARGS__) __attribute__((unused)); #else #define _FUNC_REAL_DECL(name, ret_type, ...)\ ret_type name(__VA_ARGS__); #endif #ifndef _WIN32 #define _FUNC_REAL(name)\ __real_##name #else #define _FUNC_REAL(name)\ name #endif #define RCOUNTER(name)\ _rcounter##name #define FUNC_MOCK_RCOUNTER_SET(name, val)\ RCOUNTER(name) = val; #define FUNC_MOCK(name, ret_type, ...)\ _FUNC_REAL_DECL(name, ret_type, ##__VA_ARGS__)\ static unsigned RCOUNTER(name);\ ret_type __wrap_##name(__VA_ARGS__);\ ret_type __wrap_##name(__VA_ARGS__) {\ switch (util_fetch_and_add32(&RCOUNTER(name), 1)) { #define FUNC_MOCK_DLLIMPORT(name, ret_type, ...)\ __declspec(dllimport) _FUNC_REAL_DECL(name, ret_type, ##__VA_ARGS__)\ static unsigned RCOUNTER(name);\ ret_type __wrap_##name(__VA_ARGS__);\ ret_type __wrap_##name(__VA_ARGS__) {\ switch (util_fetch_and_add32(&RCOUNTER(name), 1)) { #define FUNC_MOCK_END\ }} #define FUNC_MOCK_RUN(run)\ case run: #define FUNC_MOCK_RUN_DEFAULT\ default: #define FUNC_MOCK_RUN_RET(run, ret)\ case run: return (ret); #define FUNC_MOCK_RUN_RET_DEFAULT_REAL(name, ...)\ default: return _FUNC_REAL(name)(__VA_ARGS__); #define FUNC_MOCK_RUN_RET_DEFAULT(ret)\ default: return (ret); #define FUNC_MOCK_RET_ALWAYS(name, ret_type, ret, ...)\ FUNC_MOCK(name, ret_type, __VA_ARGS__)\ FUNC_MOCK_RUN_RET_DEFAULT(ret);\ FUNC_MOCK_END #define FUNC_MOCK_RET_ALWAYS_VOID(name, ...)\ FUNC_MOCK(name, void, __VA_ARGS__)\ default: return;\ FUNC_MOCK_END extern unsigned long Ut_pagesize; extern unsigned long long Ut_mmap_align; extern os_mutex_t Sigactions_lock; void ut_dump_backtrace(void); void ut_sighandler(int); void ut_register_sighandlers(void); uint16_t ut_checksum(uint8_t *addr, size_t len); 
char *ut_toUTF8(const wchar_t *wstr); wchar_t *ut_toUTF16(const char *wstr); struct test_case { const char *name; int (*func)(const struct test_case *tc, int argc, char *argv[]); }; /* * get_tc -- return test case of specified name */ static inline const struct test_case * get_tc(const char *name, const struct test_case *test_cases, size_t ntests) { for (size_t i = 0; i < ntests; i++) { if (strcmp(name, test_cases[i].name) == 0) return &test_cases[i]; } return NULL; } static inline void TEST_CASE_PROCESS(int argc, char *argv[], const struct test_case *test_cases, size_t ntests) { if (argc < 2) UT_FATAL("usage: %s <test case> [<args>]", argv[0]); for (int i = 1; i < argc; i++) { char *str_test = argv[i]; const int args_off = i + 1; const struct test_case *tc = get_tc(str_test, test_cases, ntests); if (!tc) UT_FATAL("unknown test case -- '%s'", str_test); int ret = tc->func(tc, argc - args_off, &argv[args_off]); if (ret < 0) UT_FATAL("test return value cannot be negative"); i += ret; } } #define TEST_CASE_DECLARE(_name)\ int \ _name(const struct test_case *tc, int argc, char *argv[]) #define TEST_CASE(_name)\ {\ .name = #_name,\ .func = (_name),\ } #define STR(x) #x #define ASSERT_ALIGNED_BEGIN(type) do {\ size_t off = 0;\ const char *last = "(none)";\ type t; #define ASSERT_ALIGNED_FIELD(type, field) do {\ if (offsetof(type, field) != off)\ UT_FATAL("%s: padding, missing field or fields not in order between "\ "'%s' and '%s' -- offset %lu, real offset %lu",\ STR(type), last, STR(field), off, offsetof(type, field));\ off += sizeof(t.field);\ last = STR(field);\ } while (0) #define ASSERT_FIELD_SIZE(field, size) do {\ UT_COMPILE_ERROR_ON(size != sizeof(t.field));\ } while (0) #define ASSERT_OFFSET_CHECKPOINT(type, checkpoint) do {\ if (off != checkpoint)\ UT_FATAL("%s: violated offset checkpoint -- "\ "checkpoint %lu, real offset %lu",\ STR(type), checkpoint, off);\ } while (0) #define ASSERT_ALIGNED_CHECK(type)\ if (off != sizeof(type))\ UT_FATAL("%s: missing field 
or padding after '%s': "\ "sizeof(%s) = %lu, fields size = %lu",\ STR(type), last, STR(type), sizeof(type), off);\ } while (0) /* * AddressSanitizer */ #ifdef __clang__ #if __has_feature(address_sanitizer) #define UT_DEFINE_ASAN_POISON #endif #else #ifdef __SANITIZE_ADDRESS__ #define UT_DEFINE_ASAN_POISON #endif #endif #ifdef UT_DEFINE_ASAN_POISON void __asan_poison_memory_region(void const volatile *addr, size_t size); void __asan_unpoison_memory_region(void const volatile *addr, size_t size); #define ASAN_POISON_MEMORY_REGION(addr, size) \ __asan_poison_memory_region((addr), (size)) #define ASAN_UNPOISON_MEMORY_REGION(addr, size) \ __asan_unpoison_memory_region((addr), (size)) #else #define ASAN_POISON_MEMORY_REGION(addr, size) \ ((void)(addr), (void)(size)) #define ASAN_UNPOISON_MEMORY_REGION(addr, size) \ ((void)(addr), (void)(size)) #endif #ifdef __cplusplus } #endif #endif /* unittest.h */
23,907
29.769627
79
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_fh.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */

/*
 * ut_fh.c -- implementation of OS-independent file handle / file descriptor
 * interface
 *
 * Every FH_* flag is translated to the native open flag and cleared from
 * "flags" as it is consumed; any bit left over at the end is treated as an
 * unsupported flag and aborts the test via ut_fatal().
 * NOTE(review): ut_fatal() is assumed not to return -- TODO confirm against
 * unittest.h; several paths below rely on that (e.g. uninitialized "ret"
 * style fall-through is avoided only if it aborts).
 */

/* for O_TMPFILE */
#define _GNU_SOURCE

#include <fcntl.h>

#include "ut_fh.h"
#include "unittest.h"

/* opaque handle wrapper: holds either a POSIX fd or a Windows HANDLE */
struct FHandle {
	int fd;			/* valid when type == FH_FD */
#ifdef _WIN32
	HANDLE h;		/* valid when type == FH_HANDLE */
#endif
	enum file_handle_type type;	/* discriminator for the union above */
};

#ifdef _WIN32
/* split a 64-bit file offset into the two DWORDs Win32 APIs expect */
#define HIDWORD(x) ((DWORD)((x) >> 32))
#define LODWORD(x) ((DWORD)((x) & 0xFFFFFFFF))
#endif

/*
 * check_invalid_flags -- (internal) reject flag combinations that cannot be
 * expressed portably; aborts the test instead of returning an error
 */
static void
check_invalid_flags(const char *file, int line, const char *func, int flags)
{
	if ((flags & FH_EXCL) && !(flags & FH_CREAT)) {
		ut_fatal(file, line, func,
			"FH_EXCL without FH_CREAT is meaningless");
	}

	if ((flags & FH_TRUNC) && (flags & FH_CREAT)) {
		/* because Windows doesn't support both */
		ut_fatal(file, line, func,
			"FH_TRUNC with FH_CREAT is forbidden");
	}
}

/*
 * ut_fh_open_fd -- (internal) open "path" through the POSIX fd interface,
 * translating FH_* flags to O_* flags; aborts on any unsupported flag
 */
static int
ut_fh_open_fd(const char *file, int line, const char *func,
	const char *path, int flags, mode_t mode)
{
	int sflags = 0;

	check_invalid_flags(file, line, func, flags);

	if ((flags & (FH_CREAT | FH_EXCL)) == (FH_CREAT | FH_EXCL)) {
		flags &= ~(FH_CREAT | FH_EXCL);
		sflags |= O_CREAT | O_EXCL;
	} else if (flags & FH_CREAT) {
		flags &= ~FH_CREAT;
		sflags |= O_CREAT;
	/* Windows version doesn't support both O_TRUNC and O_CREAT */
	} else if (flags & FH_TRUNC) {
		flags &= ~FH_TRUNC;
		sflags |= O_TRUNC;
	}

	int acc = flags & FH_ACCMODE;

	/* Linux version does not have FH_EXEC equivalent */
	if ((acc & FH_WRITE) && (acc & FH_READ))
		sflags |= O_RDWR;
	else if (acc & FH_WRITE)
		sflags |= O_WRONLY;
	else if (acc & FH_READ)
		sflags |= O_RDONLY;
	else
		ut_fatal(file, line, func, "unknown access mode %d", acc);

	flags &= ~FH_ACCMODE;

	if (flags & FH_DIRECTORY) {
#ifdef _WIN32
		ut_fatal(file, line, func,
			"FH_DIRECTORY is not supported on Windows using FD interface");
#else
		flags &= ~FH_DIRECTORY;
		sflags |= O_DIRECTORY;
#endif
	}

	if (flags & FH_TMPFILE) {
#ifdef O_TMPFILE
		/* O_TMPFILE is a glibc/Linux extension -- hence _GNU_SOURCE */
		flags &= ~FH_TMPFILE;
		sflags |= O_TMPFILE;
#else
		ut_fatal(file, line, func,
			"FH_TMPFILE is not supported on this system for file descriptors");
#endif
	}

	/* any bit still set was never consumed above */
	if (flags)
		ut_fatal(file, line, func, "unsupported flag(s) 0%o", flags);

	return ut_open(file, line, func, path, sflags, mode);
}

#ifdef _WIN32
/*
 * ut_fh_open_handle -- (internal) open "path" through the Win32 HANDLE
 * interface (CreateFileW), translating FH_* flags; aborts on failure
 */
static HANDLE
ut_fh_open_handle(const char *file, int line, const char *func,
	const char *path, int flags, mode_t mode)
{
	DWORD dwDesiredAccess;
	/* do not allow delete, read or write from another process */
	DWORD dwShareMode = 0;
	LPSECURITY_ATTRIBUTES lpSecurityAttributes = NULL;
	DWORD dwCreationDisposition;
	DWORD dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL;
	HANDLE hTemplateFile = NULL;

	/* XXX sometimes doesn't work, ERROR_ACCESS_DENIED on AppVeyor */
#if 0
	/*
	 * FILE_FLAG_DELETE_ON_CLOSE needs a real file (FH_CREAT)
	 * If it already exists refuse to use it (FH_EXCL), because this means
	 * something weird is going on (either there's another process with
	 * the same file opened or FILE_FLAG_DELETE_ON_CLOSE didn't actually
	 * delete the file on close)
	 */
	if (flags & FH_TMPFILE)
		flags |= FH_CREAT | FH_EXCL;
#else
	if (flags & FH_TMPFILE)
		ut_fatal(file, line, func,
			"FH_TMPFILE is not supported for file handles");
#endif

	check_invalid_flags(file, line, func, flags);

	/* only write permission can be taken out on Windows */
	if (!(mode & _S_IWRITE))
		dwFlagsAndAttributes |= FILE_ATTRIBUTE_READONLY;

	if ((flags & (FH_CREAT | FH_EXCL)) == (FH_CREAT | FH_EXCL)) {
		flags &= ~(FH_CREAT | FH_EXCL);
		dwCreationDisposition = CREATE_NEW;
	} else if (flags & FH_CREAT) {
		flags &= ~FH_CREAT;
		dwCreationDisposition = OPEN_ALWAYS;
	} else if (flags & FH_TRUNC) {
		flags &= ~FH_TRUNC;
		dwCreationDisposition = TRUNCATE_EXISTING;
	} else {
		dwCreationDisposition = OPEN_EXISTING;
	}

	int acc = flags & FH_ACCMODE;
	dwDesiredAccess = 0;
	if (acc & FH_READ) {
		dwDesiredAccess |= GENERIC_READ;
		acc &= ~FH_READ;
	}
	if (acc & FH_WRITE) {
		dwDesiredAccess |= GENERIC_WRITE;
		acc &= ~FH_WRITE;
	}
	if (acc & FH_EXEC) {
		dwDesiredAccess |= GENERIC_EXECUTE;
		acc &= ~FH_EXEC;
	}
	if (acc)
		ut_fatal(file, line, func, "unknown access mode %d", acc);

	flags &= ~FH_ACCMODE;

	if (flags & FH_DIRECTORY) {
		flags &= ~FH_DIRECTORY;
		/* GJ MS */
		dwFlagsAndAttributes |= FILE_FLAG_BACKUP_SEMANTICS;
	}

	char *full_path = NULL;
	if (flags & FH_TMPFILE) {
		/* unreachable while the #if 0 block above is disabled */
		flags &= ~FH_TMPFILE;
		dwFlagsAndAttributes |= FILE_FLAG_DELETE_ON_CLOSE;
		/*
		 * FILE_FLAG_DELETE_ON_CLOSE needs a real file,
		 * not a directory
		 */
		full_path = MALLOC(strlen(path) + 1 +
			strlen("UT_FH_TMPFILE") + 1);
		sprintf(full_path, "%s\\UT_FH_TMPFILE", path);
		path = full_path;
	}

	if (flags)
		ut_fatal(file, line, func, "unsupported flag(s) 0%o", flags);

	wchar_t *wpath = util_toUTF16(path);
	if (wpath == NULL)
		ut_fatal(file, line, func, "conversion to utf16 failed");

	HANDLE h = CreateFileW(wpath, dwDesiredAccess, dwShareMode,
		lpSecurityAttributes, dwCreationDisposition,
		dwFlagsAndAttributes, hTemplateFile);

	util_free_UTF16(wpath);
	if (h == INVALID_HANDLE_VALUE) {
		/* full_path is not freed here; ut_fatal aborts the test */
		ut_fatal(file, line, func, "opening file %s failed: %d", path,
			GetLastError());
	}

	if (full_path)
		free(full_path);

	return h;
}
#endif

/*
 * ut_fh_open -- open a file as either an fd (FH_FD) or a Windows HANDLE
 * (FH_HANDLE); the trailing variadic argument is the creation mode and is
 * read only when FH_CREAT or FH_TMPFILE is requested
 */
struct FHandle *
ut_fh_open(const char *file, int line, const char *func,
	enum file_handle_type type, const char *path, int flags, ...)
{
	struct FHandle *f = MALLOC(sizeof(*f));

	mode_t mode = 0;
	va_list ap;
	va_start(ap, flags);
	if ((flags & FH_CREAT) || (flags & FH_TMPFILE))
		mode = va_arg(ap, mode_t);
	va_end(ap);

	f->type = type;

	if (type == FH_FD) {
		f->fd = ut_fh_open_fd(file, line, func, path, flags, mode);
	} else if (type == FH_HANDLE) {
#ifdef _WIN32
		f->h = ut_fh_open_handle(file, line, func, path, flags, mode);
#else
		ut_fatal(file, line, func,
			"FH_HANDLE not supported on !Windows");
#endif
	} else {
		ut_fatal(file, line, func, "unknown type value %d", type);
	}

	return f;
}

/*
 * ut_fh_truncate -- set file length via ftruncate (fd) or
 * SetFilePointer + SetEndOfFile (HANDLE); aborts on failure
 */
void
ut_fh_truncate(const char *file, int line, const char *func,
	struct FHandle *f, os_off_t length)
{
	if (f->type == FH_FD) {
		ut_ftruncate(file, line, func, f->fd, length);
	} else if (f->type == FH_HANDLE) {
#ifdef _WIN32
		LONG low = LODWORD(length);
		LONG high = HIDWORD(length);
		/*
		 * INVALID_SET_FILE_POINTER is a valid low DWORD of an offset,
		 * so failure must be confirmed through GetLastError()
		 */
		if (SetFilePointer(f->h, low, &high, FILE_BEGIN) ==
				INVALID_SET_FILE_POINTER &&
				GetLastError() != ERROR_SUCCESS) {
			ut_fatal(file, line, func,
				"SetFilePointer failed: %d", GetLastError());
		}

		if (SetEndOfFile(f->h) == 0) {
			ut_fatal(file, line, func,
				"SetEndOfFile failed: %d", GetLastError());
		}
#else
		ut_fatal(file, line, func,
			"FH_HANDLE not supported on !Windows");
#endif
	} else {
		ut_fatal(file, line, func, "unknown type value %d", f->type);
	}
}

/*
 * ut_fh_close -- close the underlying fd/HANDLE and free the FHandle;
 * the struct is scrubbed before freeing to catch use-after-close
 */
void
ut_fh_close(const char *file, int line, const char *func, struct FHandle *f)
{
	if (f->type == FH_FD) {
		CLOSE(f->fd);
	} else if (f->type == FH_HANDLE) {
#ifdef _WIN32
		CloseHandle(f->h);
#else
		ut_fatal(file, line, func,
			"FH_HANDLE not supported on !Windows");
#endif
	} else {
		ut_fatal(file, line, func, "unknown type value %d", f->type);
	}

	memset(f, 0, sizeof(*f));
	FREE(f);
}

/*
 * ut_fh_get_fd -- return the POSIX fd; aborts if the handle is not FH_FD
 */
int
ut_fh_get_fd(const char *file, int line, const char *func, struct FHandle *f)
{
	if (f->type == FH_FD)
		return f->fd;

	/* no return needed after this -- ut_fatal does not return */
	ut_fatal(file, line, func,
		"requested file descriptor on FHandle that doesn't contain it");
}

#ifdef _WIN32
/*
 * ut_fh_get_handle -- return the Windows HANDLE; aborts if not FH_HANDLE
 */
HANDLE
ut_fh_get_handle(const char *file, int line, const char *func,
	struct FHandle *f)
{
	if (f->type == FH_HANDLE)
		return f->h;

	ut_fatal(file, line, func,
		"requested file handle on FHandle that doesn't contain it");
}
#endif

/*
 * ut_fh_get_handle_type -- return the discriminator (FH_FD or FH_HANDLE)
 */
enum file_handle_type
ut_fh_get_handle_type(struct FHandle *fh)
{
	return fh->type;
}
7,734
22.158683
77
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_pmem2_config.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * ut_pmem2_config.h -- utility helper functions for libpmem2 config tests */ #ifndef UT_PMEM2_CONFIG_H #define UT_PMEM2_CONFIG_H 1 #include "ut_fh.h" /* a pmem2_config_new() that can't return NULL */ #define PMEM2_CONFIG_NEW(cfg) \ ut_pmem2_config_new(__FILE__, __LINE__, __func__, cfg) /* a pmem2_config_set_required_store_granularity() doesn't return an error */ #define PMEM2_CONFIG_SET_GRANULARITY(cfg, g) \ ut_pmem2_config_set_required_store_granularity \ (__FILE__, __LINE__, __func__, cfg, g) /* a pmem2_config_delete() that can't return NULL */ #define PMEM2_CONFIG_DELETE(cfg) \ ut_pmem2_config_delete(__FILE__, __LINE__, __func__, cfg) void ut_pmem2_config_new(const char *file, int line, const char *func, struct pmem2_config **cfg); void ut_pmem2_config_set_required_store_granularity(const char *file, int line, const char *func, struct pmem2_config *cfg, enum pmem2_granularity g); void ut_pmem2_config_delete(const char *file, int line, const char *func, struct pmem2_config **cfg); #endif /* UT_PMEM2_CONFIG_H */
1,152
30.162162
77
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_alloc.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */

/*
 * ut_alloc.c -- unit test memory allocation routines
 *
 * All wrappers abort the running test through ut_fatal() on failure, so
 * callers never have to check for NULL.
 * NOTE(review): ut_fatal() is assumed not to return -- TODO confirm.
 */

#include "unittest.h"

/*
 * ut_malloc -- a malloc that cannot return NULL
 */
void *
ut_malloc(const char *file, int line, const char *func, size_t size)
{
	void *retval = malloc(size);

	if (retval == NULL)
		ut_fatal(file, line, func, "cannot malloc %zu bytes", size);

	return retval;
}

/*
 * ut_calloc -- a calloc that cannot return NULL
 */
void *
ut_calloc(const char *file, int line, const char *func,
	size_t nmemb, size_t size)
{
	void *retval = calloc(nmemb, size);

	if (retval == NULL)
		ut_fatal(file, line, func, "cannot calloc %zu bytes", size);

	return retval;
}

/*
 * ut_free -- wrapper for free
 *
 * technically we don't need to wrap free since there's no return to
 * check.  using this wrapper to add memory allocation tracking later.
 */
void
ut_free(const char *file, int line, const char *func, void *ptr)
{
	/* file/line/func intentionally unused for now */
	free(ptr);
}

/*
 * ut_aligned_free -- wrapper for aligned memory free
 *
 * memory from _aligned_malloc() must be released with _aligned_free()
 * on Windows; plain free() is correct elsewhere
 */
void
ut_aligned_free(const char *file, int line, const char *func, void *ptr)
{
#ifndef _WIN32
	free(ptr);
#else
	_aligned_free(ptr);
#endif
}

/*
 * ut_realloc -- a realloc that cannot return NULL
 *
 * on failure the test aborts, so the "old pointer leaks" realloc
 * pitfall does not apply here
 */
void *
ut_realloc(const char *file, int line, const char *func,
	void *ptr, size_t size)
{
	void *retval = realloc(ptr, size);

	if (retval == NULL)
		ut_fatal(file, line, func, "cannot realloc %zu bytes", size);

	return retval;
}

/*
 * ut_strdup -- a strdup that cannot return NULL
 */
char *
ut_strdup(const char *file, int line, const char *func, const char *str)
{
	char *retval = strdup(str);

	if (retval == NULL)
		ut_fatal(file, line, func, "cannot strdup %zu bytes",
			strlen(str));

	return retval;
}

/*
 * ut_memalign -- like malloc but page-aligned memory
 *
 * "alignment" must satisfy the platform allocator's constraints
 * (power of two; multiple of sizeof(void *) for posix_memalign)
 */
void *
ut_memalign(const char *file, int line, const char *func,
	size_t alignment, size_t size)
{
	void *retval;

#ifndef _WIN32
	/* posix_memalign reports the error as its return value, not errno */
	if ((errno = posix_memalign(&retval, alignment, size)) != 0)
		ut_fatal(file, line, func,
			"!memalign %zu bytes (%zu alignment)",
			size, alignment);
#else
	retval = _aligned_malloc(size, alignment);
	if (!retval) {
		ut_fatal(file, line, func,
			"!memalign %zu bytes (%zu alignment)",
			size, alignment);
	}
#endif
	return retval;
}

/*
 * ut_pagealignmalloc -- like malloc but page-aligned memory
 */
void *
ut_pagealignmalloc(const char *file, int line, const char *func,
	size_t size)
{
	return ut_memalign(file, line, func, (size_t)Ut_pagesize, size);
}

/*
 * ut_mmap_anon_aligned -- mmaps anonymous memory with specified (power of two,
 *		multiple of page size) alignment and adds guard
 *		pages around it
 *
 * Strategy: over-map by 2 * alignment, pick the first aligned address
 * strictly inside the mapping, unmap the surplus at both ends and
 * mprotect one Ut_mmap_align-sized guard region on each side of the
 * usable range.  Returns NULL if "alignment" is invalid.
 */
void *
ut_mmap_anon_aligned(const char *file, int line, const char *func,
	size_t alignment, size_t size)
{
	char *d, *d_aligned;
	uintptr_t di, di_aligned;
	size_t sz;

	if (alignment == 0)
		alignment = Ut_mmap_align;

	/* alignment must be a multiple of page size */
	if (alignment & (Ut_mmap_align - 1))
		return NULL;

	/* power of two */
	if (alignment & (alignment - 1))
		return NULL;

	d = ut_mmap(file, line, func, NULL, size + 2 * alignment,
		PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	di = (uintptr_t)d;
	di_aligned = (di + alignment - 1) & ~(alignment - 1);

	/* always leave room before the aligned address for the guard page */
	if (di == di_aligned)
		di_aligned += alignment;
	d_aligned = (void *)di_aligned;

	/* trim the excess before the leading guard page, if any */
	sz = di_aligned - di;
	if (sz - Ut_mmap_align)
		ut_munmap(file, line, func, d, sz - Ut_mmap_align);

	/* guard page before */
	ut_mprotect(file, line, func,
		d_aligned - Ut_mmap_align, Ut_mmap_align, PROT_NONE);

	/* guard page after */
	ut_mprotect(file, line, func,
		d_aligned + size, Ut_mmap_align, PROT_NONE);

	/* trim the excess after the trailing guard page, if any */
	sz = di + size + 2 * alignment - (di_aligned + size) -
		Ut_mmap_align;
	if (sz)
		ut_munmap(file, line, func,
			d_aligned + size + Ut_mmap_align, sz);

	return d_aligned;
}

/*
 * ut_munmap_anon_aligned -- unmaps anonymous memory allocated by
 *		ut_mmap_anon_aligned (including both guard pages)
 */
int
ut_munmap_anon_aligned(const char *file, int line, const char *func,
	void *start, size_t size)
{
	return ut_munmap(file, line, func, (char *)start - Ut_mmap_align,
		size + 2 * Ut_mmap_align);
}
4,238
20.963731
79
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_pmem2_utils.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ /* * ut_pmem2_utils.c -- utility helper functions for libpmem2 tests */ #include "unittest.h" #include "ut_pmem2_utils.h" /* * ut_pmem2_expect_return -- veryfies error code and prints appropriate * error message in case of error */ void ut_pmem2_expect_return(const char *file, int line, const char *func, int value, int expected) { if (value != expected) { ut_fatal(file, line, func, "unexpected return code (got %d, expected: %d): %s", value, expected, (value == 0 ? "success" : pmem2_errormsg())); } }
608
23.36
73
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_pmem2_utils.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ /* * ut_pmem2_utils.h -- utility helper functions for libpmem2 tests */ #ifndef UT_PMEM2_UTILS_H #define UT_PMEM2_UTILS_H 1 /* veryfies error code and prints appropriate error message in case of error */ #define UT_PMEM2_EXPECT_RETURN(value, expected) \ ut_pmem2_expect_return(__FILE__, __LINE__, __func__, \ value, expected) void ut_pmem2_expect_return(const char *file, int line, const char *func, int value, int expected); #endif /* UT_PMEM2_UTILS_H */
552
26.65
79
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_fh.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * ut_fh.h -- OS-independent file handle / file descriptor interface */ #ifndef UT_FH_H #define UT_FH_H #include "os.h" struct FHandle; enum file_handle_type { FH_FD, FH_HANDLE }; #define FH_ACCMODE (7) #define FH_READ (1 << 0) #define FH_WRITE (1 << 1) #define FH_RDWR (FH_READ | FH_WRITE) #define FH_EXEC (1 << 2) #define FH_CREAT (1 << 3) #define FH_EXCL (1 << 4) #define FH_TRUNC (1 << 5) /* needs directory, on Windows it creates publicly visible file */ #define FH_TMPFILE (1 << 6) #define FH_DIRECTORY (1 << 7) #define UT_FH_OPEN(type, path, flags, ...) \ ut_fh_open(__FILE__, __LINE__, __func__, type, path, \ flags, ##__VA_ARGS__) #define UT_FH_TRUNCATE(fhandle, size) \ ut_fh_truncate(__FILE__, __LINE__, __func__, fhandle, size) #define UT_FH_GET_FD(fhandle) \ ut_fh_get_fd(__FILE__, __LINE__, __func__, fhandle) #ifdef _WIN32 #define UT_FH_GET_HANDLE(fhandle) \ ut_fh_get_handle(__FILE__, __LINE__, __func__, fhandle) #endif #define UT_FH_CLOSE(fhandle) \ ut_fh_close(__FILE__, __LINE__, __func__, fhandle) struct FHandle *ut_fh_open(const char *file, int line, const char *func, enum file_handle_type type, const char *path, int flags, ...); void ut_fh_truncate(const char *file, int line, const char *func, struct FHandle *f, os_off_t length); void ut_fh_close(const char *file, int line, const char *func, struct FHandle *f); enum file_handle_type ut_fh_get_handle_type(struct FHandle *fh); int ut_fh_get_fd(const char *file, int line, const char *func, struct FHandle *f); #ifdef _WIN32 HANDLE ut_fh_get_handle(const char *file, int line, const char *func, struct FHandle *f); #endif #endif
1,761
24.536232
72
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_pmem2_source.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * ut_pmem2_source.h -- utility helper functions for libpmem2 source tests */ #include <libpmem2.h> #include "unittest.h" #include "ut_pmem2_source.h" #include "ut_pmem2_utils.h" /* * ut_pmem2_source_from_fd -- sets fd (cannot fail) */ void ut_pmem2_source_from_fd(const char *file, int line, const char *func, struct pmem2_source **src, int fd) { int ret = pmem2_source_from_fd(src, fd); ut_pmem2_expect_return(file, line, func, ret, 0); } void ut_pmem2_source_from_fh(const char *file, int line, const char *func, struct pmem2_source **src, struct FHandle *f) { enum file_handle_type type = ut_fh_get_handle_type(f); int ret; if (type == FH_FD) { int fd = ut_fh_get_fd(file, line, func, f); #ifdef _WIN32 ret = pmem2_source_from_handle(src, (HANDLE)_get_osfhandle(fd)); #else ret = pmem2_source_from_fd(src, fd); #endif } else if (type == FH_HANDLE) { #ifdef _WIN32 HANDLE h = ut_fh_get_handle(file, line, func, f); ret = pmem2_source_from_handle(src, h); #else ut_fatal(file, line, func, "FH_HANDLE not supported on !Windows"); #endif } else { ut_fatal(file, line, func, "unknown file handle type"); } ut_pmem2_expect_return(file, line, func, ret, 0); } void ut_pmem2_source_alignment(const char *file, int line, const char *func, struct pmem2_source *src, size_t *al) { int ret = pmem2_source_alignment(src, al); ut_pmem2_expect_return(file, line, func, ret, 0); } void ut_pmem2_source_delete(const char *file, int line, const char *func, struct pmem2_source **src) { int ret = pmem2_source_delete(src); ut_pmem2_expect_return(file, line, func, ret, 0); UT_ASSERTeq(*src, NULL); } void ut_pmem2_source_size(const char *file, int line, const char *func, struct pmem2_source *src, size_t *size) { int ret = pmem2_source_size(src, size); ut_pmem2_expect_return(file, line, func, ret, 0); }
1,929
24.064935
74
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_signal.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */

/*
 * ut_signal.c -- unit test signal operations
 */

#include "unittest.h"

#ifdef _WIN32
/*
 * On Windows, Access Violation exception does not raise SIGSEGV signal.
 * The trick is to catch the exception and... call the signal handler.
 */

/*
 * Sigactions[] - allows registering more than one signal/exception handler
 */
static struct sigaction Sigactions[NSIG];

/*
 * exception_handler -- called for unhandled exceptions
 *
 * Translates an access-violation SEH exception into a SIGSEGV handler
 * call, emulating the POSIX behavior.
 */
static LONG CALLBACK
exception_handler(_In_ PEXCEPTION_POINTERS ExceptionInfo)
{
	DWORD excode = ExceptionInfo->ExceptionRecord->ExceptionCode;
	if (excode == EXCEPTION_ACCESS_VIOLATION)
		Sigactions[SIGSEGV].sa_handler(SIGSEGV);
	return EXCEPTION_CONTINUE_EXECUTION;
}

/*
 * signal_handler_wrapper -- (internal) wrapper for user-defined signal handler
 *
 * Before the specified handler function is executed, signal disposition
 * is reset to SIG_DFL.  This wrapper allows to handle subsequent signals
 * without the need to set the signal disposition again.
 */
static void
signal_handler_wrapper(int signum)
{
	/* re-arm this wrapper first, since signal() resets the disposition */
	_crt_signal_t retval = signal(signum, signal_handler_wrapper);
	if (retval == SIG_ERR)
		UT_FATAL("!signal: %d", signum);

	if (Sigactions[signum].sa_handler)
		Sigactions[signum].sa_handler(signum);
	else
		UT_FATAL("handler for signal: %d is not defined", signum);
}
#endif

/*
 * ut_sigaction -- a sigaction that cannot return < 0
 *
 * On POSIX this is a thin checked wrapper around sigaction(2).  On Windows
 * it records the handler in Sigactions[] and installs signal_handler_wrapper
 * via signal(); for SIGSEGV it additionally registers a vectored exception
 * handler so access violations reach the handler too.
 * NOTE(review): Sigactions_lock is not declared in this file -- presumably
 * defined in the shared unittest support code; verify.
 */
int
ut_sigaction(const char *file, int line, const char *func,
	int signum, struct sigaction *act, struct sigaction *oldact)
{
#ifndef _WIN32
	int retval = sigaction(signum, act, oldact);
	if (retval != 0)
		ut_fatal(file, line, func,
			"!sigaction: %s", os_strsignal(signum));
	return retval;
#else
	UT_ASSERT(signum < NSIG);
	os_mutex_lock(&Sigactions_lock);
	if (oldact)
		*oldact = Sigactions[signum];
	if (act)
		Sigactions[signum] = *act;
	os_mutex_unlock(&Sigactions_lock);

	if (signum == SIGABRT) {
		ut_suppress_errmsg();
	}
	if (signum == SIGSEGV) {
		AddVectoredExceptionHandler(0, exception_handler);
	}
	_crt_signal_t retval = signal(signum, signal_handler_wrapper);
	if (retval == SIG_ERR)
		ut_fatal(file, line, func, "!signal: %d", signum);

	if (oldact != NULL)
		oldact->sa_handler = retval;

	return 0;
#endif
}
2,306
23.806452
79
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_pthread.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * ut_pthread.c -- unit test wrappers for pthread routines */ #include "unittest.h" /* * ut_thread_create -- a os_thread_create that cannot return an error */ int ut_thread_create(const char *file, int line, const char *func, os_thread_t *__restrict thread, const os_thread_attr_t *__restrict attr, void *(*start_routine)(void *), void *__restrict arg) { if ((errno = os_thread_create(thread, attr, start_routine, arg)) != 0) ut_fatal(file, line, func, "!os_thread_create"); return 0; } /* * ut_thread_join -- a os_thread_join that cannot return an error */ int ut_thread_join(const char *file, int line, const char *func, os_thread_t *thread, void **value_ptr) { if ((errno = os_thread_join(thread, value_ptr)) != 0) ut_fatal(file, line, func, "!os_thread_join"); return 0; }
901
23.378378
71
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_pmem2_map.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * ut_pmem2_map.h -- utility helper functions for libpmem2 map tests */ #ifndef UT_PMEM2_MAP_H #define UT_PMEM2_MAP_H 1 /* a pmem2_map() that can't return NULL */ #define PMEM2_MAP(cfg, src, map) \ ut_pmem2_map(__FILE__, __LINE__, __func__, cfg, src, map) void ut_pmem2_map(const char *file, int line, const char *func, struct pmem2_config *cfg, struct pmem2_source *src, struct pmem2_map **map); #endif /* UT_PMEM2_MAP_H */
522
25.15
68
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_pmem2_config.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * ut_pmem2_config.h -- utility helper functions for libpmem2 config tests */ #include <libpmem2.h> #include "unittest.h" #include "ut_pmem2_config.h" #include "ut_pmem2_utils.h" /* * ut_pmem2_config_new -- allocates cfg (cannot fail) */ void ut_pmem2_config_new(const char *file, int line, const char *func, struct pmem2_config **cfg) { int ret = pmem2_config_new(cfg); ut_pmem2_expect_return(file, line, func, ret, 0); UT_ASSERTne(*cfg, NULL); } /* * pmem2_config_set_required_store_granularity -- sets granularity */ void ut_pmem2_config_set_required_store_granularity(const char *file, int line, const char *func, struct pmem2_config *cfg, enum pmem2_granularity g) { int ret = pmem2_config_set_required_store_granularity(cfg, g); ut_pmem2_expect_return(file, line, func, ret, 0); } /* * ut_pmem2_config_delete -- deallocates cfg (cannot fail) */ void ut_pmem2_config_delete(const char *file, int line, const char *func, struct pmem2_config **cfg) { int ret = pmem2_config_delete(cfg); ut_pmem2_expect_return(file, line, func, ret, 0); UT_ASSERTeq(*cfg, NULL); }
1,181
23.122449
74
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_pmem2_setup_integration.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * ut_pmem2_setup_integration.h -- libpmem2 setup functions using public API * (for integration tests) */ #include <libpmem2.h> #include "ut_pmem2_config.h" #include "ut_pmem2_setup_integration.h" #include "ut_pmem2_source.h" #include "unittest.h" /* * ut_pmem2_prepare_config_integration -- fill pmem2_config in minimal scope */ void ut_pmem2_prepare_config_integration(const char *file, int line, const char *func, struct pmem2_config **cfg, struct pmem2_source **src, int fd, enum pmem2_granularity granularity) { ut_pmem2_config_new(file, line, func, cfg); ut_pmem2_config_set_required_store_granularity(file, line, func, *cfg, granularity); ut_pmem2_source_from_fd(file, line, func, src, fd); }
804
26.758621
76
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_pmem2_source.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * ut_pmem2_source.h -- utility helper functions for libpmem2 source tests */ #ifndef UT_PMEM2_SOURCE_H #define UT_PMEM2_SOURCE_H 1 #include "ut_fh.h" /* a pmem2_config_set_fd() that can't return NULL */ #define PMEM2_SOURCE_FROM_FD(src, fd) \ ut_pmem2_source_from_fd(__FILE__, __LINE__, __func__, src, fd) /* a pmem2_config_set_fd() that can't return NULL */ #define PMEM2_SOURCE_FROM_FH(src, fh) \ ut_pmem2_source_from_fh(__FILE__, __LINE__, __func__, src, fh) /* a pmem2_source_alignment() that can't return an error */ #define PMEM2_SOURCE_ALIGNMENT(src, al) \ ut_pmem2_source_alignment(__FILE__, __LINE__, __func__, src, al) /* a pmem2_source_delete() that can't return NULL */ #define PMEM2_SOURCE_DELETE(src) \ ut_pmem2_source_delete(__FILE__, __LINE__, __func__, src) /* a pmem2_source_source() that can't return NULL */ #define PMEM2_SOURCE_SIZE(src, size) \ ut_pmem2_source_size(__FILE__, __LINE__, __func__, src, size) void ut_pmem2_source_from_fd(const char *file, int line, const char *func, struct pmem2_source **src, int fd); void ut_pmem2_source_from_fh(const char *file, int line, const char *func, struct pmem2_source **src, struct FHandle *fhandle); void ut_pmem2_source_alignment(const char *file, int line, const char *func, struct pmem2_source *src, size_t *alignment); void ut_pmem2_source_delete(const char *file, int line, const char *func, struct pmem2_source **src); void ut_pmem2_source_size(const char *file, int line, const char *func, struct pmem2_source *src, size_t *size); #endif /* UT_PMEM2_SOURCE_H */
1,667
33.040816
76
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_pmem2_setup.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * ut_pmem2_setup.h -- libpmem2 setup functions using non-public API * (only for unit tests) */ #include "../../libpmem2/config.h" #include "ut_pmem2_source.h" #include "ut_pmem2_setup.h" #include "unittest.h" /* * ut_pmem2_prepare_config -- fill pmem2_config, this function can not set * the wrong value */ void ut_pmem2_prepare_config(struct pmem2_config *cfg, struct pmem2_source **src, struct FHandle **fh, enum file_handle_type fh_type, const char *file, size_t length, size_t offset, int access) { pmem2_config_init(cfg); cfg->offset = offset; cfg->length = length; cfg->requested_max_granularity = PMEM2_GRANULARITY_PAGE; *fh = UT_FH_OPEN(fh_type, file, access); PMEM2_SOURCE_FROM_FH(src, *fh); }
805
25
76
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_pmem2_map.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * ut_pmem2_map.h -- utility helper functions for libpmem2 map tests */ #include <libpmem2.h> #include "unittest.h" #include "ut_pmem2_map.h" #include "ut_pmem2_utils.h" /* * ut_pmem2_map -- allocates map (cannot fail) */ void ut_pmem2_map(const char *file, int line, const char *func, struct pmem2_config *cfg, struct pmem2_source *src, struct pmem2_map **map) { int ret = pmem2_map(cfg, src, map); ut_pmem2_expect_return(file, line, func, ret, 0); UT_ASSERTne(*map, NULL); }
572
21.92
68
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/unittest/ut_pmem2_setup_integration.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * ut_pmem2_setup_integration.h -- libpmem2 setup functions using public API * (for integration tests) */ #ifndef UT_PMEM2_SETUP_INTEGRATION_H #define UT_PMEM2_SETUP_INTEGRATION_H 1 #include "ut_fh.h" /* a prepare_config() that can't set wrong value */ #define PMEM2_PREPARE_CONFIG_INTEGRATION(cfg, src, fd, g) \ ut_pmem2_prepare_config_integration( \ __FILE__, __LINE__, __func__, cfg, src, fd, g) void ut_pmem2_prepare_config_integration(const char *file, int line, const char *func, struct pmem2_config **cfg, struct pmem2_source **src, int fd, enum pmem2_granularity granularity); #endif /* UT_PMEM2_SETUP_INTEGRATION_H */
728
29.375
76
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_movnt_align/pmem_movnt_align.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */

/*
 * pmem_movnt_align.c -- unit test for functions with non-temporal stores
 *
 * usage: pmem_movnt_align [C|F|B|S]
 *
 * C - pmem_memcpy_persist()
 * B - pmem_memmove_persist() in backward direction
 * F - pmem_memmove_persist() in forward direction
 * S - pmem_memset_persist()
 */

#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include "libpmem.h"
#include "unittest.h"
#include "movnt_align_common.h"

/* size of each test region: two OS pages */
#define N_BYTES (Ut_pagesize * 2)

/* when non-zero, also exercise nodrain and explicit-flag variants */
static int Heavy;

/*
 * pmem_memcpy_persist_wrapper -- adapt pmem_memcpy_persist() to the
 * flag-taking signature expected by check_memcpy() (flags ignored)
 */
static void *
pmem_memcpy_persist_wrapper(void *pmemdest, const void *src, size_t len,
		unsigned flags)
{
	(void) flags;
	return pmem_memcpy_persist(pmemdest, src, len);
}

/* as above, for pmem_memcpy_nodrain() */
static void *
pmem_memcpy_nodrain_wrapper(void *pmemdest, const void *src, size_t len,
		unsigned flags)
{
	(void) flags;
	return pmem_memcpy_nodrain(pmemdest, src, len);
}

/* as above, for pmem_memmove_persist() */
static void *
pmem_memmove_persist_wrapper(void *pmemdest, const void *src, size_t len,
		unsigned flags)
{
	(void) flags;
	return pmem_memmove_persist(pmemdest, src, len);
}

/* as above, for pmem_memmove_nodrain() */
static void *
pmem_memmove_nodrain_wrapper(void *pmemdest, const void *src, size_t len,
		unsigned flags)
{
	(void) flags;
	return pmem_memmove_nodrain(pmemdest, src, len);
}

/* as above, for pmem_memset_persist() */
static void *
pmem_memset_persist_wrapper(void *pmemdest, int c, size_t len,
		unsigned flags)
{
	(void) flags;
	return pmem_memset_persist(pmemdest, c, len);
}

/* as above, for pmem_memset_nodrain() */
static void *
pmem_memset_nodrain_wrapper(void *pmemdest, int c, size_t len,
		unsigned flags)
{
	(void) flags;
	return pmem_memset_nodrain(pmemdest, c, len);
}

/*
 * check_memmove_variants -- run check_memmove() against every tested
 * memmove flavor (persist always; nodrain/flagged only in heavy mode)
 */
static void
check_memmove_variants(size_t doff, size_t soff, size_t len)
{
	check_memmove(doff, soff, len, pmem_memmove_persist_wrapper, 0);
	if (!Heavy)
		return;
	check_memmove(doff, soff, len, pmem_memmove_nodrain_wrapper, 0);
	for (int i = 0; i < ARRAY_SIZE(Flags); ++i)
		check_memmove(doff, soff, len, pmem_memmove, Flags[i]);
}

/*
 * check_memcpy_variants -- run check_memcpy() against every tested
 * memcpy flavor (persist always; nodrain/flagged only in heavy mode)
 */
static void
check_memcpy_variants(size_t doff, size_t soff, size_t len)
{
	check_memcpy(doff, soff, len, pmem_memcpy_persist_wrapper, 0);
	if (!Heavy)
		return;
	check_memcpy(doff, soff, len, pmem_memcpy_nodrain_wrapper, 0);
	for (int i = 0; i < ARRAY_SIZE(Flags); ++i)
		check_memcpy(doff, soff, len, pmem_memcpy, Flags[i]);
}

/*
 * check_memset_variants -- run check_memset() against every tested
 * memset flavor (persist always; nodrain/flagged only in heavy mode)
 */
static void
check_memset_variants(size_t off, size_t len)
{
	check_memset(off, len, pmem_memset_persist_wrapper, 0);
	if (!Heavy)
		return;
	check_memset(off, len, pmem_memset_nodrain_wrapper, 0);
	for (int i = 0; i < ARRAY_SIZE(Flags); ++i)
		check_memset(off, len, pmem_memset, Flags[i]);
}

int
main(int argc, char *argv[])
{
	if (argc != 3)
		UT_FATAL("usage: %s type heavy=[0|1]", argv[0]);

	char type = argv[1][0];
	Heavy = argv[2][0] == '1';

	/* report the env knobs that select the movnt code path in the log */
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");

	START(argc, argv, "pmem_movnt_align %c %s %savx %savx512f",
			type,
			thr ? thr : "default",
			avx ? "" : "!",
			avx512f ? "" : "!");

	size_t page_size = Ut_pagesize;
	size_t s;
	switch (type) {
	case 'C': /* memcpy */
		/* mmap with guard pages */
		Src = MMAP_ANON_ALIGNED(N_BYTES, 0);
		Dst = MMAP_ANON_ALIGNED(N_BYTES, 0);
		if (Src == NULL || Dst == NULL)
			UT_FATAL("!mmap");

		Scratch = MALLOC(N_BYTES);

		/* check memcpy with 0 size */
		check_memcpy_variants(0, 0, 0);

		/* check memcpy with unaligned size */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memcpy_variants(0, 0, N_BYTES - s);

		/* check memcpy with unaligned begin */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memcpy_variants(s, 0, N_BYTES - s);

		/* check memcpy with unaligned begin and end */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memcpy_variants(s, s, N_BYTES - 2 * s);

		MUNMAP_ANON_ALIGNED(Src, N_BYTES);
		MUNMAP_ANON_ALIGNED(Dst, N_BYTES);
		FREE(Scratch);

		break;
	case 'B': /* memmove backward */
		/* mmap with guard pages; Src and Dst overlap on purpose */
		Src = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0);
		Dst = Src + N_BYTES - page_size;
		if (Src == NULL)
			UT_FATAL("!mmap");

		/* check memmove in backward direction with 0 size */
		check_memmove_variants(0, 0, 0);

		/* check memmove in backward direction with unaligned size */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(0, 0, N_BYTES - s);

		/* check memmove in backward direction with unaligned begin */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(s, 0, N_BYTES - s);

		/*
		 * check memmove in backward direction with unaligned begin
		 * and end
		 */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(s, s, N_BYTES - 2 * s);

		MUNMAP_ANON_ALIGNED(Src, 2 * N_BYTES - page_size);
		break;
	case 'F': /* memmove forward */
		/* mmap with guard pages; Src and Dst overlap on purpose */
		Dst = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0);
		Src = Dst + N_BYTES - page_size;
		if (Src == NULL)
			UT_FATAL("!mmap");

		/* check memmove in forward direction with 0 size */
		check_memmove_variants(0, 0, 0);

		/* check memmove in forward direction with unaligned size */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(0, 0, N_BYTES - s);

		/* check memmove in forward direction with unaligned begin */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(s, 0, N_BYTES - s);

		/*
		 * check memmove in forward direction with unaligned begin
		 * and end
		 */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memmove_variants(s, s, N_BYTES - 2 * s);

		MUNMAP_ANON_ALIGNED(Dst, 2 * N_BYTES - page_size);

		break;
	case 'S': /* memset */
		/* mmap with guard pages */
		Dst = MMAP_ANON_ALIGNED(N_BYTES, 0);
		if (Dst == NULL)
			UT_FATAL("!mmap");

		Scratch = MALLOC(N_BYTES);

		/* check memset with 0 size */
		check_memset_variants(0, 0);

		/* check memset with unaligned size */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memset_variants(0, N_BYTES - s);

		/* check memset with unaligned begin */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memset_variants(s, N_BYTES - s);

		/* check memset with unaligned begin and end */
		for (s = 0; s < CACHELINE_SIZE; s++)
			check_memset_variants(s, N_BYTES - 2 * s);

		MUNMAP_ANON_ALIGNED(Dst, N_BYTES);
		FREE(Scratch);
		break;
	default:
		UT_FATAL("!wrong type of test");
		break;
	}

	DONE(NULL);
}
6,229
23.92
78
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_memblock/obj_memblock.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */

/*
 * obj_memblock.c -- unit test for memblock interface
 */

#include "memblock.h"
#include "memops.h"
#include "obj.h"
#include "unittest.h"
#include "heap.h"

#define NCHUNKS 10

static PMEMobjpool *pop;

/*
 * mock of operation_add_typed_entry -- applies the requested ulog
 * operation directly to *ptr instead of logging it, so the test can
 * verify header changes without a real operation context
 */
FUNC_MOCK(operation_add_typed_entry, int, struct operation_context *ctx,
	void *ptr, uint64_t value,
	ulog_operation_type type, enum operation_log_type en_type)
	FUNC_MOCK_RUN_DEFAULT {
		uint64_t *pval = ptr;
		switch (type) {
		case ULOG_OPERATION_SET:
			*pval = value;
			break;
		case ULOG_OPERATION_AND:
			*pval &= value;
			break;
		case ULOG_OPERATION_OR:
			*pval |= value;
			break;
		default:
			UT_ASSERT(0);
		}

		return 0;
	}
FUNC_MOCK_END

/* mock of operation_add_entry -- delegates to the typed mock above */
FUNC_MOCK(operation_add_entry, int, struct operation_context *ctx, void *ptr,
	uint64_t value, ulog_operation_type type)
	FUNC_MOCK_RUN_DEFAULT {
		/* just call the mock above - the entry type doesn't matter */
		return operation_add_typed_entry(ctx, ptr, value, type,
			LOG_TRANSIENT);
	}
FUNC_MOCK_END

/*
 * test_detect -- checks that memblock_rebuild_state() classifies chunks
 * as HUGE or RUN based on the chunk header type
 */
static void
test_detect(void)
{
	struct memory_block mhuge_used = { .chunk_id = 0, 0, 0, 0 };
	struct memory_block mhuge_free = { .chunk_id = 1, 0, 0, 0 };
	struct memory_block mrun = { .chunk_id = 2, 0, 0, 0 };

	struct heap_layout *layout = pop->heap.layout;

	layout->zone0.chunk_headers[0].size_idx = 1;
	layout->zone0.chunk_headers[0].type = CHUNK_TYPE_USED;
	layout->zone0.chunk_headers[1].size_idx = 1;
	layout->zone0.chunk_headers[1].type = CHUNK_TYPE_FREE;
	layout->zone0.chunk_headers[2].size_idx = 1;
	layout->zone0.chunk_headers[2].type = CHUNK_TYPE_RUN;

	memblock_rebuild_state(&pop->heap, &mhuge_used);
	memblock_rebuild_state(&pop->heap, &mhuge_free);
	memblock_rebuild_state(&pop->heap, &mrun);

	/* both USED and FREE chunks are huge blocks; RUN is a run block */
	UT_ASSERTeq(mhuge_used.type, MEMORY_BLOCK_HUGE);
	UT_ASSERTeq(mhuge_free.type, MEMORY_BLOCK_HUGE);
	UT_ASSERTeq(mrun.type, MEMORY_BLOCK_RUN);
}

/*
 * test_block_size -- checks block_size(): CHUNKSIZE for huge blocks,
 * the run header's block_size for run blocks
 */
static void
test_block_size(void)
{
	struct memory_block mhuge = { .chunk_id = 0, 0, 0, 0 };
	struct memory_block mrun = { .chunk_id = 1, 0, 0, 0 };

	struct palloc_heap *heap = &pop->heap;
	struct heap_layout *layout = heap->layout;

	layout->zone0.chunk_headers[0].size_idx = 1;
	layout->zone0.chunk_headers[0].type = CHUNK_TYPE_USED;
	layout->zone0.chunk_headers[1].size_idx = 1;
	layout->zone0.chunk_headers[1].type = CHUNK_TYPE_RUN;

	struct chunk_run *run = (struct chunk_run *)
		&layout->zone0.chunks[1];
	run->hdr.block_size = 1234;

	memblock_rebuild_state(&pop->heap, &mhuge);
	memblock_rebuild_state(&pop->heap, &mrun);

	UT_ASSERTne(mhuge.m_ops, NULL);
	UT_ASSERTne(mrun.m_ops, NULL);
	UT_ASSERTeq(mhuge.m_ops->block_size(&mhuge), CHUNKSIZE);
	UT_ASSERTeq(mrun.m_ops->block_size(&mrun), 1234);
}

/*
 * test_prep_hdr -- checks prep_hdr(): for huge blocks it flips the chunk
 * header type, for run blocks it sets/clears bits in the run bitmap
 * (block_off/size_idx select which bits)
 */
static void
test_prep_hdr(void)
{
	struct memory_block mhuge_used = { .chunk_id = 0, 0, .size_idx = 1, 0 };
	struct memory_block mhuge_free = { .chunk_id = 1, 0, .size_idx = 1, 0 };

	struct memory_block mrun_used = { .chunk_id = 2, 0, .size_idx = 4,
		.block_off = 0 };
	struct memory_block mrun_free = { .chunk_id = 2, 0, .size_idx = 4,
		.block_off = 4 };

	struct memory_block mrun_large_used = { .chunk_id = 2, 0,
		.size_idx = 64, .block_off = 64 };
	struct memory_block mrun_large_free = { .chunk_id = 2, 0,
		.size_idx = 64, .block_off = 128 };

	struct palloc_heap *heap = &pop->heap;
	struct heap_layout *layout = heap->layout;

	layout->zone0.chunk_headers[0].size_idx = 1;
	layout->zone0.chunk_headers[0].type = CHUNK_TYPE_USED;
	layout->zone0.chunk_headers[1].size_idx = 1;
	layout->zone0.chunk_headers[1].type = CHUNK_TYPE_FREE;
	layout->zone0.chunk_headers[2].size_idx = 1;
	layout->zone0.chunk_headers[2].type = CHUNK_TYPE_RUN;

	struct chunk_run *run = (struct chunk_run *)&layout->zone0.chunks[2];
	run->hdr.block_size = 128;

	/* pre-seed the bitmap so each prep_hdr() call has bits to flip */
	uint64_t *bitmap = (uint64_t *)run->content;
	bitmap[0] = 0b1111;
	bitmap[1] = ~0ULL;
	bitmap[2] = 0ULL;

	memblock_rebuild_state(heap, &mhuge_used);
	memblock_rebuild_state(heap, &mhuge_free);
	memblock_rebuild_state(heap, &mrun_used);
	memblock_rebuild_state(heap, &mrun_free);
	memblock_rebuild_state(heap, &mrun_large_used);
	memblock_rebuild_state(heap, &mrun_large_free);

	UT_ASSERTne(mhuge_used.m_ops, NULL);

	mhuge_used.m_ops->prep_hdr(&mhuge_used, MEMBLOCK_FREE, NULL);
	UT_ASSERTeq(layout->zone0.chunk_headers[0].type, CHUNK_TYPE_FREE);

	mhuge_free.m_ops->prep_hdr(&mhuge_free, MEMBLOCK_ALLOCATED, NULL);
	UT_ASSERTeq(layout->zone0.chunk_headers[1].type, CHUNK_TYPE_USED);

	mrun_used.m_ops->prep_hdr(&mrun_used, MEMBLOCK_FREE, NULL);
	UT_ASSERTeq(bitmap[0], 0ULL);

	mrun_free.m_ops->prep_hdr(&mrun_free, MEMBLOCK_ALLOCATED, NULL);
	UT_ASSERTeq(bitmap[0], 0b11110000);

	mrun_large_used.m_ops->prep_hdr(&mrun_large_used, MEMBLOCK_FREE, NULL);
	UT_ASSERTeq(bitmap[1], 0ULL);

	mrun_large_free.m_ops->prep_hdr(&mrun_large_free,
		MEMBLOCK_ALLOCATED, NULL);
	UT_ASSERTeq(bitmap[2], ~0ULL);
}

/* fake_persist -- no-op persist hook for the fake pool (always succeeds) */
static int
fake_persist(void *base, const void *addr, size_t size, unsigned flags)
{
	return 0;
}

int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_memblock");

	/* a minimal fake pool: only the heap layout and persist op are set */
	PMEMobjpool pool;
	pop = &pool;

	pop->heap.layout = ZALLOC(sizeof(struct heap_layout) +
		NCHUNKS * sizeof(struct chunk));

	pop->heap.p_ops.persist = fake_persist;

	test_detect();
	test_block_size();
	test_prep_hdr();

	FREE(pop->heap.layout);

	DONE(NULL);
}
5,320
27.153439
77
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_memblock/mocks_windows.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * mocks_windows.h -- redefinitions of memops functions * * This file is Windows-specific. * * This file should be included (i.e. using Forced Include) by libpmemobj * files, when compiled for the purpose of obj_memblock test. * It would replace default implementation with mocked functions defined * in obj_memblock.c. * * These defines could be also passed as preprocessor definitions. */ #ifndef WRAP_REAL #define operation_add_typed_entry __wrap_operation_add_typed_entry #define operation_add_entry __wrap_operation_add_entry #endif
634
29.238095
73
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmreorder_flushes/pmreorder_flushes.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */

/*
 * pmreorder_flushes.c -- test for store reordering with flushes
 * in different barriers
 *
 * usage: pmreorder_flushes g|c file
 *
 * g - write data in a specific manner - some flushes
 * of the stores are made in different barriers,
 * c - check data consistency - stores should be applied only
 * after flush - no matter in which barrier the flush will happen
 *
 */

#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"

#define STORE_SIZE 64

/* log file used by the consistency-check pass */
static FILE *fp;

/* five independently flushed 64-byte fields */
struct stores_fields {
	char A[STORE_SIZE];
	char B[STORE_SIZE];
	char C[STORE_SIZE];
	char D[STORE_SIZE];
	char E[STORE_SIZE];
};

/*
 * write_consistent -- (internal) write data in a specific order
 *
 * NOTE: the exact sequence of stores / NOFLUSH / drain calls below IS the
 * test scenario - do not reorder any of these statements.
 */
static void
write_consistent(struct stores_fields *sf)
{
	/*
	 * STORE (A)
	 * STORE (B)
	 * STORE (C)
	 *
	 * FLUSH (A, B) (no flush C)
	 * FENCE
	 */
	pmem_memset(&sf->A, -1, sizeof(sf->A), PMEM_F_MEM_NODRAIN);
	pmem_memset(&sf->B, 2, sizeof(sf->B), PMEM_F_MEM_NODRAIN);
	pmem_memset(&sf->C, 3, sizeof(sf->C), PMEM_F_MEM_NOFLUSH);
	pmem_drain();

	/*
	 * STORE (A)
	 * STORE (D)
	 *
	 * FLUSH (D) (no flush A, still no flush C)
	 * FENCE
	 */
	pmem_memset(sf->A, 1, sizeof(sf->A), PMEM_F_MEM_NOFLUSH);
	pmem_memset(sf->D, 4, sizeof(sf->D), PMEM_F_MEM_NODRAIN);
	pmem_drain();

	/*
	 * There are two transitive stores now: A (which does not change
	 * it's value) and C (which is modified).
	 *
	 * STORE (D)
	 * STORE (C)
	 *
	 * FLUSH (D) (still no flush A and C)
	 * FENCE
	 */
	pmem_memset(sf->D, 5, sizeof(sf->D), PMEM_F_MEM_NODRAIN);
	pmem_memset(sf->C, 8, sizeof(sf->C), PMEM_F_MEM_NOFLUSH);
	pmem_drain();

	/*
	 * E is modified just to add additional step to the log.
	 * Values of A and C should still be -1, 2.
	 *
	 * STORE (E)
	 * FLUSH (E)
	 * FENCE
	 */
	pmem_memset(sf->E, 6, sizeof(sf->E), PMEM_F_MEM_NODRAIN);
	pmem_drain();

	/*
	 * FLUSH (A, C)
	 * FENCE
	 */
	pmem_flush(sf->A, sizeof(sf->A));
	pmem_flush(sf->C, sizeof(sf->C));
	pmem_drain();
}

/*
 * check_consistency -- (internal) check if stores are made in proper manner
 *
 * Only logs the first byte of each field; the pmreorder tool inspects the
 * log. Always returns 0 (consistent).
 */
static int
check_consistency(struct stores_fields *sf)
{
	fprintf(fp, "A=%d B=%d C=%d D=%d E=%d\n",
		sf->A[0], sf->B[0], sf->C[0], sf->D[0], sf->E[0]);

	return 0;
}

int
main(int argc, char *argv[])
{
	START(argc, argv, "pmreorder_flushes");

	util_init();

	if ((argc < 4) || (strchr("gc", argv[1][0]) == NULL) ||
			argv[1][1] != '\0')
		UT_FATAL("usage: %s g|c file log_file", argv[0]);

	int fd = OPEN(argv[2], O_RDWR);
	size_t size;

	/* mmap and register in valgrind pmemcheck */
	void *map = pmem_map_file(argv[2], 0, 0, 0, &size, NULL);
	UT_ASSERTne(map, NULL);

	struct stores_fields *sf = map;

	char opt = argv[1][0];

	/* clear the struct to get a consistent start state for writing */
	if (strchr("g", opt))
		pmem_memset_persist(sf, 0, sizeof(*sf));

	switch (opt) {
	case 'g':
		write_consistent(sf);
		break;
	case 'c':
		fp = os_fopen(argv[3], "a");
		if (fp == NULL)
			UT_FATAL("!fopen");
		int ret;
		ret = check_consistency(sf);
		fclose(fp);
		/* NOTE(review): this path returns before CLOSE(fd) below */
		return ret;
	default:
		UT_FATAL("Unrecognized option %c", opt);
	}

	CLOSE(fd);

	DONE(NULL);
}
3,207
20.105263
76
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/libpmempool_api_win/libpmempool_test_win.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */

/*
 * libpmempool_test_win -- test of libpmempool.
 *
 */

#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include "unittest.h"

/*
 * Exact copy of the struct pmempool_check_args from libpmempool 1.0 provided to
 * test libpmempool against various pmempool_check_args structure versions.
 */
struct pmempool_check_args_1_0 {
	const wchar_t *path;
	const wchar_t *backup_path;
	enum pmempool_pool_type pool_type;
	int flags;
};

/*
 * check_pool -- check given pool
 *
 * Runs the pmempool check loop, answering every question with "yes",
 * and prints the final status.
 */
static void
check_pool(struct pmempool_check_argsW *args, size_t args_size)
{
	const char *status2str[] = {
		[PMEMPOOL_CHECK_RESULT_CONSISTENT]	= "consistent",
		[PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT]	= "not consistent",
		[PMEMPOOL_CHECK_RESULT_REPAIRED]	= "repaired",
		[PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR]	= "cannot repair",
		[PMEMPOOL_CHECK_RESULT_ERROR]		= "fatal",
	};

	PMEMpoolcheck *ppc = pmempool_check_initW(args, args_size);
	if (!ppc) {
		char buff[UT_MAX_ERR_MSG];
		ut_strerror(errno, buff, UT_MAX_ERR_MSG);
		UT_OUT("Error: %s", buff);
		return;
	}

	struct pmempool_check_statusW *status = NULL;
	while ((status = pmempool_checkW(ppc)) != NULL) {
		/* convert the wide-char message for UT_OUT logging */
		char *msg = ut_toUTF8(status->str.msg);
		switch (status->type) {
		case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
			UT_OUT("%s", msg);
			break;
		case PMEMPOOL_CHECK_MSG_TYPE_INFO:
			UT_OUT("%s", msg);
			break;
		case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
			UT_OUT("%s", msg);
			/* always agree to any proposed repair step */
			status->str.answer = L"yes";
			break;
		default:
			pmempool_check_end(ppc);
			free(msg);
			exit(EXIT_FAILURE);
		}
		free(msg);
	}

	enum pmempool_check_result ret = pmempool_check_end(ppc);
	UT_OUT("status = %s", status2str[ret]);
}

/*
 * print_usage -- print usage of program
 */
static void
print_usage(wchar_t *name)
{
	UT_OUT("Usage: %S [-t <pool_type>] [-r <repair>] [-d <dry_run>] "
		"[-y <always_yes>] [-f <flags>] [-a <advanced>] "
		"[-b <backup_path>] <pool_path>", name);
}

/*
 * set_flag -- parse the value and set the flag according to a obtained value
 */
static void
set_flag(const wchar_t *value, int *flags, int flag)
{
	if (_wtoi(value) > 0)
		*flags |= flag;
	else
		*flags &= ~flag;
}

int
wmain(int argc, wchar_t *argv[])
{
	STARTW(argc, argv, "libpmempool_test_win");

	/* defaults: log pool, verbose repair with formatted messages */
	struct pmempool_check_args_1_0 args = {
		.path		= NULL,
		.backup_path	= NULL,
		.pool_type	= PMEMPOOL_POOL_TYPE_LOG,
		.flags		= PMEMPOOL_CHECK_FORMAT_STR |
			PMEMPOOL_CHECK_REPAIR | PMEMPOOL_CHECK_VERBOSE
	};

	size_t args_size = sizeof(struct pmempool_check_args_1_0);

	/* options come in "-x value" pairs; the last argument is the path */
	for (int i = 1; i < argc - 1; i += 2) {
		wchar_t *optarg = argv[i + 1];
		if (wcscmp(L"-t", argv[i]) == 0) {
			if (wcscmp(optarg, L"blk") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_BLK;
			} else if (wcscmp(optarg, L"log") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_LOG;
			} else if (wcscmp(optarg, L"obj") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_OBJ;
			} else if (wcscmp(optarg, L"btt") == 0) {
				args.pool_type = PMEMPOOL_POOL_TYPE_BTT;
			} else {
				/* numeric pool type, e.g. for invalid values */
				args.pool_type =
					(uint32_t)wcstoul(optarg, NULL, 0);
			}
		} else if (wcscmp(L"-r", argv[i]) == 0) {
			set_flag(optarg, &args.flags, PMEMPOOL_CHECK_REPAIR);
		} else if (wcscmp(L"-d", argv[i]) == 0) {
			set_flag(optarg, &args.flags, PMEMPOOL_CHECK_DRY_RUN);
		} else if (wcscmp(L"-a", argv[i]) == 0) {
			set_flag(optarg, &args.flags, PMEMPOOL_CHECK_ADVANCED);
		} else if (wcscmp(L"-y", argv[i]) == 0) {
			set_flag(optarg, &args.flags,
				PMEMPOOL_CHECK_ALWAYS_YES);
		} else if (wcscmp(L"-s", argv[i]) == 0) {
			/* override the struct size passed to the library */
			args_size = wcstoul(optarg, NULL, 0);
		} else if (wcscmp(L"-b", argv[i]) == 0) {
			args.backup_path = optarg;
		} else {
			print_usage(argv[0]);
			UT_FATAL("unknown option: %c", argv[i][1]);
		}
	}

	args.path = argv[argc - 1];

	check_pool((struct pmempool_check_argsW *)&args, args_size);

	DONEW(NULL);
}
3,912
24.743421
80
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_is_pmem_windows/pmem_is_pmem_windows.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
 * Copyright (c) 2015-2017, Microsoft Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *
 *     * Neither the name of the copyright holder nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmem_is_pmem_windows.c -- Windows specific unit test for is_pmem_detect()
 *
 * usage: pmem_is_pmem_windows file [env]
 */

#include "unittest.h"
#include "pmem.h"
#include "queue.h"
#include "win_mmap.h"
#include "util.h"

#define NTHREAD 16

/* mapping under test, shared by all worker threads */
static void *Addr;
static size_t Size;

/* set from the PMEM_IS_PMEM_FORCE env variable */
static int pmem_is_pmem_force = 0;

/* which part of the mapping (if any) is NOT covered by tracked mmaps */
enum test_mmap_scenarios {
	TEST_MMAP_SCENARIO_UNKNOWN,
	TEST_MMAP_SCENARIO_BEGIN_HOLE,
	TEST_MMAP_SCENARIO_END_HOLE,
	TEST_MMAP_SCENARIO_MIDDLE_HOLE,
	TEST_MMAP_SCENARIO_NO_HOLE
};

/*
 * get_mmap_scenarios -- map the command-line keyword to a scenario
 *
 * NOTE(review): not static - consider giving it internal linkage like the
 * other helpers in this file.
 */
enum test_mmap_scenarios
get_mmap_scenarios(char *name)
{
	if (stricmp(name, "nothing") == 0)
		return TEST_MMAP_SCENARIO_NO_HOLE;
	if (stricmp(name, "begin") == 0)
		return TEST_MMAP_SCENARIO_BEGIN_HOLE;
	if (stricmp(name, "end") == 0)
		return TEST_MMAP_SCENARIO_END_HOLE;
	if (stricmp(name, "middle") == 0)
		return TEST_MMAP_SCENARIO_MIDDLE_HOLE;
	return TEST_MMAP_SCENARIO_UNKNOWN;
}

/*
 * mmap_file_mapping_comparer -- (internal) compares the two file mapping
 * trackers
 */
static LONG_PTR
mmap_file_mapping_comparer(PFILE_MAPPING_TRACKER a, PFILE_MAPPING_TRACKER b)
{
	return ((LONG_PTR)a->BaseAddress - (LONG_PTR)b->BaseAddress);
}

/*
 * worker -- the work each thread performs
 */
static void *
worker(void *arg)
{
	int *ret = (int *)arg;
	/*
	 * We honor the force just to let the scenarios that require pmem fs
	 * work in the environment that forces pmem.
	 *
	 * NOTE: We can't use pmem_is_pmem instead of checking for the ENV
	 * variable explicitly, because we want to call is_pmem_detect that is
	 * defined in this test so that it will use the FileMappingQHead
	 * that's defined here.  Because we are crafting the Q in the test.
	 */
	if (pmem_is_pmem_force)
		*ret = 1;
	else
		*ret = is_pmem_detect(Addr, Size);
	return NULL;
}

extern SRWLOCK FileMappingQLock;
extern struct FMLHead FileMappingQHead;

int
main(int argc, char *argv[])
{
	HANDLE file_map;
	SIZE_T chunk_length;
	enum test_mmap_scenarios scenario;
	int still_holey = 1;
	int already_holey = 0;

	START(argc, argv, "pmem_is_pmem_windows");

	if (argc !=  3)
		UT_FATAL("usage: %s file {begin|end|middle|nothing}", argv[0]);

	util_init(); /* to initialize Mmap_align */

	char *str_pmem_is_pmem_force = os_getenv("PMEM_IS_PMEM_FORCE");
	if (str_pmem_is_pmem_force && atoi(str_pmem_is_pmem_force) == 1)
		pmem_is_pmem_force = 1;

	scenario = get_mmap_scenarios(argv[2]);
	UT_ASSERT(scenario != TEST_MMAP_SCENARIO_UNKNOWN);

	int fd = OPEN(argv[1], O_RDWR);

	os_stat_t stbuf;
	FSTAT(fd, &stbuf);

	Size = stbuf.st_size;
	chunk_length = Mmap_align;

	/*
	 * We don't support too small a file size.
	 */
	UT_ASSERT(Size / 8 > chunk_length);

	file_map = CreateFileMapping((HANDLE)_get_osfhandle(fd),
		NULL, PAGE_READONLY, 0, 0, NULL);
	UT_ASSERT(file_map != NULL);

	Addr = MapViewOfFile(file_map, FILE_MAP_READ, 0, 0, 0);

	/*
	 * let's setup FileMappingQHead such that, it appears to have lot of
	 * DAX mapping created through our mmap.  Here are our cases based
	 * on the input:
	 *  - entire region in mapped through our mmap
	 *  - there is a region at the beginning that's not mapped through our
	 *    mmap
	 *  - there is a region at the end that's not mapped through our mmap
	 *  - there is a region in the middle that mapped through our mmap
	 */
	for (size_t offset = 0;
		offset < Size;
		offset += chunk_length) {
		void *base_address = (void *)((char *)Addr + offset);

		/* decide (pseudo-randomly) whether this chunk is a "hole" */
		switch (scenario) {
		case TEST_MMAP_SCENARIO_BEGIN_HOLE:
			if (still_holey &&
				((offset == 0) || ((rand() % 2) == 0)) &&
				(offset < (Size / 2)))
				continue;
			else
				still_holey = 0;
			break;

		case TEST_MMAP_SCENARIO_END_HOLE:
			if ((offset > (Size / 2)) &&
				(already_holey || ((rand() % 2) == 0) ||
				(offset >= (Size - chunk_length)))) {
				already_holey = 1;
				continue;
			} else
				UT_ASSERT(!already_holey);
			break;

		case TEST_MMAP_SCENARIO_MIDDLE_HOLE:
			if ((((offset > (Size / 8)) &&
				((rand() % 2) == 0)) ||
				(offset > (Size / 8) * 6)) &&
				(offset < (Size / 8) * 7))
				continue;
			break;
		}

		/* track this chunk as if it were a direct (DAX) mapping */
		PFILE_MAPPING_TRACKER mt =
			MALLOC(sizeof(struct FILE_MAPPING_TRACKER));

		mt->Flags = FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED;
		mt->FileHandle = (HANDLE)_get_osfhandle(fd);
		mt->FileMappingHandle = file_map;
		mt->BaseAddress = base_address;
		mt->EndAddress = (void *)((char *)base_address + chunk_length);
		mt->Access = FILE_MAP_READ;
		mt->Offset = offset;

		AcquireSRWLockExclusive(&FileMappingQLock);

		PMDK_SORTEDQ_INSERT(&FileMappingQHead, mt, ListEntry,
			FILE_MAPPING_TRACKER, mmap_file_mapping_comparer);

		ReleaseSRWLockExclusive(&FileMappingQLock);
	}

	CloseHandle(file_map);
	CLOSE(fd);

	os_thread_t threads[NTHREAD];
	int ret[NTHREAD];

	/* kick off NTHREAD threads */
	for (int i = 0; i < NTHREAD; i++)
		THREAD_CREATE(&threads[i], NULL, worker, &ret[i]);

	/* wait for all the threads to complete */
	for (int i = 0; i < NTHREAD; i++)
		THREAD_JOIN(&threads[i], NULL);

	/* verify that all the threads return the same value */
	for (int i = 1; i < NTHREAD; i++)
		UT_ASSERTeq(ret[0], ret[i]);

	UT_OUT("%d", ret[0]);

	DONE(NULL);
}

/*
 * Since libpmem is linked statically,
 * we need to invoke its ctor/dtor.
 */
MSVC_CONSTR(libpmem_init)
MSVC_DESTR(libpmem_fini)
6,946
27.239837
76
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_fragmentation2/obj_fragmentation2.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * obj_fragmentation.c -- measures average heap external fragmentation * * This test is based on the workloads proposed in: * Log-structured Memory for DRAM-based Storage * by Stephen M. Rumble, Ankita Kejriwal, and John Ousterhout * * https://www.usenix.org/system/files/conference/fast14/fast14-paper_rumble.pdf */ #include <stdlib.h> #include <math.h> #include "rand.h" #include "unittest.h" #define LAYOUT_NAME "obj_fragmentation" #define MEGABYTE (1ULL << 20) #define GIGABYTE (1ULL << 30) #define RRAND(max, min)\ ((min) == (max) ? (min) : (rnd64() % ((max) - (min)) + (min))) static PMEMoid *objects; static size_t nobjects; static size_t allocated_current; #define MAX_OBJECTS (200ULL * 1000000) #define ALLOC_TOTAL (5000ULL * MEGABYTE) #define ALLOC_CURR (1000 * MEGABYTE) #define FREES_P 200 #define DEFAULT_FILE_SIZE (3 * GIGABYTE) static void shuffle_objects(size_t start, size_t end) { PMEMoid tmp; size_t dest; for (size_t n = start; n < end; ++n) { dest = RRAND(nobjects - 1, 0); tmp = objects[n]; objects[n] = objects[dest]; objects[dest] = tmp; } } static PMEMoid remove_last() { UT_ASSERT(nobjects > 0); PMEMoid obj = objects[--nobjects]; return obj; } static void delete_objects(PMEMobjpool *pop, float pct) { size_t nfree = (size_t)(nobjects * pct); PMEMoid oid = pmemobj_root(pop, 1); shuffle_objects(0, nobjects); while (nfree--) { oid = remove_last(); allocated_current -= pmemobj_alloc_usable_size(oid); pmemobj_free(&oid); } } /* * object_next_size -- generates random sizes in range with * exponential distribution */ static size_t object_next_size(size_t max, size_t min) { float fmax = (float)max; float fmin = (float)min; float n = (float)rnd64() / ((float)UINT64_MAX / 1.0f); return (size_t)(fmin + (fmax - fmin) * (float)exp(n * - 4.0)); } /* * allocate_exponential -- allocates objects from a large range of sizes. 
* * This is designed to stress the recycler subsystem that will have to * constantly look for freed/empty runs and reuse them. * * For small pools (single digit gigabytes), this test will show large * fragmentation because it can use a large number of runs - which is fine. */ static void allocate_exponential(PMEMobjpool *pop, size_t size_min, size_t size_max) { size_t allocated_total = 0; PMEMoid oid; while (allocated_total < ALLOC_TOTAL) { size_t s = object_next_size(size_max, size_min); int ret = pmemobj_alloc(pop, &oid, s, 0, NULL, NULL); if (ret != 0) { /* delete a random percentage of allocated objects */ float delete_pct = (float)RRAND(90, 10) / 100.0f; delete_objects(pop, delete_pct); continue; } s = pmemobj_alloc_usable_size(oid); objects[nobjects++] = oid; UT_ASSERT(nobjects < MAX_OBJECTS); allocated_total += s; allocated_current += s; } } static void allocate_objects(PMEMobjpool *pop, size_t size_min, size_t size_max) { size_t allocated_total = 0; size_t sstart = 0; PMEMoid oid; while (allocated_total < ALLOC_TOTAL) { size_t s = RRAND(size_max, size_min); pmemobj_alloc(pop, &oid, s, 0, NULL, NULL); UT_ASSERTeq(OID_IS_NULL(oid), 0); s = pmemobj_alloc_usable_size(oid); objects[nobjects++] = oid; UT_ASSERT(nobjects < MAX_OBJECTS); allocated_total += s; allocated_current += s; if (allocated_current > ALLOC_CURR) { shuffle_objects(sstart, nobjects); for (int i = 0; i < FREES_P; ++i) { oid = remove_last(); allocated_current -= pmemobj_alloc_usable_size(oid); pmemobj_free(&oid); } sstart = nobjects; } } } typedef void workload(PMEMobjpool *pop); static void w0(PMEMobjpool *pop) { allocate_objects(pop, 100, 100); } static void w1(PMEMobjpool *pop) { allocate_objects(pop, 100, 100); allocate_objects(pop, 130, 130); } static void w2(PMEMobjpool *pop) { allocate_objects(pop, 100, 100); delete_objects(pop, 0.9F); allocate_objects(pop, 130, 130); } static void w3(PMEMobjpool *pop) { allocate_objects(pop, 100, 150); allocate_objects(pop, 200, 250); } static void 
w4(PMEMobjpool *pop) { allocate_objects(pop, 100, 150); delete_objects(pop, 0.9F); allocate_objects(pop, 200, 250); } static void w5(PMEMobjpool *pop) { allocate_objects(pop, 100, 200); delete_objects(pop, 0.5); allocate_objects(pop, 1000, 2000); } static void w6(PMEMobjpool *pop) { allocate_objects(pop, 1000, 2000); delete_objects(pop, 0.9F); allocate_objects(pop, 1500, 2500); } static void w7(PMEMobjpool *pop) { allocate_objects(pop, 50, 150); delete_objects(pop, 0.9F); allocate_objects(pop, 5000, 15000); } static void w8(PMEMobjpool *pop) { allocate_objects(pop, 2 * MEGABYTE, 2 * MEGABYTE); } static void w9(PMEMobjpool *pop) { allocate_exponential(pop, 1, 5 * MEGABYTE); } static workload *workloads[] = { w0, w1, w2, w3, w4, w5, w6, w7, w8, w9 }; static float workloads_target[] = { 0.01f, 0.01f, 0.01f, 0.9f, 0.8f, 0.7f, 0.3f, 0.8f, 0.73f, 3.0f }; static float workloads_defrag_target[] = { 0.01f, 0.01f, 0.01f, 0.01f, 0.01f, 0.05f, 0.09f, 0.13f, 0.01f, 0.16f }; /* * Last two workloads operates mostly on huge chunks, so run * stats are useless. */ static float workloads_stat_target[] = { 0.01f, 1.1f, 1.1f, 0.86f, 0.76f, 1.01f, 0.23f, 1.24f, 2100.f, 2100.f }; static float workloads_defrag_stat_target[] = { 0.01f, 0.01f, 0.01f, 0.02f, 0.02f, 0.04f, 0.08f, 0.12f, 2100.f, 2100.f }; int main(int argc, char *argv[]) { START(argc, argv, "obj_fragmentation2"); if (argc < 3) UT_FATAL("usage: %s filename workload [seed] [defrag]", argv[0]); const char *path = argv[1]; PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, DEFAULT_FILE_SIZE, S_IWUSR | S_IRUSR); if (pop == NULL) UT_FATAL("!pmemobj_create: %s", path); int w = atoi(argv[2]); if (argc > 3) randomize((unsigned)atoi(argv[3])); else randomize(0); int defrag = argc > 4 ? 
atoi(argv[4]) != 0 : 0; objects = ZALLOC(sizeof(PMEMoid) * MAX_OBJECTS); UT_ASSERTne(objects, NULL); workloads[w](pop); /* this is to trigger global recycling */ pmemobj_defrag(pop, NULL, 0, NULL); size_t active = 0; size_t allocated = 0; pmemobj_ctl_get(pop, "stats.heap.run_active", &active); pmemobj_ctl_get(pop, "stats.heap.run_allocated", &allocated); float stat_frag = 0; if (active != 0 && allocated != 0) { stat_frag = ((float)active / allocated) - 1.f; UT_ASSERT(stat_frag <= workloads_stat_target[w]); } if (defrag) { PMEMoid **objectsf = ZALLOC(sizeof(PMEMoid) * nobjects); for (size_t i = 0; i < nobjects; ++i) objectsf[i] = &objects[i]; pmemobj_defrag(pop, objectsf, nobjects, NULL); FREE(objectsf); active = 0; allocated = 0; /* this is to trigger global recycling */ pmemobj_defrag(pop, NULL, 0, NULL); pmemobj_ctl_get(pop, "stats.heap.run_active", &active); pmemobj_ctl_get(pop, "stats.heap.run_allocated", &allocated); if (active != 0 && allocated != 0) { stat_frag = ((float)active / allocated) - 1.f; UT_ASSERT(stat_frag <= workloads_defrag_stat_target[w]); } } PMEMoid oid; size_t remaining = 0; size_t chunk = (100); /* calc at chunk level */ while (pmemobj_alloc(pop, &oid, chunk, 0, NULL, NULL) == 0) remaining += pmemobj_alloc_usable_size(oid) + 16; size_t allocated_sum = 0; oid = pmemobj_root(pop, 1); for (size_t n = 0; n < nobjects; ++n) { if (OID_IS_NULL(objects[n])) continue; oid = objects[n]; allocated_sum += pmemobj_alloc_usable_size(oid) + 16; } size_t used = DEFAULT_FILE_SIZE - remaining; float frag = ((float)used / allocated_sum) - 1.f; UT_OUT("FRAG: %f\n", frag); UT_ASSERT(frag <= (defrag ? workloads_defrag_target[w] : workloads_target[w])); pmemobj_close(pop); FREE(objects); DONE(NULL); }
7,747
22.337349
80
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/getopt/getopt.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * getopt.c -- test for windows getopt() implementation */ #include <stdio.h> #include <stdlib.h> #include <getopt.h> #include "unittest.h" /* * long_options -- command line arguments */ static const struct option long_options[] = { { "arg_a", no_argument, NULL, 'a' }, { "arg_b", no_argument, NULL, 'b' }, { "arg_c", no_argument, NULL, 'c' }, { "arg_d", no_argument, NULL, 'd' }, { "arg_e", no_argument, NULL, 'e' }, { "arg_f", no_argument, NULL, 'f' }, { "arg_g", no_argument, NULL, 'g' }, { "arg_h", no_argument, NULL, 'h' }, { "arg_A", required_argument, NULL, 'A' }, { "arg_B", required_argument, NULL, 'B' }, { "arg_C", required_argument, NULL, 'C' }, { "arg_D", required_argument, NULL, 'D' }, { "arg_E", required_argument, NULL, 'E' }, { "arg_F", required_argument, NULL, 'F' }, { "arg_G", required_argument, NULL, 'G' }, { "arg_H", required_argument, NULL, 'H' }, { "arg_1", optional_argument, NULL, '1' }, { "arg_2", optional_argument, NULL, '2' }, { "arg_3", optional_argument, NULL, '3' }, { "arg_4", optional_argument, NULL, '4' }, { "arg_5", optional_argument, NULL, '5' }, { "arg_6", optional_argument, NULL, '6' }, { "arg_7", optional_argument, NULL, '7' }, { "arg_8", optional_argument, NULL, '8' }, { NULL, 0, NULL, 0 }, }; int main(int argc, char *argv[]) { int opt; int option_index; START(argc, argv, "getopt"); while ((opt = getopt_long(argc, argv, "abcdefghA:B:C:D:E:F:G::H1::2::3::4::5::6::7::8::", long_options, &option_index)) != -1) { switch (opt) { case '?': UT_OUT("unknown argument"); break; case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': UT_OUT("arg_%c", opt); break; case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': UT_OUT("arg_%c=%s", opt, optarg == NULL ? 
"null": optarg); break; } } while (optind < argc) { UT_OUT("%s", argv[optind++]); } DONE(NULL); }
2,159
21.736842
55
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/util_sds/util_sds.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * util_sds.c -- unit test for shutdown state functions */ #include <stdlib.h> #include "unittest.h" #include "ut_pmem2.h" #include "shutdown_state.h" #include "set.h" #define PMEM_LEN 4096 static char **uids; static size_t uids_size; static size_t uid_it; static uint64_t *uscs; static size_t uscs_size; static size_t usc_it; static pmem2_persist_fn persist; #define FAIL(X, Y) \ if ((X) == (Y)) { \ goto out; \ } int main(int argc, char *argv[]) { START(argc, argv, "util_sds"); if (argc < 2) UT_FATAL("usage: %s init fail (file uuid usc)...", argv[0]); unsigned files = (unsigned)(argc - 2) / 3; char **pmemaddr = MALLOC(files * sizeof(char *)); int *fds = MALLOC(files * sizeof(fds[0])); struct pmem2_map **maps = MALLOC(files * sizeof(maps[0])); uids = MALLOC(files * sizeof(uids[0])); uscs = MALLOC(files * sizeof(uscs[0])); uids_size = files; uscs_size = files; int init = atoi(argv[1]); int fail_on = atoi(argv[2]); char **args = argv + 3; struct pmem2_config *cfg; PMEM2_CONFIG_NEW(&cfg); pmem2_config_set_required_store_granularity(cfg, PMEM2_GRANULARITY_PAGE); for (unsigned i = 0; i < files; i++) { fds[i] = OPEN(args[i * 3], O_CREAT | O_RDWR, 0666); POSIX_FALLOCATE(fds[i], 0, PMEM_LEN); struct pmem2_source *src; PMEM2_SOURCE_FROM_FD(&src, fds[i]); if (pmem2_map(cfg, src, &maps[i])) { UT_FATAL("pmem2_map: %s", pmem2_errormsg()); } pmemaddr[0] = pmem2_map_get_address(maps[i]); uids[i] = args[i * 3 + 1]; uscs[i] = strtoull(args[i * 3 + 2], NULL, 0); PMEM2_SOURCE_DELETE(&src); } persist = pmem2_get_persist_fn(maps[0]); FAIL(fail_on, 1); struct pool_replica *rep = MALLOC( sizeof(*rep) + sizeof(struct pool_set_part)); memset(rep, 0, sizeof(*rep) + sizeof(struct pool_set_part)); struct shutdown_state *pool_sds = (struct shutdown_state *)pmemaddr[0]; if (init) { /* initialize pool shutdown state */ shutdown_state_init(pool_sds, rep); FAIL(fail_on, 2); for (unsigned i = 0; i < files; i++) { 
if (shutdown_state_add_part(pool_sds, fds[i], rep)) UT_FATAL("shutdown_state_add_part"); FAIL(fail_on, 3); } } else { /* verify a shutdown state saved in the pool */ struct shutdown_state current_sds; shutdown_state_init(&current_sds, NULL); FAIL(fail_on, 2); for (unsigned i = 0; i < files; i++) { if (shutdown_state_add_part(&current_sds, fds[i], NULL)) UT_FATAL("shutdown_state_add_part"); FAIL(fail_on, 3); } if (shutdown_state_check(&current_sds, pool_sds, rep)) { UT_FATAL( "An ADR failure is detected, the pool might be corrupted"); } } FAIL(fail_on, 4); shutdown_state_set_dirty(pool_sds, rep); /* pool is open */ FAIL(fail_on, 5); /* close pool */ shutdown_state_clear_dirty(pool_sds, rep); FAIL(fail_on, 6); out: for (unsigned i = 0; i < files; i++) { pmem2_unmap(&maps[i]); CLOSE(fds[i]); } PMEM2_CONFIG_DELETE(&cfg); FREE(pmemaddr); FREE(uids); FREE(uscs); FREE(fds); FREE(maps); DONE(NULL); } FUNC_MOCK(pmem2_source_device_id, int, const struct pmem2_source *src, char *uid, size_t *len) FUNC_MOCK_RUN_DEFAULT { if (uid_it < uids_size) { if (uid != NULL) { strcpy(uid, uids[uid_it]); uid_it++; } else { *len = strlen(uids[uid_it]) + 1; } } else { return -1; } return 0; } FUNC_MOCK_END FUNC_MOCK(pmem2_source_device_usc, int, const struct pmem2_source *src, uint64_t *usc) FUNC_MOCK_RUN_DEFAULT { if (usc_it < uscs_size) { *usc = uscs[usc_it]; usc_it++; } else { return -1; } return 0; } FUNC_MOCK_END int os_part_deep_common(struct pool_replica *rep, unsigned partidx, void *addr, size_t len, int flush); /* * os_part_deep_common -- XXX temporally workaround until we will have pmem2 * integrated with common */ int os_part_deep_common(struct pool_replica *rep, unsigned partidx, void *addr, size_t len, int flush) { /* * this is test - we don't need to deep persist anything - * just call regular persist to make valgrind happy */ persist(addr, len); return 0; } #ifdef _MSC_VER MSVC_CONSTR(libpmem2_init) MSVC_DESTR(libpmem2_fini) #endif
4,175
21.572973
79
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_recreate/obj_recreate.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * obj_recreate.c -- recreate pool on dirty file and check consistency */ #include "unittest.h" POBJ_LAYOUT_BEGIN(recreate); POBJ_LAYOUT_ROOT(recreate, struct root); POBJ_LAYOUT_TOID(recreate, struct foo); POBJ_LAYOUT_END(recreate); struct foo { int bar; }; struct root { TOID(struct foo) foo; }; #define LAYOUT_NAME "obj_recreate" #define N PMEMOBJ_MIN_POOL int main(int argc, char *argv[]) { START(argc, argv, "obj_recreate"); /* root doesn't count */ UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(recreate) != 1); if (argc < 2) UT_FATAL("usage: %s file-name [trunc]", argv[0]); const char *path = argv[1]; PMEMobjpool *pop = NULL; /* create pool 2*N */ pop = pmemobj_create(path, LAYOUT_NAME, 2 * N, S_IWUSR | S_IRUSR); if (pop == NULL) UT_FATAL("!pmemobj_create: %s", path); /* allocate 1.5*N */ TOID(struct root) root = (TOID(struct root))pmemobj_root(pop, (size_t)(1.5 * N)); /* use root object for something */ POBJ_NEW(pop, &D_RW(root)->foo, struct foo, NULL, NULL); pmemobj_close(pop); int fd = OPEN(path, O_RDWR); if (argc >= 3 && strcmp(argv[2], "trunc") == 0) { UT_OUT("truncating"); /* shrink file to N */ FTRUNCATE(fd, N); } size_t zero_len = Ut_pagesize; /* zero first page */ void *p = MMAP(NULL, zero_len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); memset(p, 0, zero_len); MUNMAP(p, zero_len); CLOSE(fd); /* create pool on existing file */ pop = pmemobj_create(path, LAYOUT_NAME, 0, S_IWUSR | S_IRUSR); if (pop == NULL) UT_FATAL("!pmemobj_create: %s", path); /* try to allocate 0.7*N */ root = (TOID(struct root))pmemobj_root(pop, (size_t)(0.5 * N)); if (TOID_IS_NULL(root)) UT_FATAL("couldn't allocate root object"); /* validate root object is empty */ if (!TOID_IS_NULL(D_RW(root)->foo)) UT_FATAL("root object is already filled after pmemobj_create!"); pmemobj_close(pop); DONE(NULL); }
1,968
21.123596
73
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/util_ctl/util_ctl.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * util_ctl.c -- tests for the control module */ #include "unittest.h" #include "ctl.h" #include "out.h" #include "pmemcommon.h" #include "fault_injection.h" #define LOG_PREFIX "ut" #define LOG_LEVEL_VAR "TEST_LOG_LEVEL" #define LOG_FILE_VAR "TEST_LOG_FILE" #define MAJOR_VERSION 1 #define MINOR_VERSION 0 struct pool { struct ctl *ctl; }; static char *testconfig_path; static int test_config_written; static int CTL_READ_HANDLER(test_rw)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC); int *arg_rw = arg; *arg_rw = 0; return 0; } static int CTL_WRITE_HANDLER(test_rw)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { int *arg_rw = arg; *arg_rw = 1; test_config_written++; return 0; } static struct ctl_argument CTL_ARG(test_rw) = CTL_ARG_INT; static int CTL_WRITE_HANDLER(test_wo)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { int *arg_wo = arg; *arg_wo = 1; test_config_written++; return 0; } static struct ctl_argument CTL_ARG(test_wo) = CTL_ARG_INT; #define TEST_CONFIG_VALUE "abcd" static int CTL_WRITE_HANDLER(test_config)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_CONFIG_INPUT); char *config_value = arg; UT_ASSERTeq(strcmp(config_value, TEST_CONFIG_VALUE), 0); test_config_written++; return 0; } static struct ctl_argument CTL_ARG(test_config) = CTL_ARG_STRING(8); struct complex_arg { int a; char b[5]; long long c; int d; }; #define COMPLEX_ARG_TEST_A 12345 #define COMPLEX_ARG_TEST_B "abcd" #define COMPLEX_ARG_TEST_C 3147483647 #define COMPLEX_ARG_TEST_D 1 static int CTL_WRITE_HANDLER(test_config_complex_arg)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_CONFIG_INPUT); struct complex_arg *c = arg; 
UT_ASSERTeq(c->a, COMPLEX_ARG_TEST_A); UT_ASSERT(strcmp(COMPLEX_ARG_TEST_B, c->b) == 0); UT_ASSERTeq(c->c, COMPLEX_ARG_TEST_C); UT_ASSERTeq(c->d, COMPLEX_ARG_TEST_D); test_config_written++; return 0; } static struct ctl_argument CTL_ARG(test_config_complex_arg) = { .dest_size = sizeof(struct complex_arg), .parsers = { CTL_ARG_PARSER_STRUCT(struct complex_arg, a, ctl_arg_integer), CTL_ARG_PARSER_STRUCT(struct complex_arg, b, ctl_arg_string), CTL_ARG_PARSER_STRUCT(struct complex_arg, c, ctl_arg_integer), CTL_ARG_PARSER_STRUCT(struct complex_arg, d, ctl_arg_boolean), CTL_ARG_PARSER_END } }; static int CTL_READ_HANDLER(test_ro)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC); int *arg_ro = arg; *arg_ro = 0; return 0; } static int CTL_READ_HANDLER(index_value)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC); long *index_value = arg; struct ctl_index *idx = PMDK_SLIST_FIRST(indexes); UT_ASSERT(strcmp(idx->name, "test_index") == 0); *index_value = idx->value; return 0; } static int CTL_RUNNABLE_HANDLER(test_runnable)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC); int *arg_runnable = arg; *arg_runnable = 0; return 0; } static const struct ctl_node CTL_NODE(test_index)[] = { CTL_LEAF_RO(index_value), CTL_NODE_END }; static const struct ctl_node CTL_NODE(debug)[] = { CTL_LEAF_RO(test_ro), CTL_LEAF_WO(test_wo), CTL_LEAF_RUNNABLE(test_runnable), CTL_LEAF_RW(test_rw), CTL_INDEXED(test_index), CTL_LEAF_WO(test_config), CTL_LEAF_WO(test_config_complex_arg), CTL_NODE_END }; static int CTL_WRITE_HANDLER(gtest_config)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_CONFIG_INPUT); char *config_value = arg; UT_ASSERTeq(strcmp(config_value, TEST_CONFIG_VALUE), 0); test_config_written = 
1; return 0; } static struct ctl_argument CTL_ARG(gtest_config) = CTL_ARG_STRING(8); static int CTL_READ_HANDLER(gtest_ro)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC); int *arg_ro = arg; *arg_ro = 0; return 0; } static const struct ctl_node CTL_NODE(global_debug)[] = { CTL_LEAF_RO(gtest_ro), CTL_LEAF_WO(gtest_config), CTL_NODE_END }; static int util_ctl_get(struct pool *pop, const char *name, void *arg) { LOG(3, "pop %p name %s arg %p", pop, name, arg); return ctl_query(pop ? pop->ctl : NULL, pop, CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_READ, arg); } static int util_ctl_set(struct pool *pop, const char *name, void *arg) { LOG(3, "pop %p name %s arg %p", pop, name, arg); return ctl_query(pop ? pop->ctl : NULL, pop, CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_WRITE, arg); } static int util_ctl_exec(struct pool *pop, const char *name, void *arg) { LOG(3, "pop %p name %s arg %p", pop, name, arg); return ctl_query(pop ? 
pop->ctl : NULL, pop, CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_RUNNABLE, arg); } static void test_ctl_parser(struct pool *pop) { errno = 0; int ret; ret = util_ctl_get(pop, NULL, NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "a.b.c.d", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "debug.", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, ".", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "..", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "1.2.3.4", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "debug.1.", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "debug.1.invalid", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); /* test methods set read to 0 and write to 1 if successful */ int arg_read = 1; int arg_write = 0; errno = 0; /* correct name, wrong args */ ret = util_ctl_get(pop, "debug.test_rw", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_set(pop, "debug.test_rw", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "debug.test_wo", &arg_read); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "debug.test_wo", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_set(pop, "debug.test_ro", &arg_write); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_set(pop, "debug.test_ro", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "debug.test_rw", &arg_read); UT_ASSERTeq(ret, 0); UT_ASSERTeq(arg_read, 0); UT_ASSERTeq(arg_write, 0); UT_ASSERTeq(errno, 0); ret = util_ctl_set(pop, "debug.test_rw", &arg_write); UT_ASSERTeq(ret, 0); 
UT_ASSERTeq(arg_read, 0); UT_ASSERTeq(arg_write, 1); arg_read = 1; arg_write = 0; ret = util_ctl_get(pop, "debug.test_ro", &arg_read); UT_ASSERTeq(ret, 0); UT_ASSERTeq(arg_read, 0); UT_ASSERTeq(arg_write, 0); arg_read = 1; arg_write = 0; ret = util_ctl_set(pop, "debug.test_wo", &arg_write); UT_ASSERTeq(ret, 0); UT_ASSERTeq(arg_read, 1); UT_ASSERTeq(arg_write, 1); long index_value = 0; ret = util_ctl_get(pop, "debug.5.index_value", &index_value); UT_ASSERTeq(ret, 0); UT_ASSERTeq(index_value, 5); ret = util_ctl_get(pop, "debug.10.index_value", &index_value); UT_ASSERTeq(ret, 0); UT_ASSERTeq(index_value, 10); arg_read = 1; arg_write = 1; int arg_runnable = 1; ret = util_ctl_exec(pop, "debug.test_runnable", &arg_runnable); UT_ASSERTeq(ret, 0); UT_ASSERTeq(arg_read, 1); UT_ASSERTeq(arg_write, 1); UT_ASSERTeq(arg_runnable, 0); } static void test_string_config(struct pool *pop) { UT_ASSERTne(pop, NULL); int ret; test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, ""); UT_ASSERTeq(ret, 0); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, ";;"); UT_ASSERTeq(ret, 0); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, ";=;"); UT_ASSERTeq(ret, -1); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "="); UT_ASSERTeq(ret, -1); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "debug.test_wo="); UT_ASSERTeq(ret, -1); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "=b"); UT_ASSERTeq(ret, -1); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "debug.test_wo=111=222"); UT_ASSERTeq(ret, -1); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, 
"debug.test_wo=333;debug.test_rw=444;"); UT_ASSERTeq(ret, 0); UT_ASSERTeq(test_config_written, 2); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "debug.test_config="TEST_CONFIG_VALUE";"); UT_ASSERTeq(ret, 0); UT_ASSERTeq(test_config_written, 1); } static void config_file_create(const char *buf) { /* the test script will take care of removing this file for us */ FILE *f = os_fopen(testconfig_path, "w+"); fwrite(buf, sizeof(char), strlen(buf), f); fclose(f); } static void create_and_test_file_config(struct pool *pop, const char *buf, int ret, int result) { config_file_create(buf); test_config_written = 0; int r = ctl_load_config_from_file(pop ? pop->ctl : NULL, pop, testconfig_path); UT_ASSERTeq(r, ret); UT_ASSERTeq(test_config_written, result); } static void test_too_large_file(struct pool *pop) { char *too_large_buf = calloc(1, 1 << 21); UT_ASSERTne(too_large_buf, NULL); memset(too_large_buf, 0xc, (1 << 21) - 1); config_file_create(too_large_buf); int ret = ctl_load_config_from_file(pop->ctl, pop, testconfig_path); UT_ASSERTne(ret, 0); free(too_large_buf); } static void test_file_config(struct pool *pop) { create_and_test_file_config(pop, "debug.test_config="TEST_CONFIG_VALUE";", 0, 1); create_and_test_file_config(pop, "debug.test_config="TEST_CONFIG_VALUE";" "debug.test_config="TEST_CONFIG_VALUE";", 0, 2); create_and_test_file_config(pop, "#this is a comment\n" "debug.test_config="TEST_CONFIG_VALUE";", 0, 1); create_and_test_file_config(pop, "debug.#this is a comment\n" "test_config#this is a comment\n" "="TEST_CONFIG_VALUE";", 0, 1); create_and_test_file_config(pop, "debug.test_config="TEST_CONFIG_VALUE";#this is a comment", 0, 1); create_and_test_file_config(pop, "\n\n\ndebug\n.\ntest\t_\tconfig="TEST_CONFIG_VALUE";\n", 0, 1); create_and_test_file_config(pop, " d e b u g . 
t e s t _ c o n f i g = "TEST_CONFIG_VALUE";", 0, 1); create_and_test_file_config(pop, "#debug.test_config="TEST_CONFIG_VALUE";", 0, 0); create_and_test_file_config(pop, "debug.#this is a comment\n" "test_config#this is a not properly terminated comment" "="TEST_CONFIG_VALUE";", -1, 0); create_and_test_file_config(pop, "invalid", -1, 0); create_and_test_file_config(pop, "", 0, 0); create_and_test_file_config(pop, "debug.test_config_complex_arg=;", -1, 0); create_and_test_file_config(pop, "debug.test_config_complex_arg=1,2,3;", -1, 0); create_and_test_file_config(pop, "debug.test_config_complex_arg=12345,abcd,,1;", -1, 0); create_and_test_file_config(pop, "debug.test_config_complex_arg=12345,abcd,3147483647,1;", 0, 1); create_and_test_file_config(NULL, "global_debug.gtest_config="TEST_CONFIG_VALUE";", 0, 1); create_and_test_file_config(NULL, "private.missing.query=1;" "global_debug.gtest_config="TEST_CONFIG_VALUE";", 0, 1); test_too_large_file(pop); int ret = ctl_load_config_from_file(pop->ctl, pop, "does_not_exist"); UT_ASSERTne(ret, 0); } static void test_ctl_global_namespace(struct pool *pop) { int arg_read = 1; int ret = util_ctl_get(pop, "global_debug.gtest_ro", &arg_read); UT_ASSERTeq(ret, 0); UT_ASSERTeq(arg_read, 0); } static void test_ctl_arg_parsers() { char *input; input = ""; int boolean = -1; int ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(boolean, -1); input = "abcdefgh"; boolean = -1; ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(boolean, -1); input = "-999"; boolean = -1; ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(boolean, -1); input = "N"; boolean = -1; ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(boolean, 0); input = "0"; boolean = -1; ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(boolean, 0); input = "yes"; boolean = -1; ret = 
ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(boolean, 1); input = "Yes"; boolean = -1; ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(boolean, 1); input = "1"; boolean = -1; ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(boolean, 1); input = "1234"; boolean = -1; ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(boolean, 1); input = ""; int small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(small_int, -1); input = "abcd"; small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(small_int, -1); input = "12345678901234567890"; small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(small_int, -1); input = "-12345678901234567890"; small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(small_int, -1); input = "2147483648"; /* INT_MAX + 1 */ small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(small_int, -1); input = "-2147483649"; /* INT_MIN - 2 */ small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(small_int, -1); input = "0"; small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(small_int, 0); input = "500"; small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(small_int, 500); input = "-500"; small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(small_int, -500); input = ""; long long ll_int = -1; ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(ll_int, -1); input = 
"12345678901234567890"; ll_int = -1; ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(ll_int, -1); input = "-12345678901234567890"; ll_int = -1; ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(ll_int, -1); input = "2147483648"; ll_int = -1; ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(ll_int, 2147483648); input = "-2147483649"; ll_int = -1; ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(ll_int, -2147483649LL); input = ""; char string[1000] = {0}; ret = ctl_arg_string(input, string, 0); UT_ASSERTeq(ret, -1); input = "abcd"; ret = ctl_arg_string(input, string, 3); UT_ASSERTeq(ret, -1); input = "abcdefg"; ret = ctl_arg_string(input, string, 3); UT_ASSERTeq(ret, -1); input = "abcd"; ret = ctl_arg_string(input, string, 4); UT_ASSERTeq(ret, -1); input = "abc"; ret = ctl_arg_string(input, string, 4); UT_ASSERTeq(ret, 0); UT_ASSERT(strcmp(input, string) == 0); } static void test_fault_injection(struct pool *pop) { if (!core_fault_injection_enabled()) return; UT_ASSERTne(pop, NULL); core_inject_fault_at(PMEM_MALLOC, 1, "ctl_parse_args"); test_config_written = 0; int ret = ctl_load_config_from_string(pop->ctl, pop, "debug.test_wo=333;debug.test_rw=444;"); UT_ASSERTne(ret, 0); UT_ASSERTeq(errno, ENOMEM); } int main(int argc, char *argv[]) { START(argc, argv, "util_ctl"); common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR, MAJOR_VERSION, MINOR_VERSION); if (argc != 2) UT_FATAL("usage: %s testconfig", argv[0]); testconfig_path = argv[1]; CTL_REGISTER_MODULE(NULL, global_debug); test_ctl_global_namespace(NULL); struct pool *pop = malloc(sizeof(pop)); pop->ctl = ctl_new(); test_ctl_global_namespace(NULL); CTL_REGISTER_MODULE(pop->ctl, debug); test_ctl_global_namespace(pop); test_fault_injection(pop); test_ctl_parser(pop); test_string_config(pop); test_file_config(pop); test_ctl_arg_parsers(); ctl_delete(pop->ctl); 
free(pop); common_fini(); DONE(NULL); }
17,492
22.639189
72
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_has_auto_flush_win/mocks_windows.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018, Intel Corporation */ /* * mocks_windows.h -- redefinitions of EnumSystemFirmwareTables and * GetSystemFirmwareTable * * This file is Windows-specific. * * This file should be included (i.e. using Forced Include) by libpmem * files, when compiled for the purpose of pmem_has_auto_flush_win test. * It would replace default implementation with mocked functions defined * in mocks_windows.c * * This WRAP_REAL define could be also passed as preprocessor definition. */ #include <windows.h> #ifndef WRAP_REAL #define EnumSystemFirmwareTables __wrap_EnumSystemFirmwareTables #define GetSystemFirmwareTable __wrap_GetSystemFirmwareTable UINT __wrap_EnumSystemFirmwareTables(DWORD FirmwareTableProviderSignature, PVOID pFirmwareTableEnumBuffer, DWORD BufferSize); UINT __wrap_GetSystemFirmwareTable(DWORD FirmwareTableProviderSignature, DWORD FirmwareTableID, PVOID pFirmwareTableBuffer, DWORD BufferSize); #endif
988
33.103448
73
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_has_auto_flush_win/mocks_windows.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */

/*
 * mocks_windows.c -- mocked functions used in auto_flush_windows.c
 *
 * The mocks intercept EnumSystemFirmwareTables/GetSystemFirmwareTable
 * and fabricate an ACPI NFIT table whose platform-capabilities fields
 * are driven by the Is_nfit/Pc_type/Pc_capabilities knobs set by the
 * test binary (pmem_has_auto_flush_win.c).
 */

#include "util.h"
#include "unittest.h"
#include "set.h"
#include "pmemcommon.h"
#include "auto_flush_windows.h"
#include "pmem_has_auto_flush_win.h"
#include <errno.h>

/* test knobs -- defined in pmem_has_auto_flush_win.c */
extern size_t Is_nfit;
extern size_t Pc_type;
extern size_t Pc_capabilities;

FUNC_MOCK_DLLIMPORT(EnumSystemFirmwareTables, UINT,
	DWORD FirmwareTableProviderSignature,
	PVOID pFirmwareTableBuffer,
	DWORD BufferSize)
FUNC_MOCK_RUN_DEFAULT {
	/* only ACPI queries are mocked; everything else goes to the OS */
	if (FirmwareTableProviderSignature != ACPI_SIGNATURE)
		return _FUNC_REAL(EnumSystemFirmwareTables)
			(FirmwareTableProviderSignature,
			pFirmwareTableBuffer, BufferSize);
	if (Is_nfit == 1 && pFirmwareTableBuffer != NULL && BufferSize != 0) {
		UT_OUT("Mock NFIT available");
		/*
		 * NOTE(review): strncpy bounded by the caller's BufferSize;
		 * presumably the caller only scans for the NFIT signature,
		 * so an unterminated copy is acceptable here -- confirm
		 * against auto_flush_windows.c.
		 */
		strncpy(pFirmwareTableBuffer, NFIT_STR_SIGNATURE, BufferSize);
	}
	/* report how many bytes a full enumeration would need */
	return NFIT_SIGNATURE_LEN + sizeof(struct nfit_header);
} FUNC_MOCK_END

FUNC_MOCK_DLLIMPORT(GetSystemFirmwareTable, UINT,
	DWORD FirmwareTableProviderSignature,
	DWORD FirmwareTableID,
	PVOID pFirmwareTableBuffer,
	DWORD BufferSize)
FUNC_MOCK_RUN_DEFAULT {
	/* pass through anything that is not the mocked ACPI/NFIT query */
	if (FirmwareTableProviderSignature != ACPI_SIGNATURE ||
		FirmwareTableID != NFIT_REV_SIGNATURE)
		return _FUNC_REAL(GetSystemFirmwareTable)
			(FirmwareTableProviderSignature, FirmwareTableID,
			pFirmwareTableBuffer, BufferSize);
	/* size-probe call (NULL/0): report the required buffer size */
	if (pFirmwareTableBuffer == NULL && BufferSize == 0) {
		UT_OUT("GetSystemFirmwareTable mock");
		return sizeof(struct platform_capabilities) +
			sizeof(struct nfit_header);
	}
	struct nfit_header nfit;
	struct platform_capabilities pc;

	/* fill nfit */
	char sig[NFIT_SIGNATURE_LEN] = NFIT_STR_SIGNATURE;
	strncpy(nfit.signature, sig, NFIT_SIGNATURE_LEN);
	nfit.length = sizeof(nfit);
	memcpy(pFirmwareTableBuffer, &nfit, nfit.length);

	/* fill platform_capabilities (placed right after the nfit header) */
	pc.length = sizeof(pc);
	/* [...] 0000 0011 - proper capabilities bits combination */
	pc.capabilities = (uint32_t)Pc_capabilities;
	pc.type = (uint16_t)Pc_type;
	memcpy((char *)pFirmwareTableBuffer + nfit.length, &pc, pc.length);
	return BufferSize;
} FUNC_MOCK_END
2,173
28.378378
68
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_has_auto_flush_win/pmem_has_auto_flush_win.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * pmem_has_auto_flush_win.c -- unit test for pmem_has_auto_flush_win() * * usage: pmem_has_auto_flush_win <option> * options: * n - is nfit available or not (y or n) * type: number of platform capabilities structure * capabilities: platform capabilities bits */ #include <stdbool.h> #include <errno.h> #include "unittest.h" #include "pmem.h" #include "pmemcommon.h" #include "set.h" #include "mocks_windows.h" #include "pmem_has_auto_flush_win.h" #include "util.h" #define LOG_PREFIX "ut" #define LOG_LEVEL_VAR "TEST_LOG_LEVEL" #define LOG_FILE_VAR "TEST_LOG_FILE" #define MAJOR_VERSION 1 #define MINOR_VERSION 0 size_t Is_nfit = 0; size_t Pc_type = 0; size_t Pc_capabilities = 3; int main(int argc, char *argv[]) { START(argc, argv, "pmem_has_auto_flush_win"); common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR, MAJOR_VERSION, MINOR_VERSION); if (argc < 4) UT_FATAL("usage: pmem_has_auto_flush_win " "<option> <type> <capabilities>", argv[0]); pmem_init(); Pc_type = (size_t)atoi(argv[2]); Pc_capabilities = (size_t)atoi(argv[3]); Is_nfit = argv[1][0] == 'y'; int eADR = pmem_has_auto_flush(); UT_OUT("pmem_has_auto_flush ret: %d", eADR); common_fini(); DONE(NULL); }
1,305
21.517241
71
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_tx_alloc/obj_tx_alloc.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_tx_alloc.c -- unit test for pmemobj_tx_alloc and pmemobj_tx_zalloc */ #include <assert.h> #include <sys/param.h> #include <string.h> #include "unittest.h" #include "libpmemobj.h" #include "util.h" #include "valgrind_internal.h" #define LAYOUT_NAME "tx_alloc" #define TEST_VALUE_1 1 #define TEST_VALUE_2 2 #define OBJ_SIZE (200 * 1024) enum type_number { TYPE_NO_TX, TYPE_COMMIT, TYPE_ABORT, TYPE_ZEROED_COMMIT, TYPE_ZEROED_ABORT, TYPE_XCOMMIT, TYPE_XABORT, TYPE_XZEROED_COMMIT, TYPE_XZEROED_ABORT, TYPE_XNOFLUSHED_COMMIT, TYPE_COMMIT_NESTED1, TYPE_COMMIT_NESTED2, TYPE_ABORT_NESTED1, TYPE_ABORT_NESTED2, TYPE_ABORT_AFTER_NESTED1, TYPE_ABORT_AFTER_NESTED2, TYPE_OOM, }; TOID_DECLARE(struct object, TYPE_OOM); struct object { size_t value; char data[OBJ_SIZE - sizeof(size_t)]; }; /* * do_tx_alloc_oom -- allocates objects until OOM */ static void do_tx_alloc_oom(PMEMobjpool *pop) { int do_alloc = 1; size_t alloc_cnt = 0; do { TX_BEGIN(pop) { TOID(struct object) obj = TX_NEW(struct object); D_RW(obj)->value = alloc_cnt; } TX_ONCOMMIT { alloc_cnt++; } TX_ONABORT { do_alloc = 0; } TX_END } while (do_alloc); size_t bitmap_size = howmany(alloc_cnt, 8); char *bitmap = (char *)MALLOC(bitmap_size); memset(bitmap, 0, bitmap_size); size_t obj_cnt = 0; TOID(struct object) i; POBJ_FOREACH_TYPE(pop, i) { UT_ASSERT(D_RO(i)->value < alloc_cnt); UT_ASSERT(!isset(bitmap, D_RO(i)->value)); setbit(bitmap, D_RO(i)->value); obj_cnt++; } FREE(bitmap); UT_ASSERTeq(obj_cnt, alloc_cnt); TOID(struct object) o = POBJ_FIRST(pop, struct object); while (!TOID_IS_NULL(o)) { TOID(struct object) next = POBJ_NEXT(o); POBJ_FREE(&o); o = next; } } /* * do_tx_alloc_abort_after_nested -- aborts transaction after allocation * in nested transaction */ static void do_tx_alloc_abort_after_nested(PMEMobjpool *pop) { TOID(struct object) obj1; TOID(struct object) obj2; TX_BEGIN(pop) { TOID_ASSIGN(obj1, 
pmemobj_tx_alloc(sizeof(struct object), TYPE_ABORT_AFTER_NESTED1)); UT_ASSERT(!TOID_IS_NULL(obj1)); D_RW(obj1)->value = TEST_VALUE_1; TX_BEGIN(pop) { TOID_ASSIGN(obj2, pmemobj_tx_zalloc( sizeof(struct object), TYPE_ABORT_AFTER_NESTED2)); UT_ASSERT(!TOID_IS_NULL(obj2)); UT_ASSERT(util_is_zeroed(D_RO(obj2), sizeof(struct object))); D_RW(obj2)->value = TEST_VALUE_2; } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj2)->value, TEST_VALUE_2); } TX_ONABORT { UT_ASSERT(0); } TX_END pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj1, OID_NULL); TOID_ASSIGN(obj2, OID_NULL); } TX_END TOID(struct object) first; /* check the obj1 object */ UT_ASSERT(TOID_IS_NULL(obj1)); first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED1); UT_ASSERT(TOID_IS_NULL(first)); /* check the obj2 object */ UT_ASSERT(TOID_IS_NULL(obj2)); first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED2); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_alloc_abort_nested -- aborts transaction in nested transaction */ static void do_tx_alloc_abort_nested(PMEMobjpool *pop) { TOID(struct object) obj1; TOID(struct object) obj2; TX_BEGIN(pop) { TOID_ASSIGN(obj1, pmemobj_tx_alloc(sizeof(struct object), TYPE_ABORT_NESTED1)); UT_ASSERT(!TOID_IS_NULL(obj1)); D_RW(obj1)->value = TEST_VALUE_1; TX_BEGIN(pop) { TOID_ASSIGN(obj2, pmemobj_tx_zalloc( sizeof(struct object), TYPE_ABORT_NESTED2)); UT_ASSERT(!TOID_IS_NULL(obj2)); UT_ASSERT(util_is_zeroed(D_RO(obj2), sizeof(struct object))); D_RW(obj2)->value = TEST_VALUE_2; pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj2, OID_NULL); } TX_END } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj1, OID_NULL); } TX_END TOID(struct object) first; /* check the obj1 object */ UT_ASSERT(TOID_IS_NULL(obj1)); first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED1); UT_ASSERT(TOID_IS_NULL(first)); /* check the obj2 object */ UT_ASSERT(TOID_IS_NULL(obj2)); first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED2); 
UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_alloc_commit_nested -- allocates two objects, one in nested transaction */ static void do_tx_alloc_commit_nested(PMEMobjpool *pop) { TOID(struct object) obj1; TOID(struct object) obj2; TX_BEGIN(pop) { TOID_ASSIGN(obj1, pmemobj_tx_alloc(sizeof(struct object), TYPE_COMMIT_NESTED1)); UT_ASSERT(!TOID_IS_NULL(obj1)); D_RW(obj1)->value = TEST_VALUE_1; TX_BEGIN(pop) { TOID_ASSIGN(obj2, pmemobj_tx_zalloc( sizeof(struct object), TYPE_COMMIT_NESTED2)); UT_ASSERT(!TOID_IS_NULL(obj2)); UT_ASSERT(util_is_zeroed(D_RO(obj2), sizeof(struct object))); D_RW(obj2)->value = TEST_VALUE_2; } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj1)->value, TEST_VALUE_1); UT_ASSERTeq(D_RO(obj2)->value, TEST_VALUE_2); } TX_ONABORT { UT_ASSERT(0); } TX_END } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj1)->value, TEST_VALUE_1); UT_ASSERTeq(D_RO(obj2)->value, TEST_VALUE_2); } TX_ONABORT { UT_ASSERT(0); } TX_END TOID(struct object) first; TOID(struct object) next; /* check the obj1 object */ TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED1)); UT_ASSERT(TOID_EQUALS(first, obj1)); UT_ASSERTeq(D_RO(first)->value, TEST_VALUE_1); TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid)); UT_ASSERT(TOID_IS_NULL(next)); /* check the obj2 object */ TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED2)); UT_ASSERT(TOID_EQUALS(first, obj2)); UT_ASSERTeq(D_RO(first)->value, TEST_VALUE_2); TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid)); UT_ASSERT(TOID_IS_NULL(next)); } /* * do_tx_alloc_abort -- allocates an object and aborts the transaction */ static void do_tx_alloc_abort(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_alloc(sizeof(struct object), TYPE_ABORT)); UT_ASSERT(!TOID_IS_NULL(obj)); D_RW(obj)->value = TEST_VALUE_1; pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, 
TYPE_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_alloc_zerolen -- allocates an object of zero size to trigger tx abort */ static void do_tx_alloc_zerolen(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_alloc(0, TYPE_ABORT)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_alloc_huge -- allocates a huge object to trigger tx abort */ static void do_tx_alloc_huge(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_alloc(PMEMOBJ_MAX_ALLOC_SIZE + 1, TYPE_ABORT)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_alloc_commit -- allocates and object */ static void do_tx_alloc_commit(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_alloc(sizeof(struct object), TYPE_COMMIT)); UT_ASSERT(!TOID_IS_NULL(obj)); D_RW(obj)->value = TEST_VALUE_1; } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); } TX_ONABORT { UT_ASSERT(0); } TX_END TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT)); UT_ASSERT(TOID_EQUALS(first, obj)); UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value); TOID(struct object) next; next = POBJ_NEXT(first); UT_ASSERT(TOID_IS_NULL(next)); } /* * do_tx_zalloc_abort -- allocates a zeroed object and aborts the transaction */ static void do_tx_zalloc_abort(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_zalloc(sizeof(struct object), TYPE_ZEROED_ABORT)); UT_ASSERT(!TOID_IS_NULL(obj)); 
UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object))); D_RW(obj)->value = TEST_VALUE_1; pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_zalloc_zerolen -- allocate an object of zero size to trigger tx abort */ static void do_tx_zalloc_zerolen(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_zalloc(0, TYPE_ZEROED_ABORT)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_zalloc_huge -- allocates a huge object to trigger tx abort */ static void do_tx_zalloc_huge(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_zalloc(PMEMOBJ_MAX_ALLOC_SIZE + 1, TYPE_ZEROED_ABORT)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_zalloc_commit -- allocates zeroed object */ static void do_tx_zalloc_commit(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_zalloc(sizeof(struct object), TYPE_ZEROED_COMMIT)); UT_ASSERT(!TOID_IS_NULL(obj)); UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object))); D_RW(obj)->value = TEST_VALUE_1; } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); } TX_ONABORT { UT_ASSERT(0); } TX_END TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_COMMIT)); UT_ASSERT(TOID_EQUALS(first, obj)); 
UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value); TOID(struct object) next; next = POBJ_NEXT(first); UT_ASSERT(TOID_IS_NULL(next)); } /* * do_tx_xalloc_abort -- allocates a zeroed object and aborts the transaction */ static void do_tx_xalloc_abort(PMEMobjpool *pop) { /* xalloc 0 */ TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object), TYPE_XABORT, 0)); UT_ASSERT(!TOID_IS_NULL(obj)); D_RW(obj)->value = TEST_VALUE_1; pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XABORT)); UT_ASSERT(TOID_IS_NULL(first)); /* xalloc ZERO */ TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object), TYPE_XZEROED_ABORT, POBJ_XALLOC_ZERO)); UT_ASSERT(!TOID_IS_NULL(obj)); UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object))); D_RW(obj)->value = TEST_VALUE_1; pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_xalloc_zerolen -- allocate an object of zero size to trigger tx abort */ static void do_tx_xalloc_zerolen(PMEMobjpool *pop) { /* xalloc 0 */ TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XABORT, 0)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); /* xalloc 0 with POBJ_XALLOC_NO_ABORT flag */ TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XABORT, POBJ_XALLOC_NO_ABORT)); } TX_ONCOMMIT { TOID_ASSIGN(obj, OID_NULL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END UT_ASSERT(TOID_IS_NULL(obj)); /* alloc 0 with pmemobj_tx_set_failure_behavior called */ TX_BEGIN(pop) { 
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); TOID_ASSIGN(obj, pmemobj_tx_alloc(0, TYPE_XABORT)); } TX_ONCOMMIT { TOID_ASSIGN(obj, OID_NULL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END UT_ASSERT(TOID_IS_NULL(obj)); /* xalloc 0 with pmemobj_tx_set_failure_behavior called */ TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XABORT, 0)); } TX_ONCOMMIT { TOID_ASSIGN(obj, OID_NULL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END UT_ASSERT(TOID_IS_NULL(obj)); /* zalloc 0 with pmemobj_tx_set_failure_behavior called */ TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); TOID_ASSIGN(obj, pmemobj_tx_zalloc(0, TYPE_XABORT)); } TX_ONCOMMIT { TOID_ASSIGN(obj, OID_NULL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XABORT)); UT_ASSERT(TOID_IS_NULL(first)); /* xalloc ZERO */ TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XZEROED_ABORT, POBJ_XALLOC_ZERO)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_xalloc_huge -- allocates a huge object to trigger tx abort */ static void do_tx_xalloc_huge(PMEMobjpool *pop) { /* xalloc 0 */ TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(PMEMOBJ_MAX_ALLOC_SIZE + 1, TYPE_XABORT, 0)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XABORT)); UT_ASSERT(TOID_IS_NULL(first)); /* xalloc ZERO */ TX_BEGIN(pop) { 
TOID_ASSIGN(obj, pmemobj_tx_xalloc(PMEMOBJ_MAX_ALLOC_SIZE + 1, TYPE_XZEROED_ABORT, POBJ_XALLOC_ZERO)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); /* * do xalloc until overfilled and then * free last successful allocation */ uint64_t tot_allocated = 0, alloc_size = (5 * 1024 *1024); int rc = 0; PMEMoid oid, prev_oid; POBJ_FOREACH_SAFE(pop, oid, prev_oid) { pmemobj_free(&oid); } TOID_ASSIGN(first, pmemobj_first(pop)); UT_ASSERT(TOID_IS_NULL(first)); TX_BEGIN(pop) { while (rc == 0) { oid = pmemobj_tx_xalloc(alloc_size, 0, POBJ_XALLOC_NO_ABORT); if (oid.off == 0) rc = -1; else { tot_allocated += alloc_size; prev_oid = oid; } } rc = pmemobj_tx_free(prev_oid); } TX_ONCOMMIT { UT_ASSERTeq(errno, ENOMEM); UT_ASSERTeq(rc, 0); } TX_ONABORT { UT_ASSERT(0); } TX_END } /* * do_tx_xalloc_commit -- allocates zeroed object */ static void do_tx_xalloc_commit(PMEMobjpool *pop) { /* xalloc 0 */ TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object), TYPE_XCOMMIT, 0)); UT_ASSERT(!TOID_IS_NULL(obj)); D_RW(obj)->value = TEST_VALUE_1; } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); } TX_ONABORT { UT_ASSERT(0); } TX_END TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XCOMMIT)); UT_ASSERT(TOID_EQUALS(first, obj)); UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value); TOID(struct object) next; TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid)); UT_ASSERT(TOID_IS_NULL(next)); /* xalloc ZERO */ TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object), TYPE_XZEROED_COMMIT, POBJ_XALLOC_ZERO)); UT_ASSERT(!TOID_IS_NULL(obj)); UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object))); D_RW(obj)->value = TEST_VALUE_1; } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); } TX_ONABORT 
{ UT_ASSERT(0); } TX_END TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_COMMIT)); UT_ASSERT(TOID_EQUALS(first, obj)); UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value); TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid)); UT_ASSERT(TOID_IS_NULL(next)); } /* * do_tx_xalloc_noflush -- allocates zeroed object */ static void do_tx_xalloc_noflush(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object), TYPE_XNOFLUSHED_COMMIT, POBJ_XALLOC_NO_FLUSH)); UT_ASSERT(!TOID_IS_NULL(obj)); D_RW(obj)->data[OBJ_SIZE - sizeof(size_t) - 1] = TEST_VALUE_1; /* let pmemcheck find we didn't flush it */ } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj)->data[OBJ_SIZE - sizeof(size_t) - 1], TEST_VALUE_1); } TX_ONABORT { UT_ASSERT(0); } TX_END TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XNOFLUSHED_COMMIT)); UT_ASSERT(TOID_EQUALS(first, obj)); UT_ASSERTeq(D_RO(first)->data[OBJ_SIZE - sizeof(size_t) - 1], D_RO(obj)->data[OBJ_SIZE - sizeof(size_t) - 1]); TOID(struct object) next; TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid)); UT_ASSERT(TOID_IS_NULL(next)); } /* * do_tx_root -- retrieve root inside of transaction */ static void do_tx_root(PMEMobjpool *pop) { size_t root_size = 24; TX_BEGIN(pop) { PMEMoid root = pmemobj_root(pop, root_size); UT_ASSERT(!OID_IS_NULL(root)); UT_ASSERT(util_is_zeroed(pmemobj_direct(root), root_size)); UT_ASSERTeq(root_size, pmemobj_root_size(pop)); } TX_ONABORT { UT_ASSERT(0); } TX_END } /* * do_tx_alloc_many -- allocates many objects inside of a single transaction */ static void do_tx_alloc_many(PMEMobjpool *pop) { #define TX_ALLOC_COUNT 70 /* bigger than max reservations */ PMEMoid oid, oid2; POBJ_FOREACH_SAFE(pop, oid, oid2) { pmemobj_free(&oid); } TOID(struct object) first; TOID_ASSIGN(first, pmemobj_first(pop)); UT_ASSERT(TOID_IS_NULL(first)); PMEMoid oids[TX_ALLOC_COUNT]; TX_BEGIN(pop) { for (int i = 0; i < TX_ALLOC_COUNT; ++i) { oids[i] = pmemobj_tx_alloc(1, 0); 
UT_ASSERT(!OID_IS_NULL(oids[i])); } } TX_ONABORT { UT_ASSERT(0); } TX_END TX_BEGIN(pop) { /* empty tx to make sure there's no leftover state */ } TX_ONABORT { UT_ASSERT(0); } TX_END TX_BEGIN(pop) { for (int i = 0; i < TX_ALLOC_COUNT; ++i) { pmemobj_tx_free(oids[i]); } } TX_ONABORT { UT_ASSERT(0); } TX_END TOID_ASSIGN(first, pmemobj_first(pop)); UT_ASSERT(TOID_IS_NULL(first)); #undef TX_ALLOC_COUNT } int main(int argc, char *argv[]) { START(argc, argv, "obj_tx_alloc"); util_init(); if (argc != 2) UT_FATAL("usage: %s [file]", argv[0]); PMEMobjpool *pop; if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, 0, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create"); do_tx_root(pop); VALGRIND_WRITE_STATS; /* alloc */ do_tx_alloc_commit(pop); VALGRIND_WRITE_STATS; do_tx_alloc_abort(pop); VALGRIND_WRITE_STATS; do_tx_alloc_zerolen(pop); VALGRIND_WRITE_STATS; do_tx_alloc_huge(pop); VALGRIND_WRITE_STATS; /* zalloc */ do_tx_zalloc_commit(pop); VALGRIND_WRITE_STATS; do_tx_zalloc_abort(pop); VALGRIND_WRITE_STATS; do_tx_zalloc_zerolen(pop); VALGRIND_WRITE_STATS; do_tx_zalloc_huge(pop); VALGRIND_WRITE_STATS; /* xalloc */ do_tx_xalloc_commit(pop); VALGRIND_WRITE_STATS; do_tx_xalloc_abort(pop); VALGRIND_WRITE_STATS; do_tx_xalloc_zerolen(pop); VALGRIND_WRITE_STATS; do_tx_xalloc_huge(pop); VALGRIND_WRITE_STATS; /* alloc */ do_tx_alloc_commit_nested(pop); VALGRIND_WRITE_STATS; do_tx_alloc_abort_nested(pop); VALGRIND_WRITE_STATS; do_tx_alloc_abort_after_nested(pop); VALGRIND_WRITE_STATS; do_tx_alloc_oom(pop); VALGRIND_WRITE_STATS; do_tx_alloc_many(pop); VALGRIND_WRITE_STATS; do_tx_xalloc_noflush(pop); pmemobj_close(pop); DONE(NULL); }
20,667
21.862832
80
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/blk_pool/blk_pool.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * blk_pool.c -- unit test for pmemblk_create() and pmemblk_open() * * usage: blk_pool op path bsize [poolsize mode] * * op can be: * c - create * o - open * f - do fault injection * * "poolsize" and "mode" arguments are ignored for "open" */ #include "unittest.h" #include "../libpmemblk/blk.h" #define MB ((size_t)1 << 20) static void do_fault_injection(const char *path, size_t bsize, size_t poolsize, unsigned mode) { if (!pmemblk_fault_injection_enabled()) return; pmemblk_inject_fault_at(PMEM_MALLOC, 1, "blk_runtime_init"); PMEMblkpool *pbp = pmemblk_create(path, bsize, poolsize, mode); UT_ASSERTeq(pbp, NULL); UT_ASSERTeq(errno, ENOMEM); } static void pool_create(const char *path, size_t bsize, size_t poolsize, unsigned mode) { PMEMblkpool *pbp = pmemblk_create(path, bsize, poolsize, mode); if (pbp == NULL) UT_OUT("!%s: pmemblk_create", path); else { os_stat_t stbuf; STAT(path, &stbuf); UT_OUT("%s: file size %zu usable blocks %zu mode 0%o", path, stbuf.st_size, pmemblk_nblock(pbp), stbuf.st_mode & 0777); pmemblk_close(pbp); int result = pmemblk_check(path, bsize); if (result < 0) UT_OUT("!%s: pmemblk_check", path); else if (result == 0) UT_OUT("%s: pmemblk_check: not consistent", path); else UT_ASSERTeq(pmemblk_check(path, bsize * 2), -1); } } static void pool_open(const char *path, size_t bsize) { PMEMblkpool *pbp = pmemblk_open(path, bsize); if (pbp == NULL) UT_OUT("!%s: pmemblk_open", path); else { UT_OUT("%s: pmemblk_open: Success", path); pmemblk_close(pbp); } } int main(int argc, char *argv[]) { START(argc, argv, "blk_pool"); if (argc < 4) UT_FATAL("usage: %s op path bsize [poolsize mode]", argv[0]); size_t bsize = strtoul(argv[3], NULL, 0); size_t poolsize; unsigned mode; switch (argv[1][0]) { case 'c': poolsize = strtoul(argv[4], NULL, 0) * MB; /* in megabytes */ mode = strtoul(argv[5], NULL, 8); pool_create(argv[2], bsize, poolsize, mode); break; case 'o': 
pool_open(argv[2], bsize); break; case 'f': poolsize = strtoul(argv[4], NULL, 0) * MB; /* in megabytes */ mode = strtoul(argv[5], NULL, 8); do_fault_injection(argv[2], bsize, poolsize, mode); break; default: UT_FATAL("unknown operation"); } DONE(NULL); }
2,377
20.423423
75
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem_memcpy/pmem_memcpy.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */

/*
 * pmem_memcpy.c -- unit test for doing a memcpy
 *
 * usage: pmem_memcpy file destoff srcoff length
 *
 */

#include "unittest.h"
#include "util_pmem.h"
#include "file.h"
#include "memcpy_common.h"

/* adapts pmem_memcpy_persist() to the flag-taking memcpy_fn signature */
static void *
pmem_memcpy_persist_wrapper(void *pmemdest, const void *src, size_t len,
		unsigned flags)
{
	(void) flags;
	return pmem_memcpy_persist(pmemdest, src, len);
}

/* adapts pmem_memcpy_nodrain() to the flag-taking memcpy_fn signature */
static void *
pmem_memcpy_nodrain_wrapper(void *pmemdest, const void *src, size_t len,
		unsigned flags)
{
	(void) flags;
	return pmem_memcpy_nodrain(pmemdest, src, len);
}

/* persist variant used for device-DAX mappings */
static void
do_persist_ddax(const void *ptr, size_t size)
{
	util_persist_auto(1, ptr, size);
}

/* persist variant used for regular (non-DAX) mappings */
static void
do_persist(const void *ptr, size_t size)
{
	util_persist_auto(0, ptr, size);
}

/*
 * swap_mappings - swap given two mapped regions.
 *
 * Try swapping src and dest by unmapping src, mapping a new dest with
 * the original src address as a hint. If successful, unmap original dest.
 * Map a new src with the original dest as a hint.
 */
static void
swap_mappings(char **dest, char **src, size_t size, int fd)
{
	char *d = *dest;
	char *s = *src;
	char *td, *ts;

	MUNMAP(*src, size);

	/* mmap destination using src addr as a hint */
	td = MMAP(s, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

	MUNMAP(*dest, size);
	*dest = td;

	/* mmap src using original destination addr as a hint */
	ts = MMAP(d, size, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS,
		-1, 0);
	*src = ts;
}

/*
 * do_memcpy_variants -- do_memcpy wrapper that tests multiple variants
 * of memcpy functions
 */
static void
do_memcpy_variants(int fd, char *dest, int dest_off, char *src, int src_off,
	size_t bytes, size_t mapped_len, const char *file_name, persist_fn p)
{
	/* persist and nodrain wrappers, then every flag combination */
	do_memcpy(fd, dest, dest_off, src, src_off, bytes, mapped_len,
		file_name, pmem_memcpy_persist_wrapper, 0, p);
	do_memcpy(fd, dest, dest_off, src, src_off, bytes, mapped_len,
		file_name, pmem_memcpy_nodrain_wrapper, 0, p);

	for (int i = 0; i < ARRAY_SIZE(Flags); ++i) {
		do_memcpy(fd, dest, dest_off, src, src_off, bytes,
			mapped_len, file_name, pmem_memcpy, Flags[i], p);
	}
}

int
main(int argc, char *argv[])
{
	int fd;
	char *dest;
	char *src;
	char *dest_orig;
	char *src_orig;
	size_t mapped_len;

	if (argc != 5)
		UT_FATAL("usage: %s file srcoff destoff length", argv[0]);

	/* environment knobs only echoed into the test banner below */
	const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
	const char *avx = os_getenv("PMEM_AVX");
	const char *avx512f = os_getenv("PMEM_AVX512F");

	START(argc, argv, "pmem_memcpy %s %s %s %s %savx %savx512f",
		argv[2], argv[3], argv[4],
		thr ? thr : "default",
		avx ? "" : "!",
		avx512f ? "" : "!");

	fd = OPEN(argv[1], O_RDWR);

	int dest_off = atoi(argv[2]);
	int src_off = atoi(argv[3]);
	size_t bytes = strtoul(argv[4], NULL, 0);

	/* src > dst */
	dest_orig = dest = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL);
	if (dest == NULL)
		UT_FATAL("!could not map file: %s", argv[1]);

	/* anonymous scratch source placed right above the pmem mapping */
	src_orig = src = MMAP(dest + mapped_len, mapped_len,
			PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0);

	/*
	 * Its very unlikely that src would not be > dest. pmem_map_file
	 * chooses the first unused address >= 1TB, large
	 * enough to hold the give range, and 1GB aligned. If the
	 * addresses did not get swapped to allow src > dst, log error
	 * and allow test to continue.
	 */
	if (src <= dest) {
		swap_mappings(&dest, &src, mapped_len, fd);
		if (src <= dest)
			UT_FATAL("cannot map files in memory order");
	}

	enum file_type type = util_fd_get_type(fd);
	if (type < 0)
		UT_FATAL("cannot check type of file with fd %d", fd);

	/* pick the persist routine matching the mapping type */
	persist_fn persist;
	persist = type == TYPE_DEVDAX ? do_persist_ddax : do_persist;

	/*
	 * NOTE(review): assumes mapped_len >= 2 * bytes -- the zeroed
	 * region covers the offsets exercised by do_memcpy; confirm
	 * against the callers in the test harness.
	 */
	memset(dest, 0, (2 * bytes));
	persist(dest, 2 * bytes);
	memset(src, 0, (2 * bytes));

	do_memcpy_variants(fd, dest, dest_off, src, src_off, bytes,
		0, argv[1], persist);

	/* dest > src */
	swap_mappings(&dest, &src, mapped_len, fd);

	if (dest <= src)
		UT_FATAL("cannot map files in memory order");

	do_memcpy_variants(fd, dest, dest_off, src, src_off, bytes,
		0, argv[1], persist);

	int ret = pmem_unmap(dest_orig, mapped_len);
	UT_ASSERTeq(ret, 0);

	MUNMAP(src_orig, mapped_len);

	CLOSE(fd);

	DONE(NULL);
}
4,249
23.853801
76
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_heap_interrupt/mocks_windows.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * mocks_windows.h -- redefinitions of memops functions * * This file is Windows-specific. * * This file should be included (i.e. using Forced Include) by libpmemobj * files, when compiled for the purpose of obj_heap_interrupt test. * It would replace default implementation with mocked functions defined * in obj_heap_interrupt.c. * * These defines could be also passed as preprocessor definitions. */ #ifndef WRAP_REAL #define operation_finish __wrap_operation_finish #endif
578
27.95
73
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_pmalloc_mt/obj_pmalloc_mt.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_pmalloc_mt.c -- multithreaded test of allocator */ #include <stdint.h> #include "file.h" #include "obj.h" #include "pmalloc.h" #include "sys_util.h" #include "unittest.h" #define MAX_THREADS 32 #define MAX_OPS_PER_THREAD 1000 #define ALLOC_SIZE 104 #define REALLOC_SIZE (ALLOC_SIZE * 3) #define MIX_RERUNS 2 #define CHUNKSIZE (1 << 18) #define CHUNKS_PER_THREAD 3 static unsigned Threads; static unsigned Ops_per_thread; static unsigned Tx_per_thread; struct action { struct pobj_action pact; os_mutex_t lock; os_cond_t cond; }; struct root { uint64_t offs[MAX_THREADS][MAX_OPS_PER_THREAD]; struct action actions[MAX_THREADS][MAX_OPS_PER_THREAD]; }; struct worker_args { PMEMobjpool *pop; struct root *r; unsigned idx; }; static void * alloc_worker(void *arg) { struct worker_args *a = arg; for (unsigned i = 0; i < Ops_per_thread; ++i) { pmalloc(a->pop, &a->r->offs[a->idx][i], ALLOC_SIZE, 0, 0); UT_ASSERTne(a->r->offs[a->idx][i], 0); } return NULL; } static void * realloc_worker(void *arg) { struct worker_args *a = arg; for (unsigned i = 0; i < Ops_per_thread; ++i) { prealloc(a->pop, &a->r->offs[a->idx][i], REALLOC_SIZE, 0, 0); UT_ASSERTne(a->r->offs[a->idx][i], 0); } return NULL; } static void * free_worker(void *arg) { struct worker_args *a = arg; for (unsigned i = 0; i < Ops_per_thread; ++i) { pfree(a->pop, &a->r->offs[a->idx][i]); UT_ASSERTeq(a->r->offs[a->idx][i], 0); } return NULL; } static void * mix_worker(void *arg) { struct worker_args *a = arg; /* * The mix scenario is ran twice to increase the chances of run * contention. 
*/ for (unsigned j = 0; j < MIX_RERUNS; ++j) { for (unsigned i = 0; i < Ops_per_thread; ++i) { pmalloc(a->pop, &a->r->offs[a->idx][i], ALLOC_SIZE, 0, 0); UT_ASSERTne(a->r->offs[a->idx][i], 0); } for (unsigned i = 0; i < Ops_per_thread; ++i) { pfree(a->pop, &a->r->offs[a->idx][i]); UT_ASSERTeq(a->r->offs[a->idx][i], 0); } } return NULL; } static void * tx_worker(void *arg) { struct worker_args *a = arg; /* * Allocate objects until exhaustion, once that happens the transaction * will automatically abort and all of the objects will be freed. */ TX_BEGIN(a->pop) { for (unsigned n = 0; ; ++n) { /* this is NOT an infinite loop */ pmemobj_tx_alloc(ALLOC_SIZE, a->idx); if (Ops_per_thread != MAX_OPS_PER_THREAD && n == Ops_per_thread) { pmemobj_tx_abort(0); } } } TX_END return NULL; } static void * tx3_worker(void *arg) { struct worker_args *a = arg; /* * Allocate N objects, abort, repeat M times. Should reveal issues in * transaction abort handling. */ for (unsigned n = 0; n < Tx_per_thread; ++n) { TX_BEGIN(a->pop) { for (unsigned i = 0; i < Ops_per_thread; ++i) { pmemobj_tx_alloc(ALLOC_SIZE, a->idx); } pmemobj_tx_abort(EINVAL); } TX_END } return NULL; } static void * alloc_free_worker(void *arg) { struct worker_args *a = arg; PMEMoid oid; for (unsigned i = 0; i < Ops_per_thread; ++i) { int err = pmemobj_alloc(a->pop, &oid, ALLOC_SIZE, 0, NULL, NULL); UT_ASSERTeq(err, 0); pmemobj_free(&oid); } return NULL; } #define OPS_PER_TX 10 #define STEP 8 #define TEST_LANES 4 static void * tx2_worker(void *arg) { struct worker_args *a = arg; for (unsigned n = 0; n < Tx_per_thread; ++n) { PMEMoid oids[OPS_PER_TX]; TX_BEGIN(a->pop) { for (int i = 0; i < OPS_PER_TX; ++i) { oids[i] = pmemobj_tx_alloc(ALLOC_SIZE, a->idx); for (unsigned j = 0; j < ALLOC_SIZE; j += STEP) { pmemobj_tx_add_range(oids[i], j, STEP); } } } TX_END TX_BEGIN(a->pop) { for (int i = 0; i < OPS_PER_TX; ++i) pmemobj_tx_free(oids[i]); } TX_ONABORT { UT_ASSERT(0); } TX_END } return NULL; } static void * 
action_cancel_worker(void *arg) { struct worker_args *a = arg; PMEMoid oid; for (unsigned i = 0; i < Ops_per_thread; ++i) { unsigned arr_id = a->idx / 2; struct action *act = &a->r->actions[arr_id][i]; if (a->idx % 2 == 0) { os_mutex_lock(&act->lock); oid = pmemobj_reserve(a->pop, &act->pact, ALLOC_SIZE, 0); UT_ASSERT(!OID_IS_NULL(oid)); os_cond_signal(&act->cond); os_mutex_unlock(&act->lock); } else { os_mutex_lock(&act->lock); while (act->pact.heap.offset == 0) os_cond_wait(&act->cond, &act->lock); pmemobj_cancel(a->pop, &act->pact, 1); os_mutex_unlock(&act->lock); } } return NULL; } static void * action_publish_worker(void *arg) { struct worker_args *a = arg; PMEMoid oid; for (unsigned i = 0; i < Ops_per_thread; ++i) { unsigned arr_id = a->idx / 2; struct action *act = &a->r->actions[arr_id][i]; if (a->idx % 2 == 0) { os_mutex_lock(&act->lock); oid = pmemobj_reserve(a->pop, &act->pact, ALLOC_SIZE, 0); UT_ASSERT(!OID_IS_NULL(oid)); os_cond_signal(&act->cond); os_mutex_unlock(&act->lock); } else { os_mutex_lock(&act->lock); while (act->pact.heap.offset == 0) os_cond_wait(&act->cond, &act->lock); pmemobj_publish(a->pop, &act->pact, 1); os_mutex_unlock(&act->lock); } } return NULL; } static void * action_mix_worker(void *arg) { struct worker_args *a = arg; PMEMoid oid; for (unsigned i = 0; i < Ops_per_thread; ++i) { unsigned arr_id = a->idx / 2; unsigned publish = i % 2; struct action *act = &a->r->actions[arr_id][i]; if (a->idx % 2 == 0) { os_mutex_lock(&act->lock); oid = pmemobj_reserve(a->pop, &act->pact, ALLOC_SIZE, 0); UT_ASSERT(!OID_IS_NULL(oid)); os_cond_signal(&act->cond); os_mutex_unlock(&act->lock); } else { os_mutex_lock(&act->lock); while (act->pact.heap.offset == 0) os_cond_wait(&act->cond, &act->lock); if (publish) pmemobj_publish(a->pop, &act->pact, 1); else pmemobj_cancel(a->pop, &act->pact, 1); os_mutex_unlock(&act->lock); } pmemobj_persist(a->pop, act, sizeof(*act)); } return NULL; } static void actions_clear(PMEMobjpool *pop, struct root *r) { for 
(unsigned i = 0; i < Threads; ++i) { for (unsigned j = 0; j < Ops_per_thread; ++j) { struct action *a = &r->actions[i][j]; util_mutex_destroy(&a->lock); util_mutex_init(&a->lock); util_cond_destroy(&a->cond); util_cond_init(&a->cond); memset(&a->pact, 0, sizeof(a->pact)); pmemobj_persist(pop, a, sizeof(*a)); } } } static void run_worker(void *(worker_func)(void *arg), struct worker_args args[]) { os_thread_t t[MAX_THREADS]; for (unsigned i = 0; i < Threads; ++i) THREAD_CREATE(&t[i], NULL, worker_func, &args[i]); for (unsigned i = 0; i < Threads; ++i) THREAD_JOIN(&t[i], NULL); } int main(int argc, char *argv[]) { START(argc, argv, "obj_pmalloc_mt"); if (argc != 5) UT_FATAL("usage: %s <threads> <ops/t> <tx/t> [file]", argv[0]); PMEMobjpool *pop; Threads = ATOU(argv[1]); if (Threads > MAX_THREADS) UT_FATAL("Threads %d > %d", Threads, MAX_THREADS); Ops_per_thread = ATOU(argv[2]); if (Ops_per_thread > MAX_OPS_PER_THREAD) UT_FATAL("Ops per thread %d > %d", Threads, MAX_THREADS); Tx_per_thread = ATOU(argv[3]); int exists = util_file_exists(argv[4]); if (exists < 0) UT_FATAL("!util_file_exists"); if (!exists) { pop = pmemobj_create(argv[4], "TEST", (PMEMOBJ_MIN_POOL) + (MAX_THREADS * CHUNKSIZE * CHUNKS_PER_THREAD), 0666); if (pop == NULL) UT_FATAL("!pmemobj_create"); } else { pop = pmemobj_open(argv[4], "TEST"); if (pop == NULL) UT_FATAL("!pmemobj_open"); } PMEMoid oid = pmemobj_root(pop, sizeof(struct root)); struct root *r = pmemobj_direct(oid); UT_ASSERTne(r, NULL); struct worker_args args[MAX_THREADS]; for (unsigned i = 0; i < Threads; ++i) { args[i].pop = pop; args[i].r = r; args[i].idx = i; for (unsigned j = 0; j < Ops_per_thread; ++j) { struct action *a = &r->actions[i][j]; util_mutex_init(&a->lock); util_cond_init(&a->cond); } } run_worker(alloc_worker, args); run_worker(realloc_worker, args); run_worker(free_worker, args); run_worker(mix_worker, args); run_worker(alloc_free_worker, args); run_worker(action_cancel_worker, args); actions_clear(pop, r); 
run_worker(action_publish_worker, args); actions_clear(pop, r); run_worker(action_mix_worker, args); /* * Reduce the number of lanes to a value smaller than the number of * threads. This will ensure that at least some of the state of the lane * will be shared between threads. Doing this might reveal bugs related * to runtime race detection instrumentation. */ unsigned old_nlanes = pop->lanes_desc.runtime_nlanes; pop->lanes_desc.runtime_nlanes = TEST_LANES; run_worker(tx2_worker, args); pop->lanes_desc.runtime_nlanes = old_nlanes; /* * This workload might create many allocation classes due to pvector, * keep it last. */ if (Threads == MAX_THREADS) /* don't run for short tests */ run_worker(tx_worker, args); run_worker(tx3_worker, args); pmemobj_close(pop); DONE(NULL); } #ifdef _MSC_VER /* * Since libpmemobj is linked statically, we need to invoke its ctor/dtor. */ MSVC_CONSTR(libpmemobj_init) MSVC_DESTR(libpmemobj_fini) #endif
9,123
21.09201
74
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_ctl_alignment/obj_ctl_alignment.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2020, Intel Corporation */ /* * obj_ctl_alignment.c -- tests for the alloc class alignment */ #include "unittest.h" #define LAYOUT "obj_ctl_alignment" static PMEMobjpool *pop; static void test_fail(void) { struct pobj_alloc_class_desc ac; ac.header_type = POBJ_HEADER_NONE; ac.unit_size = 1024 - 1; ac.units_per_block = 100; ac.alignment = 512; int ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &ac); UT_ASSERTeq(ret, -1); /* unit_size must be multiple of alignment */ } static void test_aligned_allocs(size_t size, size_t alignment, enum pobj_header_type htype) { struct pobj_alloc_class_desc ac; ac.header_type = htype; ac.unit_size = size; ac.units_per_block = 100; ac.alignment = alignment; int ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &ac); UT_ASSERTeq(ret, 0); PMEMoid oid; ret = pmemobj_xalloc(pop, &oid, 1, 0, POBJ_CLASS_ID(ac.class_id), NULL, NULL); UT_ASSERTeq(ret, 0); UT_ASSERTeq(oid.off % alignment, 0); UT_ASSERTeq((uintptr_t)pmemobj_direct(oid) % alignment, 0); ret = pmemobj_xalloc(pop, &oid, 1, 0, POBJ_CLASS_ID(ac.class_id), NULL, NULL); UT_ASSERTeq(ret, 0); UT_ASSERTeq(oid.off % alignment, 0); UT_ASSERTeq((uintptr_t)pmemobj_direct(oid) % alignment, 0); char query[1024]; SNPRINTF(query, 1024, "heap.alloc_class.%u.desc", ac.class_id); struct pobj_alloc_class_desc read_ac; ret = pmemobj_ctl_get(pop, query, &read_ac); UT_ASSERTeq(ret, 0); UT_ASSERTeq(ac.alignment, read_ac.alignment); } int main(int argc, char *argv[]) { START(argc, argv, "obj_ctl_alignment"); if (argc != 2) UT_FATAL("usage: %s file-name", argv[0]); const char *path = argv[1]; if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL * 10, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create: %s", path); test_fail(); test_aligned_allocs(1024, 512, POBJ_HEADER_NONE); test_aligned_allocs(1024, 512, POBJ_HEADER_COMPACT); test_aligned_allocs(64, 64, POBJ_HEADER_COMPACT); pmemobj_close(pop); DONE(NULL); }
2,055
23.47619
79
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_list/mocks_windows.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * mocks_windows.h -- redefinitions of obj list functions * * This file is Windows-specific. * * This file should be included (i.e. using Forced Include) by libpmemobj * files, when compiled for the purpose of obj_list test. * It would replace default implementation with mocked functions defined * in obj_list.c. * * These defines could be also passed as preprocessor definitions. */ #if defined(__cplusplus) extern "C" { #endif #ifdef WRAP_REAL #define WRAP_REAL_PMALLOC #define WRAP_REAL_ULOG #define WRAP_REAL_LANE #define WRAP_REAL_HEAP #define WRAP_REAL_PMEMOBJ #endif #ifndef WRAP_REAL_PMALLOC #define pmalloc __wrap_pmalloc #define pfree __wrap_pfree #define pmalloc_construct __wrap_pmalloc_construct #define prealloc __wrap_prealloc #define prealloc_construct __wrap_prealloc_construct #define palloc_usable_size __wrap_palloc_usable_size #define palloc_reserve __wrap_palloc_reserve #define palloc_publish __wrap_palloc_publish #define palloc_defer_free __wrap_palloc_defer_free #endif #ifndef WRAP_REAL_ULOG #define ulog_store __wrap_ulog_store #define ulog_process __wrap_ulog_process #endif #ifndef WRAP_REAL_LANE #define lane_hold __wrap_lane_hold #define lane_release __wrap_lane_release #define lane_recover_and_section_boot __wrap_lane_recover_and_section_boot #define lane_section_cleanup __wrap_lane_section_cleanup #endif #ifndef WRAP_REAL_HEAP #define heap_boot __wrap_heap_boot #endif #ifndef WRAP_REAL_PMEMOBJ #define pmemobj_alloc __wrap_pmemobj_alloc #define pmemobj_alloc_usable_size __wrap_pmemobj_alloc_usable_size #define pmemobj_openU __wrap_pmemobj_open #define pmemobj_close __wrap_pmemobj_close #define pmemobj_direct __wrap_pmemobj_direct #define pmemobj_pool_by_oid __wrap_pmemobj_pool_by_oid #define pmemobj_pool_by_ptr __wrap_pmemobj_pool_by_ptr #endif #if defined(__cplusplus) } #endif
1,933
26.628571
74
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_list/obj_list.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * obj_list.h -- unit tests for list module */ #include <stddef.h> #include <sys/param.h> #include "list.h" #include "obj.h" #include "lane.h" #include "unittest.h" #include "util.h" /* offset to "in band" item */ #define OOB_OFF (sizeof(struct oob_header)) /* pmemobj initial heap offset */ #define HEAP_OFFSET 8192 TOID_DECLARE(struct item, 0); TOID_DECLARE(struct list, 1); TOID_DECLARE(struct oob_list, 2); TOID_DECLARE(struct oob_item, 3); struct item { int id; POBJ_LIST_ENTRY(struct item) next; }; struct oob_header { char data[48]; }; struct oob_item { struct oob_header oob; struct item item; }; struct oob_list { struct list_head head; }; struct list { POBJ_LIST_HEAD(listhead, struct item) head; }; enum ulog_fail { /* don't fail at all */ NO_FAIL, /* fail after ulog_store */ FAIL_AFTER_FINISH, /* fail before ulog_store */ FAIL_BEFORE_FINISH, /* fail after process */ FAIL_AFTER_PROCESS }; /* global handle to pmemobj pool */ extern PMEMobjpool *Pop; /* pointer to heap offset */ extern uint64_t *Heap_offset; /* list lane section */ extern struct lane Lane; /* actual item id */ extern int *Id; /* fail event */ extern enum ulog_fail Ulog_fail; /* global "in band" lists */ extern TOID(struct list) List; extern TOID(struct list) List_sec; /* global "out of band" lists */ extern TOID(struct oob_list) List_oob; extern TOID(struct oob_list) List_oob_sec; extern TOID(struct oob_item) *Item; /* usage macros */ #define FATAL_USAGE()\ UT_FATAL("usage: obj_list <file> [PRnifr]") #define FATAL_USAGE_PRINT()\ UT_FATAL("usage: obj_list <file> P:<list>") #define FATAL_USAGE_PRINT_REVERSE()\ UT_FATAL("usage: obj_list <file> R:<list>") #define FATAL_USAGE_INSERT()\ UT_FATAL("usage: obj_list <file> i:<where>:<num>") #define FATAL_USAGE_INSERT_NEW()\ UT_FATAL("usage: obj_list <file> n:<where>:<num>:<value>") #define FATAL_USAGE_REMOVE_FREE()\ UT_FATAL("usage: obj_list <file> f:<list>:<num>:<from>") 
#define FATAL_USAGE_REMOVE()\ UT_FATAL("usage: obj_list <file> r:<num>") #define FATAL_USAGE_MOVE()\ UT_FATAL("usage: obj_list <file> m:<num>:<where>:<num>") #define FATAL_USAGE_FAIL()\ UT_FATAL("usage: obj_list <file> "\ "F:<after_finish|before_finish|after_process>")
2,314
21.475728
59
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_list/obj_list_mocks.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_list_mocks.c -- mocks for redo/lane/heap/obj modules */ #include <inttypes.h> #include "valgrind_internal.h" #include "obj_list.h" #include "set.h" /* * pmem_drain_nop -- no operation for drain on non-pmem memory */ static void pmem_drain_nop(void) { /* NOP */ } /* * obj_persist -- pmemobj version of pmem_persist w/o replication */ static int obj_persist(void *ctx, const void *addr, size_t len, unsigned flags) { PMEMobjpool *pop = (PMEMobjpool *)ctx; pop->persist_local(addr, len); return 0; } /* * obj_flush -- pmemobj version of pmem_flush w/o replication */ static int obj_flush(void *ctx, const void *addr, size_t len, unsigned flags) { PMEMobjpool *pop = (PMEMobjpool *)ctx; pop->flush_local(addr, len); return 0; } static uintptr_t Pool_addr; static size_t Pool_size; static void obj_msync_nofail(const void *addr, size_t size) { uintptr_t addr_ptrt = (uintptr_t)addr; /* * Verify msynced range is in the last mapped file range. Useful for * catching errors which normally would be caught only on Windows by * win_mmap.c. 
*/ if (addr_ptrt < Pool_addr || addr_ptrt >= Pool_addr + Pool_size || addr_ptrt + size >= Pool_addr + Pool_size) UT_FATAL("<0x%" PRIxPTR ",0x%" PRIxPTR "> " "not in <0x%" PRIxPTR ",0x%" PRIxPTR "> range", addr_ptrt, addr_ptrt + size, Pool_addr, Pool_addr + Pool_size); if (pmem_msync(addr, size)) UT_FATAL("!pmem_msync"); } /* * obj_drain -- pmemobj version of pmem_drain w/o replication */ static void obj_drain(void *ctx) { PMEMobjpool *pop = (PMEMobjpool *)ctx; pop->drain_local(); } static void * obj_memcpy(void *ctx, void *dest, const void *src, size_t len, unsigned flags) { return pmem_memcpy(dest, src, len, flags); } static void * obj_memset(void *ctx, void *ptr, int c, size_t sz, unsigned flags) { return pmem_memset(ptr, c, sz, flags); } /* * linear_alloc -- allocates `size` bytes (rounded up to 8 bytes) and returns * offset to the allocated object */ static uint64_t linear_alloc(uint64_t *cur_offset, size_t size) { uint64_t ret = *cur_offset; *cur_offset += roundup(size, sizeof(uint64_t)); return ret; } /* * pmemobj_open -- pmemobj_open mock * * This function initializes the pmemobj pool for purposes of this * unittest. 
*/ FUNC_MOCK(pmemobj_open, PMEMobjpool *, const char *fname, const char *layout) FUNC_MOCK_RUN_DEFAULT { size_t size; int is_pmem; void *addr = pmem_map_file(fname, 0, 0, 0, &size, &is_pmem); if (!addr) { UT_OUT("!%s: pmem_map_file", fname); return NULL; } Pool_addr = (uintptr_t)addr; Pool_size = size; Pop = (PMEMobjpool *)addr; Pop->addr = Pop; Pop->is_pmem = is_pmem; Pop->rdonly = 0; Pop->uuid_lo = 0x12345678; VALGRIND_REMOVE_PMEM_MAPPING(&Pop->mutex_head, sizeof(Pop->mutex_head)); VALGRIND_REMOVE_PMEM_MAPPING(&Pop->rwlock_head, sizeof(Pop->rwlock_head)); VALGRIND_REMOVE_PMEM_MAPPING(&Pop->cond_head, sizeof(Pop->cond_head)); Pop->mutex_head = NULL; Pop->rwlock_head = NULL; Pop->cond_head = NULL; if (Pop->is_pmem) { Pop->persist_local = pmem_persist; Pop->flush_local = pmem_flush; Pop->drain_local = pmem_drain; Pop->memcpy_local = pmem_memcpy; Pop->memset_local = pmem_memset; } else { Pop->persist_local = obj_msync_nofail; Pop->flush_local = obj_msync_nofail; Pop->drain_local = pmem_drain_nop; Pop->memcpy_local = pmem_memcpy; Pop->memset_local = pmem_memset; } Pop->p_ops.persist = obj_persist; Pop->p_ops.flush = obj_flush; Pop->p_ops.drain = obj_drain; Pop->p_ops.memcpy = obj_memcpy; Pop->p_ops.memset = obj_memset; Pop->p_ops.base = Pop; struct pmem_ops *p_ops = &Pop->p_ops; Pop->heap_offset = HEAP_OFFSET; Pop->heap_size = size - Pop->heap_offset; uint64_t heap_offset = HEAP_OFFSET; Heap_offset = (uint64_t *)((uintptr_t)Pop + linear_alloc(&heap_offset, sizeof(*Heap_offset))); Id = (int *)((uintptr_t)Pop + linear_alloc(&heap_offset, sizeof(*Id))); /* Alloc lane layout */ Lane.layout = (struct lane_layout *)((uintptr_t)Pop + linear_alloc(&heap_offset, LANE_TOTAL_SIZE)); /* Alloc in band lists */ List.oid.pool_uuid_lo = Pop->uuid_lo; List.oid.off = linear_alloc(&heap_offset, sizeof(struct list)); List_sec.oid.pool_uuid_lo = Pop->uuid_lo; List_sec.oid.off = linear_alloc(&heap_offset, sizeof(struct list)); /* Alloc out of band lists */ List_oob.oid.pool_uuid_lo = 
Pop->uuid_lo; List_oob.oid.off = linear_alloc(&heap_offset, sizeof(struct oob_list)); List_oob_sec.oid.pool_uuid_lo = Pop->uuid_lo; List_oob_sec.oid.off = linear_alloc(&heap_offset, sizeof(struct oob_list)); Item = (union oob_item_toid *)((uintptr_t)Pop + linear_alloc(&heap_offset, sizeof(*Item))); Item->oid.pool_uuid_lo = Pop->uuid_lo; Item->oid.off = linear_alloc(&heap_offset, sizeof(struct oob_item)); pmemops_persist(p_ops, Item, sizeof(*Item)); if (*Heap_offset == 0) { *Heap_offset = heap_offset; pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset)); } pmemops_persist(p_ops, Pop, HEAP_OFFSET); Pop->run_id += 2; pmemops_persist(p_ops, &Pop->run_id, sizeof(Pop->run_id)); Lane.external = operation_new((struct ulog *)&Lane.layout->external, LANE_REDO_EXTERNAL_SIZE, NULL, NULL, p_ops, LOG_TYPE_REDO); return Pop; } FUNC_MOCK_END /* * pmemobj_close -- pmemobj_close mock * * Just unmap the mapped area. */ FUNC_MOCK(pmemobj_close, void, PMEMobjpool *pop) FUNC_MOCK_RUN_DEFAULT { operation_delete(Lane.external); UT_ASSERTeq(pmem_unmap(Pop, Pop->heap_size + Pop->heap_offset), 0); Pop = NULL; Pool_addr = 0; Pool_size = 0; } FUNC_MOCK_END /* * pmemobj_pool_by_ptr -- pmemobj_pool_by_ptr mock * * Just return Pop. */ FUNC_MOCK_RET_ALWAYS(pmemobj_pool_by_ptr, PMEMobjpool *, Pop, const void *ptr); /* * pmemobj_direct -- pmemobj_direct mock */ FUNC_MOCK(pmemobj_direct, void *, PMEMoid oid) FUNC_MOCK_RUN_DEFAULT { return (void *)((uintptr_t)Pop + oid.off); } FUNC_MOCK_END FUNC_MOCK_RET_ALWAYS(pmemobj_pool_by_oid, PMEMobjpool *, Pop, PMEMoid oid); /* * pmemobj_alloc_usable_size -- pmemobj_alloc_usable_size mock */ FUNC_MOCK(pmemobj_alloc_usable_size, size_t, PMEMoid oid) FUNC_MOCK_RUN_DEFAULT { size_t size = palloc_usable_size( &Pop->heap, oid.off - OOB_OFF); return size - OOB_OFF; } FUNC_MOCK_END /* * pmemobj_alloc -- pmemobj_alloc mock * * Allocates an object using pmalloc and return PMEMoid. 
*/ FUNC_MOCK(pmemobj_alloc, int, PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num, pmemobj_constr constructor, void *arg) FUNC_MOCK_RUN_DEFAULT { PMEMoid oid = {0, 0}; oid.pool_uuid_lo = 0; pmalloc(pop, &oid.off, size, 0, 0); if (oidp) { *oidp = oid; if (OBJ_PTR_FROM_POOL(pop, oidp)) pmemops_persist(&Pop->p_ops, oidp, sizeof(*oidp)); } return 0; } FUNC_MOCK_END /* * lane_hold -- lane_hold mock * * Returns pointer to list lane section. */ FUNC_MOCK(lane_hold, unsigned, PMEMobjpool *pop, struct lane **lane) FUNC_MOCK_RUN_DEFAULT { *lane = &Lane; return 0; } FUNC_MOCK_END /* * lane_release -- lane_release mock * * Always returns success. */ FUNC_MOCK_RET_ALWAYS_VOID(lane_release, PMEMobjpool *pop); /* * lane_recover_and_section_boot -- lane_recover_and_section_boot mock */ FUNC_MOCK(lane_recover_and_section_boot, int, PMEMobjpool *pop) FUNC_MOCK_RUN_DEFAULT { ulog_recover((struct ulog *)&Lane.layout->external, OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops); return 0; } FUNC_MOCK_END /* * lane_section_cleanup -- lane_section_cleanup mock */ FUNC_MOCK(lane_section_cleanup, int, PMEMobjpool *pop) FUNC_MOCK_RUN_DEFAULT { return 0; } FUNC_MOCK_END /* * ulog_store_last -- ulog_store_last mock */ FUNC_MOCK(ulog_store, void, struct ulog *dest, struct ulog *src, size_t nbytes, size_t redo_base_nbytes, size_t ulog_base_capacity, struct ulog_next *next, const struct pmem_ops *p_ops) FUNC_MOCK_RUN_DEFAULT { switch (Ulog_fail) { case FAIL_AFTER_FINISH: _FUNC_REAL(ulog_store)(dest, src, nbytes, redo_base_nbytes, ulog_base_capacity, next, p_ops); DONEW(NULL); break; case FAIL_BEFORE_FINISH: DONEW(NULL); break; default: _FUNC_REAL(ulog_store)(dest, src, nbytes, redo_base_nbytes, ulog_base_capacity, next, p_ops); break; } } FUNC_MOCK_END /* * ulog_process -- ulog_process mock */ FUNC_MOCK(ulog_process, void, struct ulog *ulog, ulog_check_offset_fn check, const struct pmem_ops *p_ops) FUNC_MOCK_RUN_DEFAULT { _FUNC_REAL(ulog_process)(ulog, check, p_ops); if (Ulog_fail == 
FAIL_AFTER_PROCESS) { DONEW(NULL); } } FUNC_MOCK_END /* * heap_boot -- heap_boot mock * * Always returns success. */ FUNC_MOCK_RET_ALWAYS(heap_boot, int, 0, PMEMobjpool *pop);
8,765
22.691892
79
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_list/obj_list_mocks_palloc.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * obj_list_mocks_palloc.c -- mocks for palloc/pmalloc modules */ #include "obj_list.h" /* * pmalloc -- pmalloc mock * * Allocates the memory using linear allocator. * Prints the id of allocated struct oob_item for tracking purposes. */ FUNC_MOCK(pmalloc, int, PMEMobjpool *pop, uint64_t *ptr, size_t size, uint64_t extra_field, uint16_t flags) FUNC_MOCK_RUN_DEFAULT { struct pmem_ops *p_ops = &Pop->p_ops; size = size + OOB_OFF + sizeof(uint64_t) * 2; uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop + *Heap_offset); *alloc_size = size; pmemops_persist(p_ops, alloc_size, sizeof(*alloc_size)); *ptr = *Heap_offset + sizeof(uint64_t); if (OBJ_PTR_FROM_POOL(pop, ptr)) pmemops_persist(p_ops, ptr, sizeof(*ptr)); struct oob_item *item = (struct oob_item *)((uintptr_t)Pop + *ptr); *ptr += OOB_OFF; if (OBJ_PTR_FROM_POOL(pop, ptr)) pmemops_persist(p_ops, ptr, sizeof(*ptr)); item->item.id = *Id; pmemops_persist(p_ops, &item->item.id, sizeof(item->item.id)); (*Id)++; pmemops_persist(p_ops, Id, sizeof(*Id)); *Heap_offset = *Heap_offset + sizeof(uint64_t) + size + OOB_OFF; pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset)); UT_OUT("pmalloc(id = %d)", item->item.id); return 0; } FUNC_MOCK_END /* * pfree -- pfree mock * * Just prints freeing struct oob_item id. Doesn't free the memory. */ FUNC_MOCK(pfree, void, PMEMobjpool *pop, uint64_t *ptr) FUNC_MOCK_RUN_DEFAULT { struct oob_item *item = (struct oob_item *)((uintptr_t)Pop + *ptr - OOB_OFF); UT_OUT("pfree(id = %d)", item->item.id); *ptr = 0; if (OBJ_PTR_FROM_POOL(pop, ptr)) pmemops_persist(&Pop->p_ops, ptr, sizeof(*ptr)); return; } FUNC_MOCK_END /* * pmalloc_construct -- pmalloc_construct mock * * Allocates the memory using linear allocator and invokes the constructor. * Prints the id of allocated struct oob_item for tracking purposes. 
*/ FUNC_MOCK(pmalloc_construct, int, PMEMobjpool *pop, uint64_t *off, size_t size, palloc_constr constructor, void *arg, uint64_t extra_field, uint16_t flags, uint16_t class_id) FUNC_MOCK_RUN_DEFAULT { struct pmem_ops *p_ops = &Pop->p_ops; size = size + OOB_OFF + sizeof(uint64_t) * 2; uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop + *Heap_offset); *alloc_size = size; pmemops_persist(p_ops, alloc_size, sizeof(*alloc_size)); *off = *Heap_offset + sizeof(uint64_t) + OOB_OFF; if (OBJ_PTR_FROM_POOL(pop, off)) pmemops_persist(p_ops, off, sizeof(*off)); *Heap_offset = *Heap_offset + sizeof(uint64_t) + size; pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset)); void *ptr = (void *)((uintptr_t)Pop + *off); constructor(pop, ptr, size, arg); return 0; } FUNC_MOCK_END /* * prealloc -- prealloc mock */ FUNC_MOCK(prealloc, int, PMEMobjpool *pop, uint64_t *off, size_t size, uint64_t extra_field, uint16_t flags) FUNC_MOCK_RUN_DEFAULT { uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop + *off - sizeof(uint64_t)); struct item *item = (struct item *)((uintptr_t)Pop + *off + OOB_OFF); if (*alloc_size >= size) { *alloc_size = size; pmemops_persist(&Pop->p_ops, alloc_size, sizeof(*alloc_size)); UT_OUT("prealloc(id = %d, size = %zu) = true", item->id, (size - OOB_OFF) / sizeof(struct item)); return 0; } else { UT_OUT("prealloc(id = %d, size = %zu) = false", item->id, (size - OOB_OFF) / sizeof(struct item)); return -1; } } FUNC_MOCK_END /* * prealloc_construct -- prealloc_construct mock */ FUNC_MOCK(prealloc_construct, int, PMEMobjpool *pop, uint64_t *off, size_t size, palloc_constr constructor, void *arg, uint64_t extra_field, uint16_t flags, uint16_t class_id) FUNC_MOCK_RUN_DEFAULT { int ret = __wrap_prealloc(pop, off, size, 0, 0); if (!ret) { void *ptr = (void *)((uintptr_t)Pop + *off + OOB_OFF); constructor(pop, ptr, size, arg); } return ret; } FUNC_MOCK_END /* * palloc_reserve -- palloc_reserve mock */ FUNC_MOCK(palloc_reserve, int, struct palloc_heap *heap, size_t size, 
palloc_constr constructor, void *arg, uint64_t extra_field, uint16_t object_flags, uint16_t class_id, uint16_t arena_id, struct pobj_action *act) FUNC_MOCK_RUN_DEFAULT { struct pmem_ops *p_ops = &Pop->p_ops; size = size + OOB_OFF + sizeof(uint64_t) * 2; uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop + *Heap_offset); *alloc_size = size; pmemops_persist(p_ops, alloc_size, sizeof(*alloc_size)); act->heap.offset = *Heap_offset + sizeof(uint64_t); struct oob_item *item = (struct oob_item *)((uintptr_t)Pop + act->heap.offset); act->heap.offset += OOB_OFF; item->item.id = *Id; pmemops_persist(p_ops, &item->item.id, sizeof(item->item.id)); (*Id)++; pmemops_persist(p_ops, Id, sizeof(*Id)); *Heap_offset += sizeof(uint64_t) + size + OOB_OFF; pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset)); UT_OUT("pmalloc(id = %d)", item->item.id); return 0; } FUNC_MOCK_END /* * palloc_publish -- mock publish, must process operation */ FUNC_MOCK(palloc_publish, void, struct palloc_heap *heap, struct pobj_action *actv, size_t actvcnt, struct operation_context *ctx) FUNC_MOCK_RUN_DEFAULT { operation_process(ctx); operation_finish(ctx, 0); } FUNC_MOCK_END /* * palloc_defer_free -- pfree mock * * Just prints freeing struct oob_item id. Doesn't free the memory. */ FUNC_MOCK(palloc_defer_free, void, struct palloc_heap *heap, uint64_t off, struct pobj_action *act) FUNC_MOCK_RUN_DEFAULT { struct oob_item *item = (struct oob_item *)((uintptr_t)Pop + off - OOB_OFF); UT_OUT("pfree(id = %d)", item->item.id); act->heap.offset = off; return; } FUNC_MOCK_END /* * pmalloc_usable_size -- pmalloc_usable_size mock */ FUNC_MOCK(palloc_usable_size, size_t, struct palloc_heap *heap, uint64_t off) FUNC_MOCK_RUN_DEFAULT { uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop + off - sizeof(uint64_t)); return (size_t)*alloc_size; } FUNC_MOCK_END
6,050
26.756881
77
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/blk_rw_mt/blk_rw_mt.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * blk_rw_mt.c -- unit test for multi-threaded random I/O * * usage: blk_rw_mt bsize file seed nthread nops * */ #include "unittest.h" #include "rand.h" static size_t Bsize; /* all I/O below this LBA (increases collisions) */ static const unsigned Nblock = 100; static unsigned Seed; static unsigned Nthread; static unsigned Nops; static PMEMblkpool *Handle; /* * construct -- build a buffer for writing */ static void construct(int *ordp, unsigned char *buf) { for (int i = 0; i < Bsize; i++) buf[i] = *ordp; (*ordp)++; if (*ordp > 255) *ordp = 1; } /* * check -- check for torn buffers */ static void check(unsigned char *buf) { unsigned val = *buf; for (int i = 1; i < Bsize; i++) if (buf[i] != val) { UT_OUT("{%u} TORN at byte %d", val, i); break; } } /* * worker -- the work each thread performs */ static void * worker(void *arg) { uintptr_t mytid = (uintptr_t)arg; unsigned char *buf = MALLOC(Bsize); int ord = 1; rng_t rng; randomize_r(&rng, Seed + mytid); for (unsigned i = 0; i < Nops; i++) { os_off_t lba = (os_off_t)(rnd64_r(&rng) % Nblock); if (rnd64_r(&rng) % 2) { /* read */ if (pmemblk_read(Handle, buf, lba) < 0) UT_OUT("!read lba %zu", lba); else check(buf); } else { /* write */ construct(&ord, buf); if (pmemblk_write(Handle, buf, lba) < 0) UT_OUT("!write lba %zu", lba); } } FREE(buf); return NULL; } int main(int argc, char *argv[]) { START(argc, argv, "blk_rw_mt"); if (argc != 6) UT_FATAL("usage: %s bsize file seed nthread nops", argv[0]); Bsize = strtoul(argv[1], NULL, 0); const char *path = argv[2]; if ((Handle = pmemblk_create(path, Bsize, 0, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!%s: pmemblk_create", path); Seed = strtoul(argv[3], NULL, 0); Nthread = strtoul(argv[4], NULL, 0); Nops = strtoul(argv[5], NULL, 0); UT_OUT("%s block size %zu usable blocks %u", argv[1], Bsize, Nblock); os_thread_t *threads = MALLOC(Nthread * sizeof(os_thread_t)); /* kick off nthread threads */ for (unsigned i = 0; i < Nthread; i++) THREAD_CREATE(&threads[i], NULL, worker, (void 
*)(intptr_t)i); /* wait for all the threads to complete */ for (unsigned i = 0; i < Nthread; i++) THREAD_JOIN(&threads[i], NULL); FREE(threads); pmemblk_close(Handle); /* XXX not ready to pass this part of the test yet */ int result = pmemblk_check(path, Bsize); if (result < 0) UT_OUT("!%s: pmemblk_check", path); else if (result == 0) UT_OUT("%s: pmemblk_check: not consistent", path); DONE(NULL); }
4,260
25.302469
74
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_ctl_stats/obj_ctl_stats.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * obj_ctl_stats.c -- tests for the libpmemobj statistics module */ #include "unittest.h" int main(int argc, char *argv[]) { START(argc, argv, "obj_ctl_stats"); if (argc != 2) UT_FATAL("usage: %s file-name", argv[0]); const char *path = argv[1]; PMEMobjpool *pop; if ((pop = pmemobj_create(path, "ctl", PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create: %s", path); int enabled; int ret = pmemobj_ctl_get(pop, "stats.enabled", &enabled); UT_ASSERTeq(enabled, 0); UT_ASSERTeq(ret, 0); ret = pmemobj_alloc(pop, NULL, 1, 0, NULL, NULL); UT_ASSERTeq(ret, 0); size_t allocated; ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated); UT_ASSERTeq(allocated, 0); enabled = 1; ret = pmemobj_ctl_set(pop, "stats.enabled", &enabled); UT_ASSERTeq(ret, 0); PMEMoid oid; ret = pmemobj_alloc(pop, &oid, 1, 0, NULL, NULL); UT_ASSERTeq(ret, 0); size_t oid_size = pmemobj_alloc_usable_size(oid) + 16; ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated); UT_ASSERTeq(ret, 0); UT_ASSERTeq(allocated, oid_size); size_t run_allocated = 0; ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &run_allocated); UT_ASSERTeq(ret, 0); UT_ASSERT(run_allocated /* 2 allocs */ > allocated /* 1 alloc */); pmemobj_free(&oid); ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated); UT_ASSERTeq(ret, 0); UT_ASSERTeq(allocated, 0); ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &run_allocated); UT_ASSERTeq(ret, 0); UT_ASSERT(run_allocated /* 2 allocs */ > allocated /* 1 alloc */); TX_BEGIN(pop) { oid = pmemobj_tx_alloc(1, 0); } TX_ONABORT { UT_ASSERT(0); } TX_END oid_size = pmemobj_alloc_usable_size(oid) + 16; ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated); UT_ASSERTeq(ret, 0); UT_ASSERTeq(allocated, oid_size); enum pobj_stats_enabled enum_enabled; ret = pmemobj_ctl_get(pop, "stats.enabled", &enum_enabled); UT_ASSERTeq(enabled, 
POBJ_STATS_ENABLED_BOTH); UT_ASSERTeq(ret, 0); run_allocated = 0; ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &run_allocated); UT_ASSERTeq(ret, 0); enum_enabled = POBJ_STATS_ENABLED_PERSISTENT; /* transient disabled */ ret = pmemobj_ctl_set(pop, "stats.enabled", &enum_enabled); UT_ASSERTeq(ret, 0); ret = pmemobj_alloc(pop, &oid, 1, 0, NULL, NULL); UT_ASSERTeq(ret, 0); size_t tmp = 0; ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &tmp); UT_ASSERTeq(ret, 0); UT_ASSERTeq(tmp, run_allocated); /* shouldn't change */ /* the deallocated object shouldn't be reflected in rebuilt stats */ pmemobj_free(&oid); pmemobj_close(pop); pop = pmemobj_open(path, "ctl"); UT_ASSERTne(pop, NULL); /* stats are rebuilt lazily, so initially this should be 0 */ tmp = 0; ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &tmp); UT_ASSERTeq(ret, 0); UT_ASSERTeq(tmp, 0); ret = pmemobj_alloc(pop, NULL, 1, 0, NULL, NULL); UT_ASSERTeq(ret, 0); /* after first alloc, the previously allocated object will be found */ tmp = 0; ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &tmp); UT_ASSERTeq(ret, 0); UT_ASSERTeq(tmp, run_allocated + oid_size); pmemobj_close(pop); DONE(NULL); }
3,299
25.829268
72
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/util_poolset_foreach/util_poolset_foreach.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * util_poolset_foreach.c -- unit test for util_poolset_foreach_part() * * usage: util_poolset_foreach file... */ #include "unittest.h" #include "set.h" #include "pmemcommon.h" #include <errno.h> #define LOG_PREFIX "ut" #define LOG_LEVEL_VAR "TEST_LOG_LEVEL" #define LOG_FILE_VAR "TEST_LOG_FILE" #define MAJOR_VERSION 1 #define MINOR_VERSION 0 static int cb(struct part_file *pf, void *arg) { if (pf->is_remote) { /* remote replica */ const char *node_addr = pf->remote->node_addr; const char *pool_desc = pf->remote->pool_desc; char *set_name = (char *)arg; UT_OUT("%s: %s %s", set_name, node_addr, pool_desc); } else { const char *name = pf->part->path; char *set_name = (char *)arg; UT_OUT("%s: %s", set_name, name); } return 0; } int main(int argc, char *argv[]) { START(argc, argv, "util_poolset_foreach"); common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR, MAJOR_VERSION, MINOR_VERSION); if (argc < 2) UT_FATAL("usage: %s file...", argv[0]); for (int i = 1; i < argc; i++) { char *fname = argv[i]; int ret = util_poolset_foreach_part(fname, cb, fname); UT_OUT("util_poolset_foreach_part(%s): %d", fname, ret); } common_fini(); DONE(NULL); }
1,293
20.213115
70
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_map_prot/pmem2_map_prot.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * pmem2_map_prot.c -- pmem2_map_prot unit tests */ #include <stdbool.h> #include <signal.h> #include <setjmp.h> #include "config.h" #include "source.h" #include "map.h" #include "out.h" #include "pmem2.h" #include "unittest.h" #include "ut_pmem2.h" #include "ut_pmem2_setup.h" #include "ut_fh.h" struct res { struct FHandle *fh; struct pmem2_config cfg; struct pmem2_source *src; }; /* * res_prepare -- set access mode and protection flags */ static void res_prepare(const char *file, struct res *res, int access, unsigned proto) { #ifdef _WIN32 enum file_handle_type fh_type = FH_HANDLE; #else enum file_handle_type fh_type = FH_FD; #endif ut_pmem2_prepare_config(&res->cfg, &res->src, &res->fh, fh_type, file, 0, 0, access); pmem2_config_set_protection(&res->cfg, proto); } /* * res_cleanup -- free resources */ static void res_cleanup(struct res *res) { PMEM2_SOURCE_DELETE(&res->src); UT_FH_CLOSE(res->fh); } static const char *word1 = "Persistent or nonpersistent: this is the question."; static ut_jmp_buf_t Jmp; /* * signal_handler -- called on SIGSEGV */ static void signal_handler(int sig) { ut_siglongjmp(Jmp); } /* * test_rw_mode_rw_prot -- test R/W protection * pmem2_map() - should success * memcpy() - should success */ static int test_rw_mode_rw_prot(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_rw_mode_rw_prot <file>"); struct res res; /* read/write on file opened in read/write mode - should success */ res_prepare(argv[0], &res, FH_RDWR, PMEM2_PROT_READ | PMEM2_PROT_WRITE); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map); void *addr_map = pmem2_map_get_address(map); memcpy_fn(addr_map, word1, strlen(word1), 0); UT_ASSERTeq(memcmp(addr_map, word1, strlen(word1)), 0); pmem2_unmap(&map); res_cleanup(&res); return 1; } /* * template_mode_prot_mismatch - 
try to map file with mutually exclusive FD * access and map protection */ static void template_mode_prot_mismatch(char *file, int access, unsigned prot) { struct res res; /* read/write on file opened in read-only mode - should fail */ res_prepare(file, &res, access, prot); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_NO_ACCESS); res_cleanup(&res); } /* * test_r_mode_rw_prot -- test R/W protection * pmem2_map() - should fail */ static int test_r_mode_rw_prot(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_r_mode_rw_prot <file>"); char *file = argv[0]; template_mode_prot_mismatch(file, FH_READ, PMEM2_PROT_WRITE | PMEM2_PROT_READ); return 1; } /* * test_rw_mode_rwx_prot - test R/W/X protection on R/W file * pmem2_map() - should fail */ static int test_rw_modex_rwx_prot(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_rw_modex_rwx_prot <file>"); char *file = argv[0]; template_mode_prot_mismatch(file, FH_RDWR, PMEM2_PROT_EXEC |PMEM2_PROT_WRITE | PMEM2_PROT_READ); return 1; } /* * test_rw_modex_rx_prot - test R/X protection on R/W file * pmem2_map() - should fail */ static int test_rw_modex_rx_prot(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_rw_modex_rx_prot <file>"); char *file = argv[0]; template_mode_prot_mismatch(file, FH_RDWR, PMEM2_PROT_EXEC | PMEM2_PROT_READ); return 1; } /* * test_rw_mode_r_prot -- test R/W protection * pmem2_map() - should success * memcpy() - should fail */ static int test_rw_mode_r_prot(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_rw_mode_r_prot <file>"); /* arrange to catch SIGSEGV */ struct sigaction v; sigemptyset(&v.sa_mask); v.sa_flags = 0; v.sa_handler = signal_handler; SIGACTION(SIGSEGV, &v, NULL); struct res res; /* read-only on file opened in read/write mode - should success */ res_prepare(argv[0], &res, 
FH_RDWR, PMEM2_PROT_READ); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map); void *addr_map = pmem2_map_get_address(map); if (!ut_sigsetjmp(Jmp)) { /* memcpy should now fail */ memcpy_fn(addr_map, word1, strlen(word1), 0); UT_FATAL("memcpy successful"); } pmem2_unmap(&map); res_cleanup(&res); signal(SIGSEGV, SIG_DFL); return 1; } /* * test_r_mode_r_prot -- test R/W protection * pmem2_map() - should success * memcpy() - should fail */ static int test_r_mode_r_prot(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_r_mode_r_prot <file>"); /* arrange to catch SIGSEGV */ struct sigaction v; sigemptyset(&v.sa_mask); v.sa_flags = 0; v.sa_handler = signal_handler; SIGACTION(SIGSEGV, &v, NULL); struct res res; /* read-only on file opened in read-only mode - should succeed */ res_prepare(argv[0], &res, FH_READ, PMEM2_PROT_READ); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map); void *addr_map = pmem2_map_get_address(map); if (!ut_sigsetjmp(Jmp)) { /* memcpy should now fail */ memcpy_fn(addr_map, word1, strlen(word1), 0); UT_FATAL("memcpy successful"); } pmem2_unmap(&map); res_cleanup(&res); signal(SIGSEGV, SIG_DFL); return 1; } /* * test_rw_mode_none_prot -- test R/W protection * pmem2_map() - should success * memcpy() - should fail */ static int test_rw_mode_none_prot(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_rw_mode_none_prot <file>"); /* arrange to catch SIGSEGV */ struct sigaction v; sigemptyset(&v.sa_mask); v.sa_flags = 0; v.sa_handler = signal_handler; SIGACTION(SIGSEGV, &v, NULL); struct res res; /* none on file opened in read-only mode - should success */ res_prepare(argv[0], &res, FH_READ, PMEM2_PROT_NONE); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); 
pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map); void *addr_map = pmem2_map_get_address(map); if (!ut_sigsetjmp(Jmp)) { /* memcpy should now fail */ memcpy_fn(addr_map, word1, strlen(word1), 0); UT_FATAL("memcpy successful"); } pmem2_unmap(&map); res_cleanup(&res); signal(SIGSEGV, SIG_DFL); return 1; } /* * sum_asm[] --> simple program in assembly which calculates '2 + 2' and * returns the result */ static unsigned char sum_asm[] = { 0x55, /* push %rbp */ 0x48, 0x89, 0xe5, /* mov %rsp,%rbp */ 0xc7, 0x45, 0xf8, 0x02, 0x00, 0x00, 0x00, /* movl $0x2,-0x8(%rbp) */ 0x8b, 0x45, 0xf8, /* mov -0x8(%rbp),%eax */ 0x01, 0xc0, /* add %eax,%eax */ 0x89, 0x45, 0xfc, /* mov %eax,-0x4(%rbp) */ 0x8b, 0x45, 0xfc, /* mov -0x4(%rbp),%eax */ 0x5d, /* pop %rbp */ 0xc3, /* retq */ }; typedef int (*sum_fn)(void); /* * test_rx_mode_rx_prot_do_execute -- copy string with the program to mapped * memory to prepare memory, execute the program and verify result */ static int test_rx_mode_rx_prot_do_execute(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_rx_mode_rx_prot_do_execute <file>"); char *file = argv[0]; struct res res; /* Windows does not support PMEM2_PROT_WRITE combination */ res_prepare(file, &res, FH_EXEC | FH_RDWR, PMEM2_PROT_WRITE | PMEM2_PROT_READ); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); char *addr_map = pmem2_map_get_address(map); map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0); pmem2_unmap(&map); /* Windows does not support PMEM2_PROT_EXEC combination */ pmem2_config_set_protection(&res.cfg, PMEM2_PROT_READ | PMEM2_PROT_EXEC); ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); sum_fn sum = (sum_fn)addr_map; int sum_result = sum(); UT_ASSERTeq(sum_result, 4); pmem2_unmap(&map); res_cleanup(&res); return 1; } /* * test_rwx_mode_rx_prot_do_write -- try to copy the string into mapped memory, * expect failure */ static int test_rwx_mode_rx_prot_do_write(const struct 
test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL( "usage: test_rwx_mode_rx_prot_do_write <file> <if_sharing>"); struct sigaction v; sigemptyset(&v.sa_mask); v.sa_flags = 0; v.sa_handler = signal_handler; SIGACTION(SIGSEGV, &v, NULL); char *file = argv[0]; unsigned if_sharing = ATOU(argv[1]); struct res res; /* Windows does not support PMEM2_PROT_EXEC combination */ res_prepare(file, &res, FH_EXEC | FH_RDWR, PMEM2_PROT_READ | PMEM2_PROT_EXEC); if (if_sharing) pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); char *addr_map = pmem2_map_get_address(map); if (!ut_sigsetjmp(Jmp)) { /* memcpy_fn should fail */ map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0); } pmem2_unmap(&map); res_cleanup(&res); signal(SIGSEGV, SIG_DFL); return 2; } /* * test_rwx_mode_rwx_prot_do_execute -- copy string with the program to mapped * memory to prepare memory, execute the program and verify result */ static int test_rwx_mode_rwx_prot_do_execute(const struct test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL( "usage: test_rwx_mode_rwx_prot_do_execute <file> <if_sharing>"); char *file = argv[0]; unsigned if_sharing = ATOU(argv[1]); struct res res; res_prepare(file, &res, FH_EXEC | FH_RDWR, PMEM2_PROT_EXEC | PMEM2_PROT_WRITE | PMEM2_PROT_READ); if (if_sharing) pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); char *addr_map = pmem2_map_get_address(map); map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0); sum_fn sum = (sum_fn)addr_map; int sum_result = sum(); UT_ASSERTeq(sum_result, 4); pmem2_unmap(&map); res_cleanup(&res); signal(SIGSEGV, SIG_DFL); return 2; } /* * test_rw_mode_rw_prot_do_execute -- copy string with the program to mapped * memory to prepare memory, and execute the program - should fail */ static int test_rw_mode_rw_prot_do_execute(const struct test_case *tc, 
int argc, char *argv[]) { if (argc < 2) UT_FATAL( "usage: test_rw_mode_rwx_prot_do_execute <file> <if_sharing>"); struct sigaction v; sigemptyset(&v.sa_mask); v.sa_flags = 0; v.sa_handler = signal_handler; SIGACTION(SIGSEGV, &v, NULL); char *file = argv[0]; unsigned if_sharing = ATOU(argv[1]); struct res res; res_prepare(file, &res, FH_RDWR, PMEM2_PROT_WRITE | PMEM2_PROT_READ); if (if_sharing) pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); void *addr_map = pmem2_map_get_address(map); map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0); sum_fn sum = (sum_fn)addr_map; if (!ut_sigsetjmp(Jmp)) { sum(); /* sum function should now fail */ } pmem2_unmap(&map); res_cleanup(&res); return 2; } static const char *initial_state = "No code."; /* * test_rwx_prot_map_priv_do_execute -- copy string with the program to * the mapped memory with MAP_PRIVATE to prepare memory, execute the program * and verify the result */ static int test_rwx_prot_map_priv_do_execute(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL( "usage: test_rwx_prot_map_priv_do_execute <file> <if_sharing>"); char *file = argv[0]; struct res res; res_prepare(file, &res, FH_RDWR, PMEM2_PROT_WRITE | PMEM2_PROT_READ); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); char *addr_map = pmem2_map_get_address(map); map->memcpy_fn(addr_map, initial_state, sizeof(initial_state), 0); pmem2_unmap(&map); res_cleanup(&res); res_prepare(file, &res, FH_READ | FH_EXEC, PMEM2_PROT_EXEC | PMEM2_PROT_WRITE | PMEM2_PROT_READ); pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE); ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); addr_map = pmem2_map_get_address(map); map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0); sum_fn sum = (sum_fn)addr_map; int sum_result = sum(); UT_ASSERTeq(sum_result, 4); pmem2_unmap(&map); ret = pmem2_map(&res.cfg, res.src, &map); 
UT_ASSERTeq(ret, 0); addr_map = pmem2_map_get_address(map); /* check if changes in private mapping affect initial state */ UT_ASSERTeq(memcmp(addr_map, initial_state, strlen(initial_state)), 0); pmem2_unmap(&map); res_cleanup(&res); return 1; } /* * test_cases -- available test cases */ static struct test_case test_cases[] = { TEST_CASE(test_rw_mode_rw_prot), TEST_CASE(test_r_mode_rw_prot), TEST_CASE(test_rw_modex_rwx_prot), TEST_CASE(test_rw_modex_rx_prot), TEST_CASE(test_rw_mode_r_prot), TEST_CASE(test_r_mode_r_prot), TEST_CASE(test_rw_mode_none_prot), TEST_CASE(test_rx_mode_rx_prot_do_execute), TEST_CASE(test_rwx_mode_rx_prot_do_write), TEST_CASE(test_rwx_mode_rwx_prot_do_execute), TEST_CASE(test_rw_mode_rw_prot_do_execute), TEST_CASE(test_rwx_prot_map_priv_do_execute), }; #define NTESTS (sizeof(test_cases) / sizeof(test_cases[0])) int main(int argc, char *argv[]) { START(argc, argv, "pmem2_map_prot"); util_init(); out_init("pmem2_map_prot", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0); TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS); out_fini(); DONE(NULL); } #ifdef _MSC_VER MSVC_CONSTR(libpmem2_init) MSVC_DESTR(libpmem2_fini) #endif
13,698
22.537801
80
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_layout/obj_layout.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */

/*
 * obj_layout.c -- unit test for layout
 *
 * This test should be modified after every layout change. It's here to prevent
 * any accidental layout changes.
 */
#include "util.h"
#include "unittest.h"
#include "sync.h"

#include "heap_layout.h"
#include "lane.h"
#include "tx.h"
#include "ulog.h"
#include "list.h"

/* expected on-media sizes (V3/V4 pool layout) -- keep in sync with headers */
#define SIZEOF_CHUNK_HEADER_V3 (8)
#define MAX_CHUNK_V3 (65535 - 7)
#define SIZEOF_CHUNK_V3 (1024ULL * 256)
#define SIZEOF_CHUNK_RUN_HEADER_V3 (16)
#define SIZEOF_ZONE_HEADER_V3 (64)
#define SIZEOF_ZONE_METADATA_V3 (SIZEOF_ZONE_HEADER_V3 +\
	SIZEOF_CHUNK_HEADER_V3 * MAX_CHUNK_V3)
#define SIZEOF_HEAP_HDR_V3 (1024)
#define SIZEOF_LEGACY_ALLOCATION_HEADER_V3 (64)
#define SIZEOF_COMPACT_ALLOCATION_HEADER_V3 (16)
#define SIZEOF_LOCK_V3 (64)
#define SIZEOF_PMEMOID_V3 (16)
#define SIZEOF_LIST_ENTRY_V3 (SIZEOF_PMEMOID_V3 * 2)
#define SIZEOF_LIST_HEAD_V3 (SIZEOF_PMEMOID_V3 + SIZEOF_LOCK_V3)
#define SIZEOF_LANE_SECTION_V3 (1024)
#define SIZEOF_LANE_V3 (3 * SIZEOF_LANE_SECTION_V3)
#define SIZEOF_ULOG_V4 (CACHELINE_SIZE)
#define SIZEOF_ULOG_BASE_ENTRY_V4 (8)
#define SIZEOF_ULOG_VAL_ENTRY_V4 (16)
#define SIZEOF_ULOG_BUF_ENTRY_V4 (24)

/* lane log capacities depend on the cacheline size of the build target */
#if CACHELINE_SIZE == 128
#define SIZEOF_LANE_UNDO_SIZE (1920)
#define SIZEOF_LANE_REDO_EXTERNAL_SIZE (640)
#define SIZEOF_LANE_REDO_INTERNAL_SIZE (128)
#elif CACHELINE_SIZE == 64
#define SIZEOF_LANE_UNDO_SIZE (2048)
#define SIZEOF_LANE_REDO_EXTERNAL_SIZE (640)
#define SIZEOF_LANE_REDO_INTERNAL_SIZE (192)
#else
#error "Unknown cacheline size"
#endif

POBJ_LAYOUT_BEGIN(layout);
POBJ_LAYOUT_ROOT(layout, struct foo);
POBJ_LAYOUT_END(layout);

/* dummy object used only to measure list entry/head sizes */
struct foo {
	POBJ_LIST_ENTRY(struct foo) f;
};

POBJ_LIST_HEAD(foo_head, struct foo);

int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_layout");

	/* all checks below are compile-time; a layout change breaks the build */
	UT_COMPILE_ERROR_ON(CHUNKSIZE != SIZEOF_CHUNK_V3);

	/* heap_layout.h: chunk */
	ASSERT_ALIGNED_BEGIN(struct chunk);
	ASSERT_ALIGNED_FIELD(struct chunk, data);
	ASSERT_ALIGNED_CHECK(struct chunk);
	/*
	 * NOTE(review): this checks sizeof(struct chunk_run) right after
	 * verifying struct chunk; upstream checks sizeof(struct chunk) here
	 * and the chunk_run size is re-checked below -- possibly a typo,
	 * verify intent before changing.
	 */
	UT_COMPILE_ERROR_ON(sizeof(struct chunk_run) != SIZEOF_CHUNK_V3);

	/* heap_layout.h: chunk_run_header / chunk_run */
	ASSERT_ALIGNED_BEGIN(struct chunk_run_header);
	ASSERT_ALIGNED_FIELD(struct chunk_run_header, block_size);
	ASSERT_ALIGNED_FIELD(struct chunk_run_header, alignment);
	ASSERT_ALIGNED_CHECK(struct chunk_run_header);
	UT_COMPILE_ERROR_ON(sizeof(struct chunk_run_header) !=
		SIZEOF_CHUNK_RUN_HEADER_V3);

	ASSERT_ALIGNED_BEGIN(struct chunk_run);
	ASSERT_ALIGNED_FIELD(struct chunk_run, hdr);
	ASSERT_ALIGNED_FIELD(struct chunk_run, content);
	ASSERT_ALIGNED_CHECK(struct chunk_run);
	UT_COMPILE_ERROR_ON(sizeof(struct chunk_run) != SIZEOF_CHUNK_V3);

	/* heap_layout.h: chunk_header */
	ASSERT_ALIGNED_BEGIN(struct chunk_header);
	ASSERT_ALIGNED_FIELD(struct chunk_header, type);
	ASSERT_ALIGNED_FIELD(struct chunk_header, flags);
	ASSERT_ALIGNED_FIELD(struct chunk_header, size_idx);
	ASSERT_ALIGNED_CHECK(struct chunk_header);
	UT_COMPILE_ERROR_ON(sizeof(struct chunk_header) !=
		SIZEOF_CHUNK_HEADER_V3);

	/* heap_layout.h: zone_header / zone */
	ASSERT_ALIGNED_BEGIN(struct zone_header);
	ASSERT_ALIGNED_FIELD(struct zone_header, magic);
	ASSERT_ALIGNED_FIELD(struct zone_header, size_idx);
	ASSERT_ALIGNED_FIELD(struct zone_header, reserved);
	ASSERT_ALIGNED_CHECK(struct zone_header);
	UT_COMPILE_ERROR_ON(sizeof(struct zone_header) !=
		SIZEOF_ZONE_HEADER_V3);

	ASSERT_ALIGNED_BEGIN(struct zone);
	ASSERT_ALIGNED_FIELD(struct zone, header);
	ASSERT_ALIGNED_FIELD(struct zone, chunk_headers);
	ASSERT_ALIGNED_CHECK(struct zone);
	UT_COMPILE_ERROR_ON(sizeof(struct zone) != SIZEOF_ZONE_METADATA_V3);

	/* heap_layout.h: heap_header */
	ASSERT_ALIGNED_BEGIN(struct heap_header);
	ASSERT_ALIGNED_FIELD(struct heap_header, signature);
	ASSERT_ALIGNED_FIELD(struct heap_header, major);
	ASSERT_ALIGNED_FIELD(struct heap_header, minor);
	ASSERT_ALIGNED_FIELD(struct heap_header, unused);
	ASSERT_ALIGNED_FIELD(struct heap_header, chunksize);
	ASSERT_ALIGNED_FIELD(struct heap_header, chunks_per_zone);
	ASSERT_ALIGNED_FIELD(struct heap_header, reserved);
	ASSERT_ALIGNED_FIELD(struct heap_header, checksum);
	ASSERT_ALIGNED_CHECK(struct heap_header);
	UT_COMPILE_ERROR_ON(sizeof(struct heap_header) != SIZEOF_HEAP_HDR_V3);

	/* allocation headers (legacy and compact) */
	ASSERT_ALIGNED_BEGIN(struct allocation_header_legacy);
	ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, unused);
	ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, size);
	ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, unused2);
	ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, root_size);
	ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, type_num);
	ASSERT_ALIGNED_CHECK(struct allocation_header_legacy);
	UT_COMPILE_ERROR_ON(sizeof(struct allocation_header_legacy) !=
		SIZEOF_LEGACY_ALLOCATION_HEADER_V3);

	ASSERT_ALIGNED_BEGIN(struct allocation_header_compact);
	ASSERT_ALIGNED_FIELD(struct allocation_header_compact, size);
	ASSERT_ALIGNED_FIELD(struct allocation_header_compact, extra);
	ASSERT_ALIGNED_CHECK(struct allocation_header_compact);
	UT_COMPILE_ERROR_ON(sizeof(struct allocation_header_compact) !=
		SIZEOF_COMPACT_ALLOCATION_HEADER_V3);

	/* ulog.h: ulog and its entry types */
	ASSERT_ALIGNED_BEGIN(struct ulog);
	ASSERT_ALIGNED_FIELD(struct ulog, checksum);
	ASSERT_ALIGNED_FIELD(struct ulog, next);
	ASSERT_ALIGNED_FIELD(struct ulog, capacity);
	ASSERT_ALIGNED_FIELD(struct ulog, gen_num);
	ASSERT_ALIGNED_FIELD(struct ulog, flags);
	ASSERT_ALIGNED_FIELD(struct ulog, unused);
	ASSERT_ALIGNED_CHECK(struct ulog);
	UT_COMPILE_ERROR_ON(sizeof(struct ulog) != SIZEOF_ULOG_V4);

	ASSERT_ALIGNED_BEGIN(struct ulog_entry_base);
	ASSERT_ALIGNED_FIELD(struct ulog_entry_base, offset);
	ASSERT_ALIGNED_CHECK(struct ulog_entry_base);
	UT_COMPILE_ERROR_ON(sizeof(struct ulog_entry_base) !=
		SIZEOF_ULOG_BASE_ENTRY_V4);

	ASSERT_ALIGNED_BEGIN(struct ulog_entry_val);
	ASSERT_ALIGNED_FIELD(struct ulog_entry_val, base);
	ASSERT_ALIGNED_FIELD(struct ulog_entry_val, value);
	ASSERT_ALIGNED_CHECK(struct ulog_entry_val);
	UT_COMPILE_ERROR_ON(sizeof(struct ulog_entry_val) !=
		SIZEOF_ULOG_VAL_ENTRY_V4);

	ASSERT_ALIGNED_BEGIN(struct ulog_entry_buf);
	ASSERT_ALIGNED_FIELD(struct ulog_entry_buf, base);
	ASSERT_ALIGNED_FIELD(struct ulog_entry_buf, checksum);
	ASSERT_ALIGNED_FIELD(struct ulog_entry_buf, size);
	ASSERT_ALIGNED_CHECK(struct ulog_entry_buf);
	UT_COMPILE_ERROR_ON(sizeof(struct ulog_entry_buf) !=
		SIZEOF_ULOG_BUF_ENTRY_V4);

	/* libpmemobj.h: PMEMoid */
	ASSERT_ALIGNED_BEGIN(PMEMoid);
	ASSERT_ALIGNED_FIELD(PMEMoid, pool_uuid_lo);
	ASSERT_ALIGNED_FIELD(PMEMoid, off);
	ASSERT_ALIGNED_CHECK(PMEMoid);
	UT_COMPILE_ERROR_ON(sizeof(PMEMoid) != SIZEOF_PMEMOID_V3);

	/* persistent locks must match their internal/os counterparts */
	UT_COMPILE_ERROR_ON(sizeof(PMEMmutex) != SIZEOF_LOCK_V3);
	UT_COMPILE_ERROR_ON(sizeof(PMEMmutex) != sizeof(PMEMmutex_internal));
	UT_COMPILE_ERROR_ON(util_alignof(PMEMmutex) !=
		util_alignof(PMEMmutex_internal));
	UT_COMPILE_ERROR_ON(util_alignof(PMEMmutex) !=
		util_alignof(os_mutex_t));
	UT_COMPILE_ERROR_ON(util_alignof(PMEMmutex) != util_alignof(uint64_t));

	UT_COMPILE_ERROR_ON(sizeof(PMEMrwlock) != SIZEOF_LOCK_V3);
	UT_COMPILE_ERROR_ON(util_alignof(PMEMrwlock) !=
		util_alignof(PMEMrwlock_internal));
	UT_COMPILE_ERROR_ON(util_alignof(PMEMrwlock) !=
		util_alignof(os_rwlock_t));
	UT_COMPILE_ERROR_ON(util_alignof(PMEMrwlock) != util_alignof(uint64_t));

	UT_COMPILE_ERROR_ON(sizeof(PMEMcond) != SIZEOF_LOCK_V3);
	UT_COMPILE_ERROR_ON(util_alignof(PMEMcond) !=
		util_alignof(PMEMcond_internal));
	UT_COMPILE_ERROR_ON(util_alignof(PMEMcond) != util_alignof(os_cond_t));
	UT_COMPILE_ERROR_ON(util_alignof(PMEMcond) != util_alignof(uint64_t));

	/* list.h: internal list layout vs the public POBJ_LIST macros */
	UT_COMPILE_ERROR_ON(sizeof(struct foo) != SIZEOF_LIST_ENTRY_V3);
	UT_COMPILE_ERROR_ON(sizeof(struct list_entry) != SIZEOF_LIST_ENTRY_V3);
	UT_COMPILE_ERROR_ON(sizeof(struct foo_head) != SIZEOF_LIST_HEAD_V3);
	UT_COMPILE_ERROR_ON(sizeof(struct list_head) != SIZEOF_LIST_HEAD_V3);

	/* lane.h: lane layout and per-log capacities */
	ASSERT_ALIGNED_BEGIN(struct lane_layout);
	ASSERT_ALIGNED_FIELD(struct lane_layout, internal);
	ASSERT_ALIGNED_FIELD(struct lane_layout, external);
	ASSERT_ALIGNED_FIELD(struct lane_layout, undo);
	ASSERT_ALIGNED_CHECK(struct lane_layout);
	UT_COMPILE_ERROR_ON(sizeof(struct lane_layout) != SIZEOF_LANE_V3);

	UT_COMPILE_ERROR_ON(LANE_UNDO_SIZE != SIZEOF_LANE_UNDO_SIZE);
	UT_COMPILE_ERROR_ON(LANE_REDO_EXTERNAL_SIZE !=
		SIZEOF_LANE_REDO_EXTERNAL_SIZE);
	UT_COMPILE_ERROR_ON(LANE_REDO_INTERNAL_SIZE !=
		SIZEOF_LANE_REDO_INTERNAL_SIZE);

	DONE(NULL);
}

#ifdef _MSC_VER
/*
 * Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
 */
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
8,411
35.103004
79
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_tx_add_range_direct/obj_tx_add_range_direct.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */

/*
 * obj_tx_add_range_direct.c -- unit test for pmemobj_tx_add_range_direct
 */
#include <string.h>
#include <stddef.h>

#include "tx.h"
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"

#define LAYOUT_NAME "tx_add_range_direct"

#define OBJ_SIZE 1024

enum type_number {
	TYPE_OBJ,
	TYPE_OBJ_ABORT,
};

TOID_DECLARE(struct object, 0);

struct object {
	size_t value;	/* snapshotted/modified field used by most tests */
	unsigned char data[OBJ_SIZE - sizeof(size_t)];
};

#define VALUE_OFF (offsetof(struct object, value))
#define VALUE_SIZE (sizeof(size_t))
#define DATA_OFF (offsetof(struct object, data))
#define DATA_SIZE (OBJ_SIZE - sizeof(size_t))
#define TEST_VALUE_1 1
#define TEST_VALUE_2 2

/*
 * do_tx_zalloc -- do (zeroed) tx allocation with specified type number
 */
static PMEMoid
do_tx_zalloc(PMEMobjpool *pop, unsigned type_num)
{
	PMEMoid ret = OID_NULL;

	TX_BEGIN(pop) {
		ret = pmemobj_tx_zalloc(sizeof(struct object), type_num);
	} TX_END

	return ret;
}

/*
 * do_tx_alloc -- do tx allocation and initialize first init_num bytes
 *
 * The remainder of the object is left uninitialized, which the
 * POBJ_XADD_ASSUME_INITIALIZED tests below rely on.
 */
static PMEMoid
do_tx_alloc(PMEMobjpool *pop, uint64_t type_num, uint64_t init_num)
{
	PMEMoid ret = OID_NULL;

	TX_BEGIN(pop) {
		ret = pmemobj_tx_alloc(sizeof(struct object), type_num);
		pmemobj_memset(pop, pmemobj_direct(ret), 0, init_num, 0);
	} TX_END

	return ret;
}

/*
 * do_tx_add_range_alloc_commit -- call add_range_direct on object allocated
 * within the same transaction and commit the transaction
 */
static void
do_tx_add_range_alloc_commit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
		UT_ASSERT(!TOID_IS_NULL(obj));

		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_1;

		ret = pmemobj_tx_add_range_direct(ptr + DATA_OFF, DATA_SIZE);
		UT_ASSERTeq(ret, 0);

		pmemobj_memset_persist(pop, D_RW(obj)->data, TEST_VALUE_2,
			DATA_SIZE);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	/* both modified ranges must be visible after commit */
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);

	size_t i;
	for (i = 0; i < DATA_SIZE; i++)
		UT_ASSERTeq(D_RO(obj)->data[i], TEST_VALUE_2);
}

/*
 * do_tx_add_range_alloc_abort -- call add_range_direct on object allocated
 * within the same transaction and abort the transaction
 */
static void
do_tx_add_range_alloc_abort(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TX_BEGIN(pop) {
		TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ_ABORT));
		UT_ASSERT(!TOID_IS_NULL(obj));

		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_1;

		ret = pmemobj_tx_add_range_direct(ptr + DATA_OFF, DATA_SIZE);
		UT_ASSERTeq(ret, 0);

		pmemobj_memset_persist(pop, D_RW(obj)->data, TEST_VALUE_2,
			DATA_SIZE);

		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END

	/* the allocation itself must have been rolled back by the abort */
	TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_OBJ_ABORT));
	UT_ASSERT(TOID_IS_NULL(obj));
}

/*
 * do_tx_add_range_twice_commit -- call add_range_direct on the same area
 * twice and commit the transaction
 */
static void
do_tx_add_range_twice_commit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	UT_ASSERT(!TOID_IS_NULL(obj));

	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_1;

		/* adding an already-snapshotted range must still succeed */
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_2;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_2);
}

/*
 * do_tx_add_range_twice_abort -- call add_range_direct on the same area
 * twice and abort the transaction
 */
static void
do_tx_add_range_twice_abort(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	UT_ASSERT(!TOID_IS_NULL(obj));

	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_1;

		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_2;

		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END

	/* the object was zero-allocated, so abort restores 0 */
	UT_ASSERTeq(D_RO(obj)->value, 0);
}

/*
 * do_tx_add_range_abort_after_nested -- call add_range_direct in both the
 * outer and the nested transaction, commit the nested transaction and then
 * abort the outer one; all changes must be rolled back
 */
static void
do_tx_add_range_abort_after_nested(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj1;
	TOID(struct object) obj2;
	TOID_ASSIGN(obj1, do_tx_zalloc(pop, TYPE_OBJ));
	TOID_ASSIGN(obj2, do_tx_zalloc(pop, TYPE_OBJ));

	TX_BEGIN(pop) {
		char *ptr1 = (char *)pmemobj_direct(obj1.oid);
		ret = pmemobj_tx_add_range_direct(ptr1 + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);

		D_RW(obj1)->value = TEST_VALUE_1;

		TX_BEGIN(pop) {
			char *ptr2 = (char *)pmemobj_direct(obj2.oid);
			ret = pmemobj_tx_add_range_direct(ptr2 + DATA_OFF,
					DATA_SIZE);
			UT_ASSERTeq(ret, 0);

			pmemobj_memset_persist(pop, D_RW(obj2)->data,
				TEST_VALUE_2, DATA_SIZE);
		} TX_ONABORT {
			UT_ASSERT(0);
		} TX_END

		/* abort the outer tx after the nested one has committed */
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj1)->value, 0);

	size_t i;
	for (i = 0; i < DATA_SIZE; i++)
		UT_ASSERTeq(D_RO(obj2)->data[i], 0);
}

/*
 * do_tx_add_range_abort_nested -- call add_range_direct in both the outer
 * and the nested transaction and abort from within the nested transaction;
 * all changes must be rolled back
 */
static void
do_tx_add_range_abort_nested(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj1;
	TOID(struct object) obj2;
	TOID_ASSIGN(obj1, do_tx_zalloc(pop, TYPE_OBJ));
	TOID_ASSIGN(obj2, do_tx_zalloc(pop, TYPE_OBJ));

	TX_BEGIN(pop) {
		char *ptr1 = (char *)pmemobj_direct(obj1.oid);
		ret = pmemobj_tx_add_range_direct(ptr1 + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);

		D_RW(obj1)->value = TEST_VALUE_1;

		TX_BEGIN(pop) {
			char *ptr2 = (char *)pmemobj_direct(obj2.oid);
			ret = pmemobj_tx_add_range_direct(ptr2 + DATA_OFF,
					DATA_SIZE);
			UT_ASSERTeq(ret, 0);

			pmemobj_memset_persist(pop, D_RW(obj2)->data,
				TEST_VALUE_2, DATA_SIZE);

			/* aborting a nested tx aborts the whole tx */
			pmemobj_tx_abort(-1);
		} TX_ONCOMMIT {
			UT_ASSERT(0);
		} TX_END
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj1)->value, 0);

	size_t i;
	for (i = 0; i < DATA_SIZE; i++)
		UT_ASSERTeq(D_RO(obj2)->data[i], 0);
}

/*
 * do_tx_add_range_commit_nested -- call add_range_direct in both the outer
 * and the nested transaction and commit the whole transaction
 */
static void
do_tx_add_range_commit_nested(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj1;
	TOID(struct object) obj2;
	TOID_ASSIGN(obj1, do_tx_zalloc(pop, TYPE_OBJ));
	TOID_ASSIGN(obj2, do_tx_zalloc(pop, TYPE_OBJ));

	TX_BEGIN(pop) {
		char *ptr1 = (char *)pmemobj_direct(obj1.oid);
		ret = pmemobj_tx_add_range_direct(ptr1 + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);

		D_RW(obj1)->value = TEST_VALUE_1;

		TX_BEGIN(pop) {
			char *ptr2 = (char *)pmemobj_direct(obj2.oid);
			ret = pmemobj_tx_add_range_direct(ptr2 + DATA_OFF,
					DATA_SIZE);
			UT_ASSERTeq(ret, 0);

			pmemobj_memset_persist(pop, D_RW(obj2)->data,
				TEST_VALUE_2, DATA_SIZE);
		} TX_ONABORT {
			UT_ASSERT(0);
		} TX_END
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj1)->value, TEST_VALUE_1);

	size_t i;
	for (i = 0; i < DATA_SIZE; i++)
		UT_ASSERTeq(D_RO(obj2)->data[i], TEST_VALUE_2);
}

/*
 * do_tx_add_range_abort -- call add_range_direct and abort the tx
 */
static void
do_tx_add_range_abort(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));

	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_1;

		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj)->value, 0);
}

/*
 * do_tx_add_range_commit -- call add_range_direct and commit tx
 */
static void
do_tx_add_range_commit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));

	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_1;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}

/*
 * do_tx_xadd_range_no_flush_commit -- call xadd_range_direct with
 * POBJ_XADD_NO_FLUSH flag set and commit tx
 */
static void
do_tx_xadd_range_no_flush_commit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));

	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE, POBJ_XADD_NO_FLUSH);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_1;
		/* let pmemcheck find we didn't flush it */
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}

/*
 * do_tx_xadd_range_no_snapshot_commit -- call xadd_range_direct with
 * POBJ_XADD_NO_SNAPSHOT flag, commit the transaction
 */
static void
do_tx_xadd_range_no_snapshot_commit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));

	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE, POBJ_XADD_NO_SNAPSHOT);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_1;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}

/*
 * do_tx_xadd_range_no_snapshot_abort -- call xadd_range_direct with
 * POBJ_XADD_NO_SNAPSHOT flag, modify the value, abort the transaction
 */
static void
do_tx_xadd_range_no_snapshot_abort(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));

	D_RW(obj)->value = TEST_VALUE_1;

	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE, POBJ_XADD_NO_SNAPSHOT);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_2;

		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END

	/*
	 * value added with NO_SNAPSHOT flag should NOT be rolled back
	 * after abort
	 */
	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_2);
}

/*
 * do_tx_xadd_range_no_uninit_check_commit -- call xadd_range_direct for
 * initialized memory with POBJ_XADD_ASSUME_INITIALIZED flag set and commit the
 * tx
 */
static void
do_tx_xadd_range_no_uninit_check_commit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));

	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE, POBJ_XADD_ASSUME_INITIALIZED);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_1;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}

/*
 * do_tx_xadd_range_no_uninit_check_commit_uninit -- call xadd_range_direct for
 * uninitialized memory with POBJ_XADD_ASSUME_INITIALIZED flag set and commit
 * the tx
 */
static void
do_tx_xadd_range_no_uninit_check_commit_uninit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, 0));

	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE, POBJ_XADD_ASSUME_INITIALIZED);
		UT_ASSERTeq(ret, 0);
		ret = pmemobj_tx_xadd_range_direct(ptr + DATA_OFF,
				DATA_SIZE, POBJ_XADD_ASSUME_INITIALIZED);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_1;
		D_RW(obj)->data[256] = TEST_VALUE_2;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERTeq(D_RO(obj)->data[256], TEST_VALUE_2);
}

/*
 * do_tx_xadd_range_no_uninit_check_commit_part_uninit -- call
 * xadd_range_direct for partially uninitialized memory with
 * POBJ_XADD_ASSUME_INITIALIZED flag set only for uninitialized part and
 * commit the tx
 */
static void
do_tx_xadd_range_no_uninit_check_commit_part_uninit(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, VALUE_SIZE));

	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);
		ret = pmemobj_tx_xadd_range_direct(ptr + DATA_OFF,
				DATA_SIZE, POBJ_XADD_ASSUME_INITIALIZED);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_1;
		D_RW(obj)->data[256] = TEST_VALUE_2;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERTeq(D_RO(obj)->data[256], TEST_VALUE_2);
}

/*
 * do_tx_add_range_no_uninit_check_commit_no_flag -- call add_range_direct for
 * partially uninitialized memory.
 */
static void
do_tx_add_range_no_uninit_check_commit_no_flag(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, VALUE_SIZE));

	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE);
		UT_ASSERTeq(ret, 0);
		ret = pmemobj_tx_add_range_direct(ptr + DATA_OFF, DATA_SIZE);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_1;
		D_RW(obj)->data[256] = TEST_VALUE_2;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
	UT_ASSERTeq(D_RO(obj)->data[256], TEST_VALUE_2);
}

/*
 * do_tx_xadd_range_no_uninit_check_abort -- call pmemobj_tx_range with
 * POBJ_XADD_ASSUME_INITIALIZED flag, modify the value inside aborted
 * transaction
 */
static void
do_tx_xadd_range_no_uninit_check_abort(PMEMobjpool *pop)
{
	int ret;
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, 0));

	TX_BEGIN(pop) {
		char *ptr = (char *)pmemobj_direct(obj.oid);
		ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF,
				VALUE_SIZE, POBJ_XADD_ASSUME_INITIALIZED);
		UT_ASSERTeq(ret, 0);
		ret = pmemobj_tx_xadd_range_direct(ptr + DATA_OFF,
				DATA_SIZE, POBJ_XADD_ASSUME_INITIALIZED);
		UT_ASSERTeq(ret, 0);

		D_RW(obj)->value = TEST_VALUE_1;
		D_RW(obj)->data[256] = TEST_VALUE_2;

		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END
}

/*
 * do_tx_commit_and_abort -- use range cache, commit and then abort to make
 * sure that it won't affect previously modified data.
 */
static void
do_tx_commit_and_abort(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));

	TX_BEGIN(pop) {
		TX_SET(obj, value, TEST_VALUE_1); /* this will land in cache */
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	TX_BEGIN(pop) {
		pmemobj_tx_abort(-1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}

/*
 * test_add_direct_macros -- test TX_ADD_DIRECT, TX_ADD_FIELD_DIRECT and
 * TX_SET_DIRECT
 */
static void
test_add_direct_macros(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));

	TX_BEGIN(pop) {
		struct object *o = D_RW(obj);
		TX_SET_DIRECT(o, value, TEST_VALUE_1);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);

	TX_BEGIN(pop) {
		struct object *o = D_RW(obj);
		TX_ADD_DIRECT(o);
		o->value = TEST_VALUE_2;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_2);

	TX_BEGIN(pop) {
		struct object *o = D_RW(obj);
		TX_ADD_FIELD_DIRECT(o, value);
		o->value = TEST_VALUE_1;
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}

#define MAX_CACHED_RANGES 100

/*
 * test_tx_corruption_bug -- test whether tx_adds for small objects from one
 * transaction does NOT leak to the next transaction
 */
static void
test_tx_corruption_bug(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	struct object *o = D_RW(obj);

	unsigned char i;
	/* loop counter must fit in unsigned char */
	UT_COMPILE_ERROR_ON(1.5 * MAX_CACHED_RANGES > 255);

	TX_BEGIN(pop) {
		for (i = 0; i < 1.5 * MAX_CACHED_RANGES; ++i) {
			TX_ADD_DIRECT(&o->data[i]);
			o->data[i] = i;
		}
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	for (i = 0; i < 1.5 * MAX_CACHED_RANGES; ++i)
		UT_ASSERTeq((unsigned char)o->data[i], i);

	TX_BEGIN(pop) {
		for (i = 0; i < 0.1 * MAX_CACHED_RANGES; ++i) {
			TX_ADD_DIRECT(&o->data[i]);
			o->data[i] = i + 10;
		}
		pmemobj_tx_abort(EINVAL);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END

	/* the aborted tx must not have corrupted the earlier data */
	for (i = 0; i < 1.5 * MAX_CACHED_RANGES; ++i)
		UT_ASSERTeq((unsigned char)o->data[i], i);

	pmemobj_free(&obj.oid);
}

/*
 * do_tx_add_range_too_large -- verify that snapshotting a range larger than
 * PMEMOBJ_MAX_ALLOC_SIZE fails with EINVAL, for all four failure-handling
 * variants (abort, POBJ_XADD_NO_ABORT, POBJ_TX_FAILURE_RETURN with both
 * add_range and xadd_range)
 */
static void
do_tx_add_range_too_large(PMEMobjpool *pop)
{
	TOID(struct object) obj;
	TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
	int ret = 0;

	TX_BEGIN(pop) {
		ret = pmemobj_tx_add_range_direct(pmemobj_direct(obj.oid),
			PMEMOBJ_MAX_ALLOC_SIZE + 1);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_ONABORT {
		UT_ASSERTeq(errno, EINVAL);
		UT_ASSERTeq(ret, 0);
	} TX_END

	errno = 0;
	ret = 0;

	TX_BEGIN(pop) {
		ret = pmemobj_tx_xadd_range_direct(pmemobj_direct(obj.oid),
			PMEMOBJ_MAX_ALLOC_SIZE + 1, POBJ_XADD_NO_ABORT);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, EINVAL);
		UT_ASSERTeq(ret, EINVAL);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	errno = 0;
	ret = 0;

	TX_BEGIN(pop) {
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		ret = pmemobj_tx_add_range_direct(pmemobj_direct(obj.oid),
			PMEMOBJ_MAX_ALLOC_SIZE + 1);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, EINVAL);
		UT_ASSERTeq(ret, EINVAL);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	errno = 0;
	ret = 0;

	TX_BEGIN(pop) {
		pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
		ret = pmemobj_tx_xadd_range_direct(pmemobj_direct(obj.oid),
			PMEMOBJ_MAX_ALLOC_SIZE + 1, 0);
	} TX_ONCOMMIT {
		UT_ASSERTeq(errno, EINVAL);
		UT_ASSERTeq(ret, EINVAL);
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END

	errno = 0;
}

/*
 * do_tx_add_range_lots_of_small_snapshots -- add many small (8-byte) ranges
 * so the range cache has to be used and refilled
 */
static void
do_tx_add_range_lots_of_small_snapshots(PMEMobjpool *pop)
{
	size_t s = TX_DEFAULT_RANGE_CACHE_SIZE * 2;
	size_t snapshot_s = 8;

	PMEMoid obj;
	int ret = pmemobj_zalloc(pop, &obj, s, 0);
	UT_ASSERTeq(ret, 0);

	TX_BEGIN(pop) {
		for (size_t n = 0; n < s; n += snapshot_s) {
			void *addr = (void *)((size_t)pmemobj_direct(obj) + n);
			pmemobj_tx_add_range_direct(addr, snapshot_s);
		}
	} TX_ONABORT {
		UT_ASSERT(0);
	} TX_END
}

/*
 * do_tx_add_cache_overflowing_range -- test if we can successfully snapshot
 * and rollback an object that, when cached, overflows one cache
 */
static void
do_tx_add_cache_overflowing_range(PMEMobjpool *pop)
{
	/*
	 * This test adds snapshot to the cache, but in way that results in
	 * one of the add_range being split into two caches.
	 */
	size_t s = TX_DEFAULT_RANGE_CACHE_SIZE * 2;
	size_t snapshot_s = TX_DEFAULT_RANGE_CACHE_THRESHOLD - 8;

	PMEMoid obj;
	int ret = pmemobj_zalloc(pop, &obj, s, 0);
	UT_ASSERTeq(ret, 0);

	TX_BEGIN(pop) {
		size_t n = 0;
		while (n != s) {
			if (n + snapshot_s > s)
				snapshot_s = s - n;
			void *addr = (void *)((size_t)pmemobj_direct(obj) + n);
			pmemobj_tx_add_range_direct(addr, snapshot_s);
			memset(addr, 0xc, snapshot_s);
			n += snapshot_s;
		}
		pmemobj_tx_abort(0);
	} TX_ONCOMMIT {
		UT_ASSERT(0);
	} TX_END

	/* after abort all snapshotted writes must be rolled back to zero */
	UT_ASSERT(util_is_zeroed(pmemobj_direct(obj), s));
	UT_ASSERTne(errno, 0);
	errno = 0;

	pmemobj_free(&obj);
}

int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_tx_add_range_direct");
	util_init();

	if (argc != 2)
		UT_FATAL("usage: %s [file]", argv[0]);

	PMEMobjpool *pop;
	if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL * 4,
	    S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create");

	do_tx_add_range_commit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_abort(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_commit_nested(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_abort_nested(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_abort_after_nested(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_twice_commit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_twice_abort(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_alloc_commit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_alloc_abort(pop);
	VALGRIND_WRITE_STATS;
	do_tx_commit_and_abort(pop);
	VALGRIND_WRITE_STATS;
	test_add_direct_macros(pop);
	VALGRIND_WRITE_STATS;
	test_tx_corruption_bug(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_too_large(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_lots_of_small_snapshots(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_cache_overflowing_range(pop);
	VALGRIND_WRITE_STATS;
	do_tx_xadd_range_no_snapshot_commit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_xadd_range_no_snapshot_abort(pop);
	VALGRIND_WRITE_STATS;
	do_tx_xadd_range_no_uninit_check_commit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_xadd_range_no_uninit_check_commit_uninit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_xadd_range_no_uninit_check_commit_part_uninit(pop);
	VALGRIND_WRITE_STATS;
	do_tx_xadd_range_no_uninit_check_abort(pop);
	VALGRIND_WRITE_STATS;
	do_tx_add_range_no_uninit_check_commit_no_flag(pop);
	VALGRIND_WRITE_STATS;
	do_tx_xadd_range_no_flush_commit(pop);

	pmemobj_close(pop);

	DONE(NULL);
}
20,975
22.177901
79
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_many_size_allocs/obj_many_size_allocs.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */

/*
 * obj_many_size_allocs.c -- allocation of many objects with different sizes
 *
 */

#include <stddef.h>

#include "unittest.h"
#include "heap.h"

#define LAYOUT_NAME "many_size_allocs"

#define TEST_ALLOC_SIZE 2048
#define LAZY_LOAD_SIZE 10
#define LAZY_LOAD_BIG_SIZE 150

struct cargs {
	size_t size;	/* requested allocation size, reused as fill pattern */
};

/*
 * test_constructor -- object constructor that fills the allocation with a
 * size-derived byte pattern
 */
static int
test_constructor(PMEMobjpool *pop, void *addr, void *args)
{
	struct cargs *a = args;
	/* do not use pmem_memset_persist() here */
	pmemobj_memset_persist(pop, addr, a->size % 256, a->size);

	return 0;
}

/*
 * test_allocs -- allocate objects of every size from 1 to TEST_ALLOC_SIZE-1,
 * verify the pool checks clean after close/reopen, then free them all
 *
 * Returns the reopened pool handle (the original one is closed).
 */
static PMEMobjpool *
test_allocs(PMEMobjpool *pop, const char *path)
{
	PMEMoid *oid = MALLOC(sizeof(PMEMoid) * TEST_ALLOC_SIZE);

	/* a zero-sized allocation is expected to fail */
	if (pmemobj_alloc(pop, &oid[0], 0, 0, NULL, NULL) == 0)
		UT_FATAL("pmemobj_alloc(0) succeeded");

	for (unsigned i = 1; i < TEST_ALLOC_SIZE; ++i) {
		struct cargs args = { i };
		if (pmemobj_alloc(pop, &oid[i], i, 0,
				test_constructor, &args) != 0)
			UT_FATAL("!pmemobj_alloc");
		UT_ASSERT(!OID_IS_NULL(oid[i]));
	}

	pmemobj_close(pop);

	UT_ASSERT(pmemobj_check(path, LAYOUT_NAME) == 1);

	UT_ASSERT((pop = pmemobj_open(path, LAYOUT_NAME)) != NULL);

	for (int i = 1; i < TEST_ALLOC_SIZE; ++i) {
		pmemobj_free(&oid[i]);
		UT_ASSERT(OID_IS_NULL(oid[i]));
	}

	FREE(oid);

	return pop;
}

/*
 * test_lazy_load -- allocate a few objects, reopen the pool and make sure
 * free/alloc still work on the lazily-loaded heap
 *
 * Returns the reopened pool handle (the original one is closed).
 */
static PMEMobjpool *
test_lazy_load(PMEMobjpool *pop, const char *path)
{
	PMEMoid oid[3];

	int ret = pmemobj_alloc(pop, &oid[0], LAZY_LOAD_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_alloc(pop, &oid[1], LAZY_LOAD_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	ret = pmemobj_alloc(pop, &oid[2], LAZY_LOAD_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);

	pmemobj_close(pop);

	UT_ASSERT((pop = pmemobj_open(path, LAYOUT_NAME)) != NULL);

	pmemobj_free(&oid[1]);
	ret = pmemobj_alloc(pop, &oid[1], LAZY_LOAD_BIG_SIZE, 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);

	return pop;
}

#define ALLOC_BLOCK_SIZE 64
#define MAX_BUCKET_MAP_ENTRIES (RUN_DEFAULT_SIZE / ALLOC_BLOCK_SIZE)

/*
 * test_all_classes -- for each run-bucket size class, allocate until the pool
 * is exhausted and then free everything
 */
static void
test_all_classes(PMEMobjpool *pop)
{
	for (unsigned i = 1; i <= MAX_BUCKET_MAP_ENTRIES; ++i) {
		int err;
		int nallocs = 0;

		/* allocate this size class until out of space */
		while ((err = pmemobj_alloc(pop, NULL, i * ALLOC_BLOCK_SIZE,
			0, NULL, NULL)) == 0) {
			nallocs++;
		}
		UT_ASSERT(nallocs > 0);
		PMEMoid iter, niter;
		POBJ_FOREACH_SAFE(pop, iter, niter) {
			pmemobj_free(&iter);
		}
	}
}

int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_many_size_allocs");

	if (argc != 2)
		UT_FATAL("usage: %s file-name", argv[0]);

	const char *path = argv[1];

	PMEMobjpool *pop = NULL;

	if ((pop = pmemobj_create(path, LAYOUT_NAME, 0,
			S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	pop = test_lazy_load(pop, path);
	pop = test_allocs(pop, path);
	test_all_classes(pop);

	pmemobj_close(pop);

	DONE(NULL);
}
2,837
20.179104
76
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_integration/pmem2_integration.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * pmem2_integration.c -- pmem2 integration tests */ #include "libpmem2.h" #include "unittest.h" #include "rand.h" #include "ut_pmem2.h" #include "ut_pmem2_setup_integration.h" #define N_GRANULARITIES 3 /* BYTE, CACHE_LINE, PAGE */ /* * map_invalid -- try to mapping memory with invalid config */ static void map_invalid(struct pmem2_config *cfg, struct pmem2_source *src, int result) { struct pmem2_map *map = (struct pmem2_map *)0x7; int ret = pmem2_map(cfg, src, &map); UT_PMEM2_EXPECT_RETURN(ret, result); UT_ASSERTeq(map, NULL); } /* * map_valid -- return valid mapped pmem2_map and validate mapped memory length */ static struct pmem2_map * map_valid(struct pmem2_config *cfg, struct pmem2_source *src, size_t size) { struct pmem2_map *map = NULL; int ret = pmem2_map(cfg, src, &map); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTne(map, NULL); UT_ASSERTeq(pmem2_map_get_size(map), size); return map; } /* * test_reuse_cfg -- map pmem2_map twice using the same pmem2_config */ static int test_reuse_cfg(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_reuse_cfg <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t size; UT_ASSERTeq(pmem2_source_size(src, &size), 0); struct pmem2_map *map1 = map_valid(cfg, src, size); struct pmem2_map *map2 = map_valid(cfg, src, size); /* cleanup after the test */ pmem2_unmap(&map2); pmem2_unmap(&map1); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } /* * test_reuse_cfg_with_diff_fd -- map pmem2_map using the same pmem2_config * with changed file descriptor */ static int test_reuse_cfg_with_diff_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL("usage: test_reuse_cfg_with_diff_fd <file> <file2>"); char *file1 = argv[0]; int 
fd1 = OPEN(file1, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd1, PMEM2_GRANULARITY_PAGE); size_t size1; UT_ASSERTeq(pmem2_source_size(src, &size1), 0); struct pmem2_map *map1 = map_valid(cfg, src, size1); char *file2 = argv[1]; int fd2 = OPEN(file2, O_RDWR); /* set another valid file descriptor in source */ struct pmem2_source *src2; UT_ASSERTeq(pmem2_source_from_fd(&src2, fd2), 0); size_t size2; UT_ASSERTeq(pmem2_source_size(src2, &size2), 0); struct pmem2_map *map2 = map_valid(cfg, src2, size2); /* cleanup after the test */ pmem2_unmap(&map2); CLOSE(fd2); pmem2_unmap(&map1); pmem2_config_delete(&cfg); pmem2_source_delete(&src); pmem2_source_delete(&src2); CLOSE(fd1); return 2; } /* * test_register_pmem -- map, use and unmap memory */ static int test_register_pmem(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_register_pmem <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); char *word = "XXXXXXXX"; struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t size; UT_ASSERTeq(pmem2_source_size(src, &size), 0); struct pmem2_map *map = map_valid(cfg, src, size); char *addr = pmem2_map_get_address(map); size_t length = strlen(word); /* write some data in mapped memory without persisting data */ memcpy(addr, word, length); /* cleanup after the test */ pmem2_unmap(&map); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } /* * test_use_misc_lens_and_offsets -- test with multiple offsets and lengths */ static int test_use_misc_lens_and_offsets(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_use_misc_lens_and_offsets <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len; 
UT_ASSERTeq(pmem2_source_size(src, &len), 0); struct pmem2_map *map = map_valid(cfg, src, len); char *base = pmem2_map_get_address(map); pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map); rng_t rng; randomize_r(&rng, 13); /* arbitrarily chosen value */ for (size_t i = 0; i < len; i++) base[i] = (char)rnd64_r(&rng); persist_fn(base, len); UT_ASSERTeq(len % Ut_mmap_align, 0); for (size_t l = len; l > 0; l -= Ut_mmap_align) { for (size_t off = 0; off < l; off += Ut_mmap_align) { size_t len2 = l - off; int ret = pmem2_config_set_length(cfg, len2); UT_PMEM2_EXPECT_RETURN(ret, 0); ret = pmem2_config_set_offset(cfg, off); UT_PMEM2_EXPECT_RETURN(ret, 0); struct pmem2_map *map2 = map_valid(cfg, src, len2); char *ptr = pmem2_map_get_address(map2); UT_ASSERTeq(ret = memcmp(base + off, ptr, len2), 0); pmem2_unmap(&map2); } } pmem2_unmap(&map); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } struct gran_test_ctx; typedef void(*map_func)(struct pmem2_config *cfg, struct pmem2_source *src, struct gran_test_ctx *ctx); /* * gran_test_ctx -- essential parameters used by granularity test */ struct gran_test_ctx { map_func map_with_expected_gran; enum pmem2_granularity expected_granularity; }; /* * map_with_avail_gran -- map the range with valid granularity, * includes cleanup */ static void map_with_avail_gran(struct pmem2_config *cfg, struct pmem2_source *src, struct gran_test_ctx *ctx) { struct pmem2_map *map; int ret = pmem2_map(cfg, src, &map); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTne(map, NULL); UT_ASSERTeq(ctx->expected_granularity, pmem2_map_get_store_granularity(map)); /* cleanup after the test */ pmem2_unmap(&map); } /* * map_with_unavail_gran -- map the range with invalid granularity * (unsuccessful) */ static void map_with_unavail_gran(struct pmem2_config *cfg, struct pmem2_source *src, struct gran_test_ctx *unused) { struct pmem2_map *map; int ret = pmem2_map(cfg, src, &map); UT_PMEM2_EXPECT_RETURN(ret, 
PMEM2_E_GRANULARITY_NOT_SUPPORTED); UT_ERR("%s", pmem2_errormsg()); UT_ASSERTeq(map, NULL); } static const map_func map_with_gran[N_GRANULARITIES][N_GRANULARITIES] = { /* requested granularity / available granularity */ /* -------------------------------------------------------------------- */ /* BYTE CACHE_LINE PAGE */ /* -------------------------------------------------------------------- */ /* BYTE */ {map_with_avail_gran, map_with_unavail_gran, map_with_unavail_gran}, /* CL */ {map_with_avail_gran, map_with_avail_gran, map_with_unavail_gran}, /* PAGE */ {map_with_avail_gran, map_with_avail_gran, map_with_avail_gran}}; static const enum pmem2_granularity gran_id2granularity[N_GRANULARITIES] = { PMEM2_GRANULARITY_BYTE, PMEM2_GRANULARITY_CACHE_LINE, PMEM2_GRANULARITY_PAGE}; /* * str2gran_id -- reads granularity id from the provided string */ static int str2gran_id(const char *in) { int gran = atoi(in); UT_ASSERT(gran >= 0 && gran < N_GRANULARITIES); return gran; } /* * test_granularity -- performs pmem2_map with certain expected granularity * in context of certain available granularity */ static int test_granularity(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL( "usage: test_granularity <file>" " <available_granularity> <requested_granularity>"); struct gran_test_ctx ctx; int avail_gran_id = str2gran_id(argv[1]); int req_gran_id = str2gran_id(argv[2]); ctx.expected_granularity = gran_id2granularity[avail_gran_id]; ctx.map_with_expected_gran = map_with_gran[req_gran_id][avail_gran_id]; char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, gran_id2granularity[req_gran_id]); ctx.map_with_expected_gran(cfg, src, &ctx); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 3; } /* * test_len_not_aligned -- try to use unaligned length */ static int test_len_not_aligned(const struct test_case *tc, int argc, char *argv[]) { if 
(argc < 1) UT_FATAL("usage: test_len_not_aligned <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len, alignment; int ret = pmem2_source_size(src, &len); UT_PMEM2_EXPECT_RETURN(ret, 0); PMEM2_SOURCE_ALIGNMENT(src, &alignment); UT_ASSERT(len > alignment); size_t aligned_len = ALIGN_DOWN(len, alignment); size_t unaligned_len = aligned_len - 1; ret = pmem2_config_set_length(cfg, unaligned_len); UT_PMEM2_EXPECT_RETURN(ret, 0); map_invalid(cfg, src, PMEM2_E_LENGTH_UNALIGNED); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } /* * test_len_aligned -- try to use aligned length */ static int test_len_aligned(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_len_aligned <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len, alignment; int ret = pmem2_source_size(src, &len); UT_PMEM2_EXPECT_RETURN(ret, 0); PMEM2_SOURCE_ALIGNMENT(src, &alignment); UT_ASSERT(len > alignment); size_t aligned_len = ALIGN_DOWN(len, alignment); ret = pmem2_config_set_length(cfg, aligned_len); UT_PMEM2_EXPECT_RETURN(ret, 0); struct pmem2_map *map = map_valid(cfg, src, aligned_len); pmem2_unmap(&map); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } /* * test_offset_not_aligned -- try to map with unaligned offset */ static int test_offset_not_aligned(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_offset_not_aligned <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len, alignment; int ret = pmem2_source_size(src, &len); 
UT_PMEM2_EXPECT_RETURN(ret, 0); PMEM2_SOURCE_ALIGNMENT(src, &alignment); /* break the offset */ size_t offset = alignment - 1; ret = pmem2_config_set_offset(cfg, offset); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERT(len > alignment); /* in this case len has to be aligned, only offset will be unaligned */ size_t aligned_len = ALIGN_DOWN(len, alignment); ret = pmem2_config_set_length(cfg, aligned_len - alignment); UT_PMEM2_EXPECT_RETURN(ret, 0); map_invalid(cfg, src, PMEM2_E_OFFSET_UNALIGNED); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } /* * test_offset_aligned -- try to map with aligned offset */ static int test_offset_aligned(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_offset_aligned <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len, alignment; int ret = pmem2_source_size(src, &len); UT_PMEM2_EXPECT_RETURN(ret, 0); PMEM2_SOURCE_ALIGNMENT(src, &alignment); /* set the aligned offset */ size_t offset = alignment; ret = pmem2_config_set_offset(cfg, offset); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERT(len > alignment * 2); /* set the aligned len */ size_t map_len = ALIGN_DOWN(len / 2, alignment); ret = pmem2_config_set_length(cfg, map_len); UT_PMEM2_EXPECT_RETURN(ret, 0); struct pmem2_map *map = map_valid(cfg, src, map_len); pmem2_unmap(&map); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } /* * test_mem_move_cpy_set_with_map_private -- map O_RDONLY file and do * pmem2_[cpy|set|move]_fns with PMEM2_PRIVATE sharing */ static int test_mem_move_cpy_set_with_map_private(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL( "usage: test_mem_move_cpy_set_with_map_private <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDONLY); const char *word1 = "Persistent memory..."; const char *word2 = 
"Nonpersistent memory"; const char *word3 = "XXXXXXXXXXXXXXXXXXXX"; struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); pmem2_config_set_sharing(cfg, PMEM2_PRIVATE); size_t size = 0; UT_ASSERTeq(pmem2_source_size(src, &size), 0); struct pmem2_map *map = map_valid(cfg, src, size); char *addr = pmem2_map_get_address(map); /* copy inital state */ char *initial_state = MALLOC(size); memcpy(initial_state, addr, size); pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map); pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map); pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map); memcpy_fn(addr, word1, strlen(word1), 0); UT_ASSERTeq(strcmp(addr, word1), 0); memmove_fn(addr, word2, strlen(word2), 0); UT_ASSERTeq(strcmp(addr, word2), 0); memset_fn(addr, 'X', strlen(word3), 0); UT_ASSERTeq(strcmp(addr, word3), 0); /* remap memory, and check that the data has not been saved */ pmem2_unmap(&map); map = map_valid(cfg, src, size); addr = pmem2_map_get_address(map); UT_ASSERTeq(strcmp(addr, initial_state), 0); /* cleanup after the test */ pmem2_unmap(&map); FREE(initial_state); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } /* * test_deep_flush_valid -- perform valid deep_flush for whole map */ static int test_deep_flush_valid(const struct test_case *tc, int argc, char *argv[]) { char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len; PMEM2_SOURCE_SIZE(src, &len); struct pmem2_map *map = map_valid(cfg, src, len); char *addr = pmem2_map_get_address(map); pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map); memset(addr, 0, len); persist_fn(addr, len); int ret = pmem2_deep_flush(map, addr, len); UT_PMEM2_EXPECT_RETURN(ret, 0); pmem2_unmap(&map); PMEM2_CONFIG_DELETE(&cfg); PMEM2_SOURCE_DELETE(&src); CLOSE(fd); return 1; } /* * 
test_deep_flush_e_range_behind -- try deep_flush for range behind a map */ static int test_deep_flush_e_range_behind(const struct test_case *tc, int argc, char *argv[]) { char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len; PMEM2_SOURCE_SIZE(src, &len); struct pmem2_map *map = map_valid(cfg, src, len); size_t map_size = pmem2_map_get_size(map); char *addr = pmem2_map_get_address(map); pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map); memset(addr, 0, len); persist_fn(addr, len); int ret = pmem2_deep_flush(map, addr + map_size + 1, 64); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_DEEP_FLUSH_RANGE); pmem2_unmap(&map); PMEM2_CONFIG_DELETE(&cfg); PMEM2_SOURCE_DELETE(&src); CLOSE(fd); return 1; } /* * test_deep_flush_e_range_before -- try deep_flush for range before a map */ static int test_deep_flush_e_range_before(const struct test_case *tc, int argc, char *argv[]) { char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len; PMEM2_SOURCE_SIZE(src, &len); struct pmem2_map *map = map_valid(cfg, src, len); size_t map_size = pmem2_map_get_size(map); char *addr = pmem2_map_get_address(map); pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map); memset(addr, 0, len); persist_fn(addr, len); int ret = pmem2_deep_flush(map, addr - map_size, 64); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_DEEP_FLUSH_RANGE); pmem2_unmap(&map); PMEM2_CONFIG_DELETE(&cfg); PMEM2_SOURCE_DELETE(&src); CLOSE(fd); return 1; } /* * test_deep_flush_slice -- try deep_flush for slice of a map */ static int test_deep_flush_slice(const struct test_case *tc, int argc, char *argv[]) { char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, 
PMEM2_GRANULARITY_PAGE); size_t len; PMEM2_SOURCE_SIZE(src, &len); struct pmem2_map *map = map_valid(cfg, src, len); size_t map_size = pmem2_map_get_size(map); size_t map_part = map_size / 4; char *addr = pmem2_map_get_address(map); pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map); memset(addr, 0, map_part); persist_fn(addr, map_part); int ret = pmem2_deep_flush(map, addr + map_part, map_part); UT_PMEM2_EXPECT_RETURN(ret, 0); pmem2_unmap(&map); PMEM2_CONFIG_DELETE(&cfg); PMEM2_SOURCE_DELETE(&src); CLOSE(fd); return 1; } /* * test_deep_flush_overlap -- try deep_flush for range overlaping map */ static int test_deep_flush_overlap(const struct test_case *tc, int argc, char *argv[]) { char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len; PMEM2_SOURCE_SIZE(src, &len); struct pmem2_map *map = map_valid(cfg, src, len); size_t map_size = pmem2_map_get_size(map); char *addr = pmem2_map_get_address(map); pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map); memset(addr, 0, len); persist_fn(addr, len); int ret = pmem2_deep_flush(map, addr + 1024, map_size); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_DEEP_FLUSH_RANGE); pmem2_unmap(&map); PMEM2_CONFIG_DELETE(&cfg); PMEM2_SOURCE_DELETE(&src); CLOSE(fd); return 1; } /* * test_source_anon -- tests map/config/source functions in combination * with anonymous source. 
*/ static int test_source_anon(enum pmem2_sharing_type sharing, enum pmem2_granularity granularity, size_t source_len, size_t map_len) { int ret = 0; struct pmem2_config *cfg; struct pmem2_source *src; struct pmem2_map *map; struct pmem2_badblock_context *bbctx; UT_ASSERTeq(pmem2_source_from_anon(&src, source_len), 0); UT_ASSERTeq(pmem2_source_device_id(src, NULL, NULL), PMEM2_E_NOSUPP); UT_ASSERTeq(pmem2_source_device_usc(src, NULL), PMEM2_E_NOSUPP); UT_ASSERTeq(pmem2_badblock_context_new(src, &bbctx), PMEM2_E_NOSUPP); size_t alignment; UT_ASSERTeq(pmem2_source_alignment(src, &alignment), 0); UT_ASSERT(alignment >= Ut_pagesize); size_t size; UT_ASSERTeq(pmem2_source_size(src, &size), 0); UT_ASSERTeq(size, source_len); PMEM2_CONFIG_NEW(&cfg); UT_ASSERTeq(pmem2_config_set_length(cfg, map_len), 0); UT_ASSERTeq(pmem2_config_set_offset(cfg, alignment), 0); /* ignored */ UT_ASSERTeq(pmem2_config_set_required_store_granularity(cfg, granularity), 0); UT_ASSERTeq(pmem2_config_set_sharing(cfg, sharing), 0); if ((ret = pmem2_map(cfg, src, &map)) != 0) goto map_fail; void *addr = pmem2_map_get_address(map); UT_ASSERTne(addr, NULL); UT_ASSERTeq(pmem2_map_get_size(map), map_len ? 
map_len : source_len); UT_ASSERTeq(pmem2_map_get_store_granularity(map), PMEM2_GRANULARITY_BYTE); UT_ASSERTeq(pmem2_deep_flush(map, addr, alignment), PMEM2_E_NOSUPP); UT_ASSERTeq(pmem2_unmap(&map), 0); map_fail: PMEM2_CONFIG_DELETE(&cfg); pmem2_source_delete(&src); return ret; } /* * test_source_anon_ok_private -- valid config /w private flag */ static int test_source_anon_private(const struct test_case *tc, int argc, char *argv[]) { int ret = test_source_anon(PMEM2_PRIVATE, PMEM2_GRANULARITY_BYTE, 1 << 30ULL, 1 << 20ULL); UT_ASSERTeq(ret, 0); return 1; } /* * test_source_anon_shared -- valid config /w shared flag */ static int test_source_anon_shared(const struct test_case *tc, int argc, char *argv[]) { int ret = test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_BYTE, 1 << 30ULL, 1 << 20ULL); UT_ASSERTeq(ret, 0); return 1; } /* * test_source_anon_page -- valid config /w page granularity */ static int test_source_anon_page(const struct test_case *tc, int argc, char *argv[]) { int ret = test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_PAGE, 1 << 30ULL, 1 << 20ULL); UT_ASSERTeq(ret, 0); return 1; } /* * test_source_anon_zero_len -- valid config /w zero (src inherited) map length */ static int test_source_anon_zero_len(const struct test_case *tc, int argc, char *argv[]) { int ret = test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_BYTE, 1 << 30ULL, 0); UT_ASSERTeq(ret, 0); return 1; } /* * test_source_anon_too_small -- valid config /w small mapping length */ static int test_source_anon_too_small(const struct test_case *tc, int argc, char *argv[]) { int ret = test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_BYTE, 1 << 30ULL, 1 << 10ULL); UT_ASSERTne(ret, 0); return 1; } /* * test_cases -- available test cases */ static struct test_case test_cases[] = { TEST_CASE(test_reuse_cfg), TEST_CASE(test_reuse_cfg_with_diff_fd), TEST_CASE(test_register_pmem), TEST_CASE(test_use_misc_lens_and_offsets), TEST_CASE(test_granularity), TEST_CASE(test_len_not_aligned), 
TEST_CASE(test_len_aligned), TEST_CASE(test_offset_not_aligned), TEST_CASE(test_offset_aligned), TEST_CASE(test_mem_move_cpy_set_with_map_private), TEST_CASE(test_deep_flush_valid), TEST_CASE(test_deep_flush_e_range_behind), TEST_CASE(test_deep_flush_e_range_before), TEST_CASE(test_deep_flush_slice), TEST_CASE(test_deep_flush_overlap), TEST_CASE(test_source_anon_private), TEST_CASE(test_source_anon_shared), TEST_CASE(test_source_anon_page), TEST_CASE(test_source_anon_too_small), TEST_CASE(test_source_anon_zero_len), }; #define NTESTS (sizeof(test_cases) / sizeof(test_cases[0])) int main(int argc, char *argv[]) { START(argc, argv, "pmem2_integration"); TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS); DONE(NULL); }
22,113
23.736018
79
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_ctl_alloc_class_config/obj_ctl_alloc_class_config.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017, Intel Corporation */ /* * obj_ctl_alloc_class_config.c -- tests for the ctl alloc class config */ #include "unittest.h" #define LAYOUT "obj_ctl_alloc_class_config" int main(int argc, char *argv[]) { START(argc, argv, "obj_ctl_alloc_class_config"); if (argc != 2) UT_FATAL("usage: %s file-name", argv[0]); const char *path = argv[1]; PMEMobjpool *pop; if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create: %s", path); struct pobj_alloc_class_desc alloc_class; int ret; ret = pmemobj_ctl_get(pop, "heap.alloc_class.128.desc", &alloc_class); UT_ASSERTeq(ret, 0); UT_OUT("%d %lu %d", alloc_class.header_type, alloc_class.unit_size, alloc_class.units_per_block); ret = pmemobj_ctl_get(pop, "heap.alloc_class.129.desc", &alloc_class); UT_ASSERTeq(ret, 0); UT_OUT("%d %lu %d", alloc_class.header_type, alloc_class.unit_size, alloc_class.units_per_block); ret = pmemobj_ctl_get(pop, "heap.alloc_class.130.desc", &alloc_class); UT_ASSERTeq(ret, 0); UT_OUT("%d %lu %d", alloc_class.header_type, alloc_class.unit_size, alloc_class.units_per_block); pmemobj_close(pop); DONE(NULL); }
1,242
22.45283
71
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_action/obj_action.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2019, Intel Corporation */ /* * obj_action.c -- test the action API */ #include <stdlib.h> #include "unittest.h" #define LAYOUT_NAME "obj_action" struct macro_reserve_s { PMEMoid oid; uint64_t value; }; TOID_DECLARE(struct macro_reserve_s, 1); struct foo { int bar; }; struct root { struct { PMEMoid oid; uint64_t value; } reserved; struct { PMEMoid oid; uint64_t value; } published; struct { PMEMoid oid; } tx_reserved; struct { PMEMoid oid; } tx_reserved_fulfilled; struct { PMEMoid oid; } tx_published; }; #define HUGE_ALLOC_SIZE ((1 << 20) * 3) #define MAX_ACTS 10 static void test_resv_cancel_huge(PMEMobjpool *pop) { PMEMoid oid; unsigned nallocs = 0; struct pobj_action *act = (struct pobj_action *) ZALLOC(sizeof(struct pobj_action) * MAX_ACTS); do { oid = pmemobj_reserve(pop, &act[nallocs++], HUGE_ALLOC_SIZE, 0); } while (!OID_IS_NULL(oid)); pmemobj_cancel(pop, act, nallocs - 1); unsigned nallocs2 = 0; do { oid = pmemobj_reserve(pop, &act[nallocs2++], HUGE_ALLOC_SIZE, 0); } while (!OID_IS_NULL(oid)); pmemobj_cancel(pop, act, nallocs2 - 1); UT_ASSERTeq(nallocs, nallocs2); FREE(act); } static void test_defer_free(PMEMobjpool *pop) { PMEMoid oid; int ret = pmemobj_alloc(pop, &oid, sizeof(struct foo), 0, NULL, NULL); UT_ASSERTeq(ret, 0); struct pobj_action act; pmemobj_defer_free(pop, oid, &act); pmemobj_publish(pop, &act, 1); struct foo *f = (struct foo *)pmemobj_direct(oid); f->bar = 5; /* should trigger memcheck error */ ret = pmemobj_alloc(pop, &oid, sizeof(struct foo), 0, NULL, NULL); UT_ASSERTeq(ret, 0); pmemobj_defer_free(pop, oid, &act); pmemobj_cancel(pop, &act, 1); f = (struct foo *)pmemobj_direct(oid); f->bar = 5; /* should NOT trigger memcheck error */ } /* * This function tests if macros included in action.h api compile and * allocate memory. 
*/ static void test_api_macros(PMEMobjpool *pop) { struct pobj_action macro_reserve_act[1]; TOID(struct macro_reserve_s) macro_reserve_p = POBJ_RESERVE_NEW(pop, struct macro_reserve_s, &macro_reserve_act[0]); UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid)); pmemobj_publish(pop, macro_reserve_act, 1); POBJ_FREE(&macro_reserve_p); macro_reserve_p = POBJ_RESERVE_ALLOC(pop, struct macro_reserve_s, sizeof(struct macro_reserve_s), &macro_reserve_act[0]); UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid)); pmemobj_publish(pop, macro_reserve_act, 1); POBJ_FREE(&macro_reserve_p); macro_reserve_p = POBJ_XRESERVE_NEW(pop, struct macro_reserve_s, &macro_reserve_act[0], 0); UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid)); pmemobj_publish(pop, macro_reserve_act, 1); POBJ_FREE(&macro_reserve_p); macro_reserve_p = POBJ_XRESERVE_ALLOC(pop, struct macro_reserve_s, sizeof(struct macro_reserve_s), &macro_reserve_act[0], 0); UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid)); pmemobj_publish(pop, macro_reserve_act, 1); POBJ_FREE(&macro_reserve_p); } #define POBJ_MAX_ACTIONS 60 static void test_many(PMEMobjpool *pop, size_t n) { struct pobj_action *act = (struct pobj_action *) MALLOC(sizeof(struct pobj_action) * n); PMEMoid *oid = (PMEMoid *) MALLOC(sizeof(PMEMoid) * n); for (int i = 0; i < n; ++i) { oid[i] = pmemobj_reserve(pop, &act[i], 1, 0); UT_ASSERT(!OID_IS_NULL(oid[i])); } UT_ASSERTeq(pmemobj_publish(pop, act, n), 0); for (int i = 0; i < n; ++i) { pmemobj_defer_free(pop, oid[i], &act[i]); } UT_ASSERTeq(pmemobj_publish(pop, act, n), 0); FREE(oid); FREE(act); } static void test_duplicate(PMEMobjpool *pop) { struct pobj_alloc_class_desc alloc_class_128; alloc_class_128.header_type = POBJ_HEADER_COMPACT; alloc_class_128.unit_size = 1024 * 100; alloc_class_128.units_per_block = 1; alloc_class_128.alignment = 0; int ret = pmemobj_ctl_set(pop, "heap.alloc_class.128.desc", &alloc_class_128); UT_ASSERTeq(ret, 0); struct pobj_action a[10]; PMEMoid oid[10]; oid[0] = pmemobj_xreserve(pop, &a[0], 1, 0, 
POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); pmemobj_cancel(pop, a, 1); oid[0] = pmemobj_xreserve(pop, &a[0], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); oid[0] = pmemobj_xreserve(pop, &a[1], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); oid[0] = pmemobj_xreserve(pop, &a[2], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); pmemobj_cancel(pop, a, 3); oid[0] = pmemobj_xreserve(pop, &a[0], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); oid[0] = pmemobj_xreserve(pop, &a[1], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); oid[0] = pmemobj_xreserve(pop, &a[2], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); oid[0] = pmemobj_xreserve(pop, &a[3], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); oid[0] = pmemobj_xreserve(pop, &a[4], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); pmemobj_cancel(pop, a, 5); } static void test_many_sets(PMEMobjpool *pop, size_t n) { struct pobj_action *act = (struct pobj_action *) MALLOC(sizeof(struct pobj_action) * n); PMEMoid oid; pmemobj_alloc(pop, &oid, sizeof(uint64_t) * n, 0, NULL, NULL); UT_ASSERT(!OID_IS_NULL(oid)); uint64_t *values = (uint64_t *)pmemobj_direct(oid); for (uint64_t i = 0; i < n; ++i) pmemobj_set_value(pop, &act[i], values + i, i); UT_ASSERTeq(pmemobj_publish(pop, act, n), 0); for (uint64_t i = 0; i < n; ++i) UT_ASSERTeq(*(values + i), i); pmemobj_free(&oid); FREE(act); } int main(int argc, char *argv[]) { START(argc, argv, "obj_action"); if (argc < 2) UT_FATAL("usage: %s filename", argv[0]); const char *path = argv[1]; PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR); if (pop == NULL) UT_FATAL("!pmemobj_create: %s", path); PMEMoid root = pmemobj_root(pop, sizeof(struct root)); struct root *rootp = (struct root *)pmemobj_direct(root); struct pobj_action reserved[2]; struct pobj_action published[2]; struct pobj_action tx_reserved; struct pobj_action tx_reserved_fulfilled; 
struct pobj_action tx_published; rootp->reserved.oid = pmemobj_reserve(pop, &reserved[0], sizeof(struct foo), 0); pmemobj_set_value(pop, &reserved[1], &rootp->reserved.value, 1); rootp->tx_reserved.oid = pmemobj_reserve(pop, &tx_reserved, sizeof(struct foo), 0); rootp->tx_reserved_fulfilled.oid = pmemobj_reserve(pop, &tx_reserved_fulfilled, sizeof(struct foo), 0); rootp->tx_published.oid = pmemobj_reserve(pop, &tx_published, sizeof(struct foo), 0); rootp->published.oid = pmemobj_reserve(pop, &published[0], sizeof(struct foo), 0); TX_BEGIN(pop) { pmemobj_tx_publish(&tx_reserved, 1); pmemobj_tx_abort(EINVAL); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END TX_BEGIN(pop) { pmemobj_tx_publish(&tx_reserved_fulfilled, 1); pmemobj_tx_publish(NULL, 0); /* this is to force resv fulfill */ pmemobj_tx_abort(EINVAL); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END pmemobj_set_value(pop, &published[1], &rootp->published.value, 1); pmemobj_publish(pop, published, 2); TX_BEGIN(pop) { pmemobj_tx_publish(&tx_published, 1); } TX_ONABORT { UT_ASSERT(0); } TX_END pmemobj_persist(pop, rootp, sizeof(*rootp)); pmemobj_close(pop); UT_ASSERTeq(pmemobj_check(path, LAYOUT_NAME), 1); UT_ASSERTne(pop = pmemobj_open(path, LAYOUT_NAME), NULL); root = pmemobj_root(pop, sizeof(struct root)); rootp = (struct root *)pmemobj_direct(root); struct foo *reserved_foop = (struct foo *)pmemobj_direct(rootp->reserved.oid); reserved_foop->bar = 1; /* should trigger memcheck error */ UT_ASSERTeq(rootp->reserved.value, 0); struct foo *published_foop = (struct foo *)pmemobj_direct(rootp->published.oid); published_foop->bar = 1; /* should NOT trigger memcheck error */ UT_ASSERTeq(rootp->published.value, 1); struct foo *tx_reserved_foop = (struct foo *)pmemobj_direct(rootp->tx_reserved.oid); tx_reserved_foop->bar = 1; /* should trigger memcheck error */ struct foo *tx_reserved_fulfilled_foop = (struct foo *)pmemobj_direct(rootp->tx_reserved_fulfilled.oid); tx_reserved_fulfilled_foop->bar = 1; /* should trigger memcheck error */ 
struct foo *tx_published_foop = (struct foo *)pmemobj_direct(rootp->tx_published.oid); tx_published_foop->bar = 1; /* should NOT trigger memcheck error */ test_resv_cancel_huge(pop); test_defer_free(pop); test_api_macros(pop); test_many(pop, POBJ_MAX_ACTIONS * 2); test_many_sets(pop, POBJ_MAX_ACTIONS * 2); test_duplicate(pop); pmemobj_close(pop); DONE(NULL); }
8,548
23.286932
73
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/pmem2_source_size/pmem2_source_size.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * pmem2_source_size.c -- pmem2_source_size unittests */ #include <stdint.h> #include "fault_injection.h" #include "unittest.h" #include "ut_pmem2.h" #include "ut_fh.h" #include "config.h" #include "out.h" typedef void (*test_fun)(const char *path, os_off_t size); /* * test_normal_file - tests normal file (common) */ static void test_normal_file(const char *path, os_off_t expected_size, enum file_handle_type type) { struct FHandle *fh = UT_FH_OPEN(type, path, FH_RDWR); struct pmem2_source *src; PMEM2_SOURCE_FROM_FH(&src, fh); size_t size; int ret = pmem2_source_size(src, &size); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(size, expected_size); PMEM2_SOURCE_DELETE(&src); UT_FH_CLOSE(fh); } /* * test_normal_file_fd - tests normal file using a file descriptor */ static int test_normal_file_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL("usage: test_normal_file_fd <file> <expected_size>"); char *path = argv[0]; os_off_t expected_size = ATOLL(argv[1]); test_normal_file(path, expected_size, FH_FD); return 2; } /* * test_normal_file_handle - tests normal file using a HANDLE */ static int test_normal_file_handle(const struct test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL("usage: test_normal_file_handle" " <file> <expected_size>"); char *path = argv[0]; os_off_t expected_size = ATOLL(argv[1]); test_normal_file(path, expected_size, FH_HANDLE); return 2; } /* * test_tmpfile - tests temporary file */ static void test_tmpfile(const char *dir, os_off_t requested_size, enum file_handle_type type) { struct FHandle *fh = UT_FH_OPEN(type, dir, FH_RDWR | FH_TMPFILE); UT_FH_TRUNCATE(fh, requested_size); struct pmem2_source *src; PMEM2_SOURCE_FROM_FH(&src, fh); size_t size = SIZE_MAX; int ret = pmem2_source_size(src, &size); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(size, requested_size); PMEM2_SOURCE_DELETE(&src); UT_FH_CLOSE(fh); } /* * 
test_tmpfile_fd - tests temporary file using file descriptor interface */ static int test_tmpfile_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL("usage: test_tmpfile_fd <file> <requested_size>"); char *dir = argv[0]; os_off_t requested_size = ATOLL(argv[1]); test_tmpfile(dir, requested_size, FH_FD); return 2; } /* * test_tmpfile_handle - tests temporary file using file handle interface */ static int test_tmpfile_handle(const struct test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL("usage: test_tmpfile_handle <file> <requested_size>"); char *dir = argv[0]; os_off_t requested_size = ATOLL(argv[1]); test_tmpfile(dir, requested_size, FH_HANDLE); return 2; } /* * test_cases -- available test cases */ static struct test_case test_cases[] = { TEST_CASE(test_normal_file_fd), TEST_CASE(test_normal_file_handle), TEST_CASE(test_tmpfile_fd), TEST_CASE(test_tmpfile_handle), }; #define NTESTS (sizeof(test_cases) / sizeof(test_cases[0])) int main(int argc, char **argv) { START(argc, argv, "pmem2_source_size"); util_init(); out_init("pmem2_source_size", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0); TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS); out_fini(); DONE(NULL); }
3,326
20.191083
75
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/ex_linkedlist/ex_linkedlist.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */

/*
 * ex_linkedlist.c - test of linkedlist example
 *
 * Builds a persistent tail queue and a persistent singly-linked list inside
 * transactions, exercising every insert/move/remove macro, then re-walks both
 * lists and compares the node values against hard-coded expected sequences.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "pmemobj_list.h"
#include "unittest.h"

/* number of nodes initially inserted at each end of a list */
#define ELEMENT_NO 10

/* report whether the walked list matched the expected sequence (res == 0) */
#define PRINT_RES(res, struct_name) do {\
	if ((res) == 0) {\
		UT_OUT("Outcome for " #struct_name " is correct!");\
	} else {\
		UT_ERR("Outcome for " #struct_name\
			" does not match expected result!!!");\
	}\
} while (0)

POBJ_LAYOUT_BEGIN(list);
POBJ_LAYOUT_ROOT(list, struct base);
POBJ_LAYOUT_TOID(list, struct tqueuehead);
POBJ_LAYOUT_TOID(list, struct slisthead);
POBJ_LAYOUT_TOID(list, struct tqnode);
POBJ_LAYOUT_TOID(list, struct snode);
POBJ_LAYOUT_END(list);

POBJ_TAILQ_HEAD(tqueuehead, struct tqnode);
/* tail-queue node: payload plus persistent linkage */
struct tqnode {
	int data;
	POBJ_TAILQ_ENTRY(struct tqnode) tnd;
};

POBJ_SLIST_HEAD(slisthead, struct snode);
/* singly-linked-list node: payload plus persistent linkage */
struct snode {
	int data;
	POBJ_SLIST_ENTRY(struct snode) snd;
};

/* pool root object: one head for each list kind */
struct base {
	struct tqueuehead tqueue;
	struct slisthead slist;
};

/* expected node values after init_tqueue() / init_slist() complete */
static const int expectedResTQ[] = { 111, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 222 };
static const int expectedResSL[] = { 111, 8, 222, 6, 5, 4, 3, 2, 1, 0, 333 };

/*
 * dump_tq -- dumps list on standard output
 */
static void
dump_tq(struct tqueuehead *head, const char *str)
{
	TOID(struct tqnode) var;

	UT_OUT("%s start", str);
	POBJ_TAILQ_FOREACH(var, head, tnd)
		UT_OUT("%d", D_RW(var)->data);
	UT_OUT("%s end", str);
}

/*
 * init_tqueue -- initialize tail queue
 *
 * All mutations happen in one transaction; on abort the test dies.
 * The intermediate dump_tq() calls produce output that is golden-matched,
 * so the exact order of operations below must not change.
 */
static void
init_tqueue(PMEMobjpool *pop, struct tqueuehead *head)
{
	/* idempotent: a previously-populated queue is left untouched */
	if (!POBJ_TAILQ_EMPTY(head))
		return;

	TOID(struct tqnode) node;
	TOID(struct tqnode) middleNode;
	TOID(struct tqnode) node888;
	TOID(struct tqnode) tempNode;
	int i = 0;

	TX_BEGIN(pop) {
		POBJ_TAILQ_INIT(head);
		dump_tq(head, "after init");

		/* insert 0..9 at the head and 0..9 at the tail */
		for (i = 0; i < ELEMENT_NO; ++i) {
			node = TX_NEW(struct tqnode);
			D_RW(node)->data = i;
			if (0 == i) {
				/* remember the first node (value 0) */
				middleNode = node;
			}
			POBJ_TAILQ_INSERT_HEAD(head, node, tnd);

			node = TX_NEW(struct tqnode);
			D_RW(node)->data = i;
			POBJ_TAILQ_INSERT_TAIL(head, node, tnd);
		}
		dump_tq(head, "after insert[head|tail]");

		node = TX_NEW(struct tqnode);
		D_RW(node)->data = 666;
		POBJ_TAILQ_INSERT_AFTER(middleNode, node, tnd);
		dump_tq(head, "after insert_after1");

		middleNode = POBJ_TAILQ_NEXT(middleNode, tnd);
		node = TX_NEW(struct tqnode);
		D_RW(node)->data = 888;
		node888 = node;
		POBJ_TAILQ_INSERT_BEFORE(middleNode, node, tnd);
		dump_tq(head, "after insert_before1");

		node = TX_NEW(struct tqnode);
		D_RW(node)->data = 555;
		POBJ_TAILQ_INSERT_BEFORE(middleNode, node, tnd);
		dump_tq(head, "after insert_before2");

		node = TX_NEW(struct tqnode);
		D_RW(node)->data = 111;
		tempNode = POBJ_TAILQ_FIRST(head);
		POBJ_TAILQ_INSERT_BEFORE(tempNode, node, tnd);
		dump_tq(head, "after insert_before3");

		node = TX_NEW(struct tqnode);
		D_RW(node)->data = 222;
		tempNode = POBJ_TAILQ_LAST(head);
		POBJ_TAILQ_INSERT_AFTER(tempNode, node, tnd);
		dump_tq(head, "after insert_after2");

		/* shuffle two neighbors to the tail and head respectively */
		tempNode = middleNode;
		middleNode = POBJ_TAILQ_PREV(tempNode, tnd);
		POBJ_TAILQ_MOVE_ELEMENT_TAIL(head, middleNode, tnd);
		dump_tq(head, "after move_element_tail");

		POBJ_TAILQ_MOVE_ELEMENT_HEAD(head, tempNode, tnd);
		dump_tq(head, "after move_element_head");

		/* remove first, last, and the remembered 888 node */
		tempNode = POBJ_TAILQ_FIRST(head);
		POBJ_TAILQ_REMOVE(head, tempNode, tnd);
		dump_tq(head, "after remove1");

		tempNode = POBJ_TAILQ_LAST(head);
		POBJ_TAILQ_REMOVE(head, tempNode, tnd);
		dump_tq(head, "after remove2");

		POBJ_TAILQ_REMOVE(head, node888, tnd);
		dump_tq(head, "after remove3");
	} TX_ONABORT {
		abort();
	} TX_END
}

/*
 * dump_sl -- dumps list on standard output
 */
static void
dump_sl(struct slisthead *head, const char *str)
{
	TOID(struct snode) var;

	UT_OUT("%s start", str);
	POBJ_SLIST_FOREACH(var, head, snd)
		UT_OUT("%d", D_RW(var)->data);
	UT_OUT("%s end", str);
}

/*
 * init_slist -- initialize SLIST
 *
 * Same pattern as init_tqueue(): one transaction, golden-matched dumps,
 * operation order is significant.
 */
static void
init_slist(PMEMobjpool *pop, struct slisthead *head)
{
	/* idempotent: a previously-populated list is left untouched */
	if (!POBJ_SLIST_EMPTY(head))
		return;

	TOID(struct snode) node;
	TOID(struct snode) tempNode;
	int i = 0;

	TX_BEGIN(pop) {
		POBJ_SLIST_INIT(head);
		dump_sl(head, "after init");

		for (i = 0; i < ELEMENT_NO; ++i) {
			node = TX_NEW(struct snode);
			D_RW(node)->data = i;
			POBJ_SLIST_INSERT_HEAD(head, node, snd);
		}
		dump_sl(head, "after insert_head");

		tempNode = POBJ_SLIST_FIRST(head);
		node = TX_NEW(struct snode);
		D_RW(node)->data = 111;
		POBJ_SLIST_INSERT_AFTER(tempNode, node, snd);
		dump_sl(head, "after insert_after1");

		tempNode = POBJ_SLIST_NEXT(node, snd);
		node = TX_NEW(struct snode);
		D_RW(node)->data = 222;
		POBJ_SLIST_INSERT_AFTER(tempNode, node, snd);
		dump_sl(head, "after insert_after2");

		tempNode = POBJ_SLIST_NEXT(node, snd);
		POBJ_SLIST_REMOVE_FREE(head, tempNode, snd);
		dump_sl(head, "after remove_free1");

		POBJ_SLIST_REMOVE_HEAD(head, snd);
		dump_sl(head, "after remove_head");

		/* walk to the last element, then append 333 after it */
		TOID(struct snode) element = POBJ_SLIST_FIRST(head);
		while (!TOID_IS_NULL(D_RO(element)->snd.pe_next)) {
			element = D_RO(element)->snd.pe_next;
		}
		node = TX_NEW(struct snode);
		D_RW(node)->data = 333;
		POBJ_SLIST_INSERT_AFTER(element, node, snd);
		dump_sl(head, "after insert_after3");

		element = node;
		node = TX_NEW(struct snode);
		D_RW(node)->data = 123;
		POBJ_SLIST_INSERT_AFTER(element, node, snd);
		dump_sl(head, "after insert_after4");

		tempNode = POBJ_SLIST_NEXT(node, snd);
		POBJ_SLIST_REMOVE_FREE(head, node, snd);
		dump_sl(head, "after remove_free2");
	} TX_ONABORT {
		abort();
	} TX_END
}

int
main(int argc, char *argv[])
{
	unsigned res = 0;
	PMEMobjpool *pop;
	const char *path;

	START(argc, argv, "ex_linkedlist");

	/* root doesn't count */
	UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(list) != 4);

	if (argc != 2) {
		UT_FATAL("usage: %s file-name", argv[0]);
	}
	path = argv[1];

	/* create the pool on first run, reopen it afterwards */
	if (os_access(path, F_OK) != 0) {
		if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(list),
			PMEMOBJ_MIN_POOL, 0666)) == NULL) {
			UT_FATAL("!pmemobj_create: %s", path);
		}
	} else {
		if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(list)))
				== NULL) {
			UT_FATAL("!pmemobj_open: %s", path);
		}
	}

	TOID(struct base) base = POBJ_ROOT(pop, struct base);
	struct tqueuehead *tqhead = &D_RW(base)->tqueue;
	struct slisthead *slhead = &D_RW(base)->slist;

	init_tqueue(pop, tqhead);
	init_slist(pop, slhead);

	/* verify the tail queue against the expected sequence */
	int i = 0;
	TOID(struct tqnode) tqelement;
	POBJ_TAILQ_FOREACH(tqelement, tqhead, tnd) {
		if (D_RO(tqelement)->data != expectedResTQ[i]) {
			res = 1;
			break;
		}
		i++;
	}
	PRINT_RES(res, tail queue);

	/* verify the singly-linked list against the expected sequence */
	i = 0;
	res = 0;
	TOID(struct snode) slelement;
	POBJ_SLIST_FOREACH(slelement, slhead, snd) {
		if (D_RO(slelement)->data != expectedResSL[i]) {
			res = 1;
			break;
		}
		i++;
	}
	PRINT_RES(res, singly linked list);

	pmemobj_close(pop);

	DONE(NULL);
}
6,919
22.862069
77
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_persist_count/mocks_windows.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * mocks_windows.h -- redefinitions of pmem functions * * This file is Windows-specific. * * This file should be included (i.e. using Forced Include) by libpmemobj * files, when compiled for the purpose of obj_persist_count test. * It would replace default implementation with mocked functions defined * in obj_persist_count.c. * * These defines could be also passed as preprocessor definitions. */ #ifndef WRAP_REAL #define pmem_persist __wrap_pmem_persist #define pmem_flush __wrap_pmem_flush #define pmem_drain __wrap_pmem_drain #define pmem_msync __wrap_pmem_msync #define pmem_memcpy_persist __wrap_pmem_memcpy_persist #define pmem_memcpy_nodrain __wrap_pmem_memcpy_nodrain #define pmem_memcpy __wrap_pmem_memcpy #define pmem_memmove_persist __wrap_pmem_memmove_persist #define pmem_memmove_nodrain __wrap_pmem_memmove_nodrain #define pmem_memmove __wrap_pmem_memmove #define pmem_memset_persist __wrap_pmem_memset_persist #define pmem_memset_nodrain __wrap_pmem_memset_nodrain #define pmem_memset __wrap_pmem_memset #endif
1,130
34.34375
73
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/obj_persist_count/obj_persist_count.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */

/*
 * obj_persist_count.c -- counting number of persists
 *
 * Wraps every libpmem flush/drain/memcpy/memmove/memset entry point with a
 * mock that tallies cache-line stores, drains, and potential cache misses,
 * then runs a series of libpmemobj operations and prints one counter row per
 * operation (the output is golden-matched).
 */
#define _GNU_SOURCE

#include "obj.h"
#include "pmalloc.h"
#include "unittest.h"

/* one tally per instrumented libpmem operation kind */
struct ops_counter {
	unsigned n_cl_stores;
	unsigned n_drain;
	unsigned n_pmem_persist;
	unsigned n_pmem_msync;
	unsigned n_pmem_flush;
	unsigned n_pmem_drain;
	unsigned n_flush_from_pmem_memcpy;
	unsigned n_flush_from_pmem_memset;
	unsigned n_drain_from_pmem_memcpy;
	unsigned n_drain_from_pmem_memset;
	unsigned n_pot_cache_misses;
};

/* running totals; tx_counter holds the cost of one empty transaction */
static struct ops_counter ops_counter;
static struct ops_counter tx_counter;

#define FLUSH_ALIGN ((uintptr_t)64)
#define MOVNT_THRESHOLD 256

/*
 * cl_flushed -- number of FLUSH_ALIGN-sized cache lines covered by the
 * range [addr, addr+len) after rounding it out to `alignment`
 */
static unsigned
cl_flushed(const void *addr, size_t len, uintptr_t alignment)
{
	uintptr_t start = (uintptr_t)addr & ~(alignment - 1);
	uintptr_t end = ((uintptr_t)addr + len + alignment - 1) &
		~(alignment - 1);

	return (unsigned)(end - start) / FLUSH_ALIGN;
}

#define PMEM_F_MEM_MOVNT (PMEM_F_MEM_WC | PMEM_F_MEM_NONTEMPORAL)
#define PMEM_F_MEM_MOV (PMEM_F_MEM_WB | PMEM_F_MEM_TEMPORAL)

/*
 * bulk_cl_changed -- count cache lines touched by a bulk memcpy/memset and
 * accumulate the potential cache misses it may incur, based on whether the
 * flags (and length) imply write-combining (non-temporal) stores
 */
static unsigned
bulk_cl_changed(const void *addr, size_t len, unsigned flags)
{
	uintptr_t start = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
	uintptr_t end = ((uintptr_t)addr + len + FLUSH_ALIGN - 1) &
		~(FLUSH_ALIGN - 1);

	unsigned cl_changed = (unsigned)(end - start) / FLUSH_ALIGN;

	int wc; /* write combining */
	if (flags & PMEM_F_MEM_NOFLUSH)
		wc = 0; /* NOFLUSH always uses temporal instructions */
	else if (flags & PMEM_F_MEM_MOVNT)
		wc = 1;
	else if (flags & PMEM_F_MEM_MOV)
		wc = 0;
	else if (len < MOVNT_THRESHOLD)
		wc = 0;
	else
		wc = 1;

	/* count number of potential cache misses */
	if (!wc) {
		/*
		 * When we don't use write combining, it means all
		 * cache lines may be missing.
		 */
		ops_counter.n_pot_cache_misses += cl_changed;
	} else {
		/*
		 * When we use write combining there won't be any cache misses,
		 * with an exception of unaligned beginning or end.
		 */
		if (start != (uintptr_t)addr)
			ops_counter.n_pot_cache_misses++;

		if (end != ((uintptr_t)addr + len) &&
				start + FLUSH_ALIGN != end)
			ops_counter.n_pot_cache_misses++;
	}

	return cl_changed;
}

/* account a cache-line-granularity flush */
static void
flush_cl(const void *addr, size_t len)
{
	unsigned flushed = cl_flushed(addr, len, FLUSH_ALIGN);
	ops_counter.n_cl_stores += flushed;
	ops_counter.n_pot_cache_misses += flushed;
}

/* account a page-granularity (msync) flush */
static void
flush_msync(const void *addr, size_t len)
{
	unsigned flushed = cl_flushed(addr, len, Pagesize);
	ops_counter.n_cl_stores += flushed;
	ops_counter.n_pot_cache_misses += flushed;
}

FUNC_MOCK(pmem_persist, void, const void *addr, size_t len)
FUNC_MOCK_RUN_DEFAULT {
	ops_counter.n_pmem_persist++;
	flush_cl(addr, len);
	ops_counter.n_drain++;

	_FUNC_REAL(pmem_persist)(addr, len);
}
FUNC_MOCK_END

FUNC_MOCK(pmem_msync, int, const void *addr, size_t len)
FUNC_MOCK_RUN_DEFAULT {
	ops_counter.n_pmem_msync++;
	flush_msync(addr, len);
	ops_counter.n_drain++;

	return _FUNC_REAL(pmem_msync)(addr, len);
}
FUNC_MOCK_END

FUNC_MOCK(pmem_flush, void, const void *addr, size_t len)
FUNC_MOCK_RUN_DEFAULT {
	ops_counter.n_pmem_flush++;
	flush_cl(addr, len);

	_FUNC_REAL(pmem_flush)(addr, len);
}
FUNC_MOCK_END

FUNC_MOCK(pmem_drain, void, void)
FUNC_MOCK_RUN_DEFAULT {
	ops_counter.n_pmem_drain++;
	ops_counter.n_drain++;

	_FUNC_REAL(pmem_drain)();
}
FUNC_MOCK_END

/* counting half of a memcpy/memmove: stores and (unless NOFLUSH) flushes */
static void
memcpy_nodrain_count(void *dest, const void *src, size_t len, unsigned flags)
{
	unsigned cl_stores = bulk_cl_changed(dest, len, flags);
	if (!(flags & PMEM_F_MEM_NOFLUSH))
		ops_counter.n_flush_from_pmem_memcpy += cl_stores;
	ops_counter.n_cl_stores += cl_stores;
}

/* nodrain accounting plus the trailing drain */
static void
memcpy_persist_count(void *dest, const void *src, size_t len, unsigned flags)
{
	memcpy_nodrain_count(dest, src, len, flags);
	ops_counter.n_drain_from_pmem_memcpy++;
	ops_counter.n_drain++;
}

FUNC_MOCK(pmem_memcpy_persist, void *, void *dest, const void *src, size_t len)
FUNC_MOCK_RUN_DEFAULT {
	memcpy_persist_count(dest, src, len, 0);
	return _FUNC_REAL(pmem_memcpy_persist)(dest, src, len);
}
FUNC_MOCK_END

FUNC_MOCK(pmem_memcpy_nodrain, void *, void *dest, const void *src, size_t len)
FUNC_MOCK_RUN_DEFAULT {
	memcpy_nodrain_count(dest, src, len, 0);
	return _FUNC_REAL(pmem_memcpy_nodrain)(dest, src, len);
}
FUNC_MOCK_END

static unsigned
sanitize_flags(unsigned flags)
{
	if (flags & PMEM_F_MEM_NOFLUSH) {
		/* NOFLUSH implies NODRAIN */
		flags |= PMEM_F_MEM_NODRAIN;
	}

	return flags;
}

FUNC_MOCK(pmem_memcpy, void *, void *dest, const void *src, size_t len,
		unsigned flags)
FUNC_MOCK_RUN_DEFAULT {
	flags = sanitize_flags(flags);

	if (flags & PMEM_F_MEM_NODRAIN)
		memcpy_nodrain_count(dest, src, len, flags);
	else
		memcpy_persist_count(dest, src, len, flags);

	return _FUNC_REAL(pmem_memcpy)(dest, src, len, flags);
}
FUNC_MOCK_END

FUNC_MOCK(pmem_memmove_persist, void *, void *dest, const void *src,
		size_t len)
FUNC_MOCK_RUN_DEFAULT {
	/* memmove is counted exactly like memcpy */
	memcpy_persist_count(dest, src, len, 0);
	return _FUNC_REAL(pmem_memmove_persist)(dest, src, len);
}
FUNC_MOCK_END

FUNC_MOCK(pmem_memmove_nodrain, void *, void *dest, const void *src,
		size_t len)
FUNC_MOCK_RUN_DEFAULT {
	memcpy_nodrain_count(dest, src, len, 0);
	return _FUNC_REAL(pmem_memmove_nodrain)(dest, src, len);
}
FUNC_MOCK_END

FUNC_MOCK(pmem_memmove, void *, void *dest, const void *src, size_t len,
		unsigned flags)
FUNC_MOCK_RUN_DEFAULT {
	flags = sanitize_flags(flags);

	if (flags & PMEM_F_MEM_NODRAIN)
		memcpy_nodrain_count(dest, src, len, flags);
	else
		memcpy_persist_count(dest, src, len, flags);

	return _FUNC_REAL(pmem_memmove)(dest, src, len, flags);
}
FUNC_MOCK_END

/* counting half of a memset: stores and (unless NOFLUSH) flushes */
static void
memset_nodrain_count(void *dest, size_t len, unsigned flags)
{
	unsigned cl_set = bulk_cl_changed(dest, len, flags);

	if (!(flags & PMEM_F_MEM_NOFLUSH))
		ops_counter.n_flush_from_pmem_memset += cl_set;
	ops_counter.n_cl_stores += cl_set;
}

/* nodrain accounting plus the trailing drain */
static void
memset_persist_count(void *dest, size_t len, unsigned flags)
{
	memset_nodrain_count(dest, len, flags);

	ops_counter.n_drain_from_pmem_memset++;
	ops_counter.n_drain++;
}

FUNC_MOCK(pmem_memset_persist, void *, void *dest, int c, size_t len)
FUNC_MOCK_RUN_DEFAULT {
	memset_persist_count(dest, len, 0);
	return _FUNC_REAL(pmem_memset_persist)(dest, c, len);
}
FUNC_MOCK_END

FUNC_MOCK(pmem_memset_nodrain, void *, void *dest, int c, size_t len)
FUNC_MOCK_RUN_DEFAULT {
	memset_nodrain_count(dest, len, 0);
	return _FUNC_REAL(pmem_memset_nodrain)(dest, c, len);
}
FUNC_MOCK_END

FUNC_MOCK(pmem_memset, void *, void *dest, int c, size_t len, unsigned flags)
FUNC_MOCK_RUN_DEFAULT {
	flags = sanitize_flags(flags);

	if (flags & PMEM_F_MEM_NODRAIN)
		memset_nodrain_count(dest, len, flags);
	else
		memset_persist_count(dest, len, flags);

	return _FUNC_REAL(pmem_memset)(dest, c, len, flags);
}
FUNC_MOCK_END

/*
 * reset_counters -- zero all counters
 */
static void
reset_counters(void)
{
	memset(&ops_counter, 0, sizeof(ops_counter));
}

/*
 * print_reset_counters -- print and then zero all counters
 *
 * When tx != 0 the fixed cost of `tx` empty transactions (captured earlier
 * into tx_counter) is subtracted from each printed value.
 */
static void
print_reset_counters(const char *task, unsigned tx)
{
#define CNT(name) (ops_counter.name - tx * tx_counter.name)
	UT_OUT(
		"%-14s %-7d %-10d %-12d %-10d %-10d %-10d %-15d %-17d %-15d %-17d %-23d",
		task,
		CNT(n_cl_stores),
		CNT(n_drain),
		CNT(n_pmem_persist),
		CNT(n_pmem_msync),
		CNT(n_pmem_flush),
		CNT(n_pmem_drain),
		CNT(n_flush_from_pmem_memcpy),
		CNT(n_drain_from_pmem_memcpy),
		CNT(n_flush_from_pmem_memset),
		CNT(n_drain_from_pmem_memset),
		CNT(n_pot_cache_misses));
#undef CNT
	reset_counters();
}

#define LARGE_SNAPSHOT ((1 << 10) * 10)

/* object big enough to force a large snapshot in tx_add_large */
struct foo_large {
	uint8_t snapshot[LARGE_SNAPSHOT];
};

struct foo {
	int val;
	uint64_t dest;

	PMEMoid bar;
	PMEMoid bar2;
};

int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_persist_count");

	if (argc != 2)
		UT_FATAL("usage: %s file-name", argv[0]);

	const char *path = argv[1];

	PMEMobjpool *pop;
	if ((pop = pmemobj_create(path, "persist_count",
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	/* header row matching the columns printed by print_reset_counters */
	UT_OUT(
		"%-14s %-7s %-10s %-12s %-10s %-10s %-10s %-15s %-17s %-15s %-17s %-23s",
		"task",
		"cl(all)",
		"drain(all)",
		"pmem_persist",
		"pmem_msync",
		"pmem_flush",
		"pmem_drain",
		"pmem_memcpy_cls",
		"pmem_memcpy_drain",
		"pmem_memset_cls",
		"pmem_memset_drain",
		"potential_cache_misses");

	print_reset_counters("pool_create", 0);

	/* allocate one structure to create a run */
	pmemobj_alloc(pop, NULL, sizeof(struct foo), 0, NULL, NULL);
	reset_counters();

	PMEMoid root = pmemobj_root(pop, sizeof(struct foo));
	UT_ASSERT(!OID_IS_NULL(root));
	print_reset_counters("root_alloc", 0);

	PMEMoid oid;
	int ret = pmemobj_alloc(pop, &oid, sizeof(struct foo), 0, NULL, NULL);
	UT_ASSERTeq(ret, 0);
	print_reset_counters("atomic_alloc", 0);

	pmemobj_free(&oid);
	print_reset_counters("atomic_free", 0);

	struct foo *f = pmemobj_direct(root);

	/* measure the cost of an empty transaction into tx_counter */
	TX_BEGIN(pop) {
	} TX_END
	memcpy(&tx_counter, &ops_counter, sizeof(ops_counter));
	print_reset_counters("tx_begin_end", 0);

	TX_BEGIN(pop) {
		f->bar = pmemobj_tx_alloc(sizeof(struct foo), 0);
		UT_ASSERT(!OID_IS_NULL(f->bar));
	} TX_END
	print_reset_counters("tx_alloc", 1);

	TX_BEGIN(pop) {
		f->bar2 = pmemobj_tx_alloc(sizeof(struct foo), 0);
		UT_ASSERT(!OID_IS_NULL(f->bar2));
	} TX_END
	print_reset_counters("tx_alloc_next", 1);

	TX_BEGIN(pop) {
		pmemobj_tx_free(f->bar);
	} TX_END
	print_reset_counters("tx_free", 1);

	TX_BEGIN(pop) {
		pmemobj_tx_free(f->bar2);
	} TX_END
	print_reset_counters("tx_free_next", 1);

	TX_BEGIN(pop) {
		pmemobj_tx_xadd_range_direct(&f->val, sizeof(f->val),
				POBJ_XADD_NO_FLUSH);
	} TX_END
	print_reset_counters("tx_add", 1);

	TX_BEGIN(pop) {
		pmemobj_tx_xadd_range_direct(&f->val, sizeof(f->val),
				POBJ_XADD_NO_FLUSH);
	} TX_END
	print_reset_counters("tx_add_next", 1);

	PMEMoid large_foo;
	pmemobj_zalloc(pop, &large_foo, sizeof(struct foo_large), 0);
	UT_ASSERT(!OID_IS_NULL(large_foo));
	reset_counters();

	struct foo_large *flarge = pmemobj_direct(large_foo);

	TX_BEGIN(pop) {
		pmemobj_tx_xadd_range_direct(&flarge->snapshot,
				sizeof(flarge->snapshot),
				POBJ_XADD_NO_FLUSH);
	} TX_END
	print_reset_counters("tx_add_large", 1);

	TX_BEGIN(pop) {
		pmemobj_tx_xadd_range_direct(&flarge->snapshot,
				sizeof(flarge->snapshot),
				POBJ_XADD_NO_FLUSH);
	} TX_END
	print_reset_counters("tx_add_lnext", 1);

	pmalloc(pop, &f->dest, sizeof(f->val), 0, 0);
	print_reset_counters("pmalloc", 0);

	pfree(pop, &f->dest);
	print_reset_counters("pfree", 0);

	uint64_t stack_var;
	pmalloc(pop, &stack_var, sizeof(f->val), 0, 0);
	print_reset_counters("pmalloc_stack", 0);

	pfree(pop, &stack_var);
	print_reset_counters("pfree_stack", 0);

	pmemobj_close(pop);

	DONE(NULL);
}

#ifdef _MSC_VER
/*
 * Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
 */
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
10,962
22.832609
80
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/rpmem_proto/rpmem_proto.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */

/*
 * rpmem_proto.c -- unit test for rpmem_proto header
 *
 * The purpose of this test is to make sure the structures which describe
 * rpmem protocol messages does not have any padding.
 */

#include "unittest.h"

#include "librpmem.h"
#include "rpmem_proto.h"

int
main(int argc, char *argv[])
{
	START(argc, argv, "rpmem_proto");

	/*
	 * For each wire structure: open an offset check, assert that every
	 * field starts exactly where the previous one ended, and finally
	 * assert that the struct's size equals the sum of its fields —
	 * i.e. the compiler inserted no padding anywhere.
	 */
	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_hdr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr, type);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr, size);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_hdr);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_hdr_resp);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr_resp, status);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr_resp, type);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr_resp, size);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_hdr_resp);

	ASSERT_ALIGNED_BEGIN(struct rpmem_pool_attr);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, signature);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, major);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, compat_features);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, incompat_features);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, ro_compat_features);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, poolset_uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, next_uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, prev_uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, user_flags);
	ASSERT_ALIGNED_CHECK(struct rpmem_pool_attr);

	/* packed variant must mirror struct rpmem_pool_attr field-for-field */
	ASSERT_ALIGNED_BEGIN(struct rpmem_pool_attr_packed);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, signature);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, major);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, compat_features);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, incompat_features);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed,
			ro_compat_features);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, poolset_uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, next_uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, prev_uuid);
	ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, user_flags);
	ASSERT_ALIGNED_CHECK(struct rpmem_pool_attr_packed);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_ibc_attr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, port);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, persist_method);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, rkey);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, raddr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, nlanes);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_ibc_attr);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_common);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, major);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, minor);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, pool_size);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, nlanes);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, provider);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, buff_size);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_common);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_pool_desc);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_pool_desc, size);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_pool_desc);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_create);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, hdr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, c);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, pool_attr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, pool_desc);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_create);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_create_resp);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_create_resp, hdr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_create_resp, ibc);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_create_resp);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_open);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_open, hdr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_open, c);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_open, pool_desc);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_open);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_open_resp);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_open_resp, hdr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_open_resp, ibc);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_open_resp, pool_attr);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_open_resp);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_close);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_close, hdr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_close, flags);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_close);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_close_resp);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_close_resp, hdr);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_close_resp);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_persist);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, flags);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, lane);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, addr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, size);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_persist);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_persist_resp);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist_resp, flags);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist_resp, lane);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_persist_resp);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_set_attr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_set_attr, hdr);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_set_attr, pool_attr);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_set_attr);

	ASSERT_ALIGNED_BEGIN(struct rpmem_msg_set_attr_resp);
	ASSERT_ALIGNED_FIELD(struct rpmem_msg_set_attr_resp, hdr);
	ASSERT_ALIGNED_CHECK(struct rpmem_msg_set_attr_resp);

	DONE(NULL);
}
5,733
41.474074
73
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/compat_incompat_features/pool_open.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2018, Intel Corporation */ /* * pool_open.c -- a tool for verifying that an obj/blk/log pool opens correctly * * usage: pool_open <path> <obj|blk|log> <layout> */ #include "unittest.h" int main(int argc, char *argv[]) { START(argc, argv, "compat_incompat_features"); if (argc < 3) UT_FATAL("usage: %s <obj|blk|log> <path>", argv[0]); char *type = argv[1]; char *path = argv[2]; if (strcmp(type, "obj") == 0) { PMEMobjpool *pop = pmemobj_open(path, ""); if (pop == NULL) { UT_FATAL("!%s: pmemobj_open failed", path); } else { UT_OUT("%s: pmemobj_open succeeded", path); pmemobj_close(pop); } } else if (strcmp(type, "blk") == 0) { PMEMblkpool *pop = pmemblk_open(path, 0); if (pop == NULL) { UT_FATAL("!%s: pmemblk_open failed", path); } else { UT_OUT("%s: pmemblk_open succeeded", path); pmemblk_close(pop); } } else if (strcmp(type, "log") == 0) { PMEMlogpool *pop = pmemlog_open(path); if (pop == NULL) { UT_FATAL("!%s: pmemlog_open failed", path); } else { UT_OUT("%s: pmemlog_open succeeded", path); pmemlog_close(pop); } } else { UT_FATAL("usage: %s <obj|blk|log> <path>", argv[0]); } DONE(NULL); }
1,237
23.27451
79
c
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/util_poolset/mocks_windows.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2017, Intel Corporation */ /* * mocks_windows.h -- redefinitions of libc functions used in util_poolset * * This file is Windows-specific. * * This file should be included (i.e. using Forced Include) by libpmem * files, when compiled for the purpose of util_poolset test. * It would replace default implementation with mocked functions defined * in util_poolset.c. * * These defines could be also passed as preprocessor definitions. */ #ifndef WRAP_REAL_OPEN #define os_open __wrap_os_open #endif #ifndef WRAP_REAL_FALLOCATE #define os_posix_fallocate __wrap_os_posix_fallocate #endif #ifndef WRAP_REAL_PMEM #define pmem_is_pmem __wrap_pmem_is_pmem #endif
730
25.107143
74
h
null
NearPMSW-main/nearpm/checkpointing/pmdk-checkpoint1/src/test/util_poolset/util_poolset.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */

/*
 * util_poolset.c -- unit test for util_pool_create() / util_pool_open()
 *
 * usage: util_poolset cmd minlen hdrsize [mockopts] setfile ...
 */

#include <stdbool.h>
#include "unittest.h"
#include "pmemcommon.h"
#include "set.h"
#include <errno.h>
#include "mocks.h"
#include "fault_injection.h"

#define LOG_PREFIX "ut"
#define LOG_LEVEL_VAR "TEST_LOG_LEVEL"
#define LOG_FILE_VAR "TEST_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0

#define SIG "PMEMXXX"
#define MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */

#define TEST_FORMAT_INCOMPAT_DEFAULT POOL_FEAT_CKSUM_2K
#define TEST_FORMAT_INCOMPAT_CHECK POOL_FEAT_INCOMPAT_VALID

/* amount by which the 'e'/'f' commands attempt to grow the pool */
static size_t Extend_size = MIN_PART * 2;

/*
 * Knobs consumed by the mock wrappers (see mocks_windows.h / mocks.h);
 * mock_options() below sets them from the -m command-line options.
 */
const char *Open_path = "";
os_off_t Fallocate_len = -1;
size_t Is_pmem_len = 0;

/*
 * poolset_info -- (internal) dumps poolset info and checks its integrity
 *
 * Performs the following checks:
 * - part_size[i] == rounddown(file_size - pool_hdr_size, Mmap_align)
 * - replica_size == sum(part_size)
 * - pool_size == min(replica_size)
 *
 * 'o' selects which message is printed: non-zero for a pool that was
 * opened, zero for one that was just created.
 */
static void
poolset_info(const char *fname, struct pool_set *set, int o)
{
	if (o)
		UT_OUT("%s: opened: nreps %d poolsize %zu rdonly %d",
			fname, set->nreplicas, set->poolsize, set->rdonly);
	else
		UT_OUT("%s: created: nreps %d poolsize %zu zeroed %d",
			fname, set->nreplicas, set->poolsize, set->zeroed);

	/* pool size is the minimum over all replica sizes */
	size_t poolsize = SIZE_MAX;

	for (unsigned r = 0; r < set->nreplicas; r++) {
		struct pool_replica *rep = set->replica[r];
		size_t repsize = 0;

		UT_OUT(" replica[%d]: nparts %d nhdrs %d repsize %zu "
			"is_pmem %d",
			r, rep->nparts, rep->nhdrs, rep->repsize,
			rep->is_pmem);

		for (unsigned i = 0; i < rep->nparts; i++) {
			struct pool_set_part *part = &rep->part[i];
			UT_OUT(" part[%d] path %s filesize %zu size %zu",
				i, part->path, part->filesize, part->size);
			/* part size = file size rounded down to mmap align */
			size_t partsize =
				(part->filesize & ~(Ut_mmap_align - 1));
			repsize += partsize;
			/*
			 * Non-first parts lose one alignment unit to the
			 * part header, unless the set uses a single header.
			 */
			if (i > 0 &&
			    (set->options & OPTION_SINGLEHDR) == 0)
				UT_ASSERTeq(part->size,
					partsize - Ut_mmap_align); /* XXX */
		}

		/* discount the per-part headers from the replica size */
		repsize -= (rep->nhdrs - 1) * Ut_mmap_align;
		UT_ASSERTeq(rep->repsize, repsize);
		UT_ASSERT(rep->resvsize >= repsize);

		if (rep->repsize < poolsize)
			poolsize = rep->repsize;
	}
	UT_ASSERTeq(set->poolsize, poolsize);
}

/*
 * mock_options -- (internal) parse mock options and enable mocked functions
 *
 * Returns 1 when 'arg' was a "-mX..." mock option (so the caller should
 * skip it), 0 when it is a regular argument.  Each call first resets all
 * mock knobs to their defaults.
 */
static int
mock_options(const char *arg)
{
	/* reset to defaults */
	Open_path = "";
	Fallocate_len = -1;
	Is_pmem_len = 0;

	if (arg[0] != '-' || arg[1] != 'm')
		return 0;

	/* option value (if any) starts at arg[4]: "-mX:<value>" */
	switch (arg[2]) {
	case 'n':
		/* do nothing */
		break;
	case 'o':
		/* open */
		Open_path = &arg[4];
		break;
	case 'f':
		/* fallocate */
		Fallocate_len = ATOLL(&arg[4]);
		break;
	case 'p':
		/* is_pmem */
		Is_pmem_len = ATOULL(&arg[4]);
		break;
	default:
		UT_FATAL("unknown mock option: %c", arg[2]);
	}

	return 1;
}

int
main(int argc, char *argv[])
{
	START(argc, argv, "util_poolset");

	common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
		MAJOR_VERSION, MINOR_VERSION);

	if (argc < 3)
		UT_FATAL("usage: %s cmd minsize [mockopts] "
			"setfile ...", argv[0]);

	char *fname;
	struct pool_set *set;
	int ret;

	size_t minsize = strtoul(argv[2], &fname, 0);

	/* each remaining argument is a set file, optionally preceded by -m */
	for (int arg = 3; arg < argc; arg++) {
		arg += mock_options(argv[arg]);
		fname = argv[arg];

		struct pool_attr attr;
		memset(&attr, 0, sizeof(attr));
		memcpy(attr.signature, SIG, sizeof(SIG));
		attr.major = 1;

		/* cmd selects which util_pool_* path is exercised */
		switch (argv[1][0]) {
		case 'c': /* create */
			attr.features.incompat = TEST_FORMAT_INCOMPAT_DEFAULT;
			ret = util_pool_create(&set, fname, 0, minsize,
				MIN_PART, &attr, NULL, REPLICAS_ENABLED);
			if (ret == -1)
				UT_OUT("!%s: util_pool_create", fname);
			else {
				/*
				 * XXX: On Windows pool files are created with
				 * R/W permissions, so no need for chmod().
				 */
#ifndef _WIN32
				util_poolset_chmod(set, S_IWUSR | S_IRUSR);
#endif
				poolset_info(fname, set, 0);
				util_poolset_close(set, DO_NOT_DELETE_PARTS);
			}
			break;
		case 'o': /* open */
			attr.features.incompat = TEST_FORMAT_INCOMPAT_CHECK;
			ret = util_pool_open(&set, fname, MIN_PART, &attr,
				NULL, NULL, 0 /* flags */);
			if (ret == -1)
				UT_OUT("!%s: util_pool_open", fname);
			else {
				poolset_info(fname, set, 1);
				util_poolset_close(set, DO_NOT_DELETE_PARTS);
			}
			break;
		case 'e': /* open + extend */
			attr.features.incompat = TEST_FORMAT_INCOMPAT_CHECK;
			ret = util_pool_open(&set, fname, MIN_PART, &attr,
				NULL, NULL, 0 /* flags */);
			UT_ASSERTeq(ret, 0);
			size_t esize = Extend_size;
			void *nptr = util_pool_extend(set, &esize, MIN_PART);
			if (nptr == NULL)
				UT_OUT("!%s: util_pool_extend", fname);
			else {
				poolset_info(fname, set, 1);
			}
			util_poolset_close(set, DO_NOT_DELETE_PARTS);
			break;
		case 'f': /* extend with injected allocation failure */
			if (!core_fault_injection_enabled())
				break;
			attr.features.incompat = TEST_FORMAT_INCOMPAT_CHECK;
			ret = util_pool_open(&set, fname, MIN_PART, &attr,
				NULL, NULL, 0 /* flags */);
			UT_ASSERTeq(ret, 0);
			size_t fsize = Extend_size;
			/* fail the 2nd malloc inside the part-append path */
			core_inject_fault_at(PMEM_MALLOC, 2,
				"util_poolset_append_new_part");
			void *fnptr = util_pool_extend(set, &fsize, MIN_PART);
			UT_ASSERTeq(fnptr, NULL);
			UT_ASSERTeq(errno, ENOMEM);
			util_poolset_close(set, DO_NOT_DELETE_PARTS);
			break;
		}
	}

	common_fini();

	DONE(NULL);
}
5,390
23.843318
76
c